-rw-r--r--  Documentation/DocBook/drm.tmpl  24
-rw-r--r--  Documentation/devicetree/bindings/drm/msm/dsi.txt  41
-rw-r--r--  Documentation/devicetree/bindings/drm/msm/hdmi.txt  3
-rw-r--r--  Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt  8
-rw-r--r--  Documentation/devicetree/bindings/gpu/st,stih4xx.txt  72
-rw-r--r--  Documentation/devicetree/bindings/panel/auo,b080uan01.txt  7
-rw-r--r--  Documentation/devicetree/bindings/panel/lg,lg4573.txt  19
-rw-r--r--  Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt  7
-rw-r--r--  Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt  7
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt  2
-rw-r--r--  Documentation/devicetree/bindings/video/fsl,dcu.txt  22
-rw-r--r--  MAINTAINERS  14
-rw-r--r--  arch/arm/configs/exynos_defconfig  6
-rw-r--r--  arch/arm/configs/multi_v7_defconfig  6
-rw-r--r--  arch/x86/configs/x86_64_defconfig  1
-rw-r--r--  drivers/char/agp/intel-gtt.c  4
-rw-r--r--  drivers/gpio/gpiolib.c  13
-rw-r--r--  drivers/gpu/drm/Kconfig  28
-rw-r--r--  drivers/gpu/drm/Makefile  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  213
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c  269
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h  65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c  670
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c  543
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c  838
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  283
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  213
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  154
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c  49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  580
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c  25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c  56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c  128
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c  195
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c  51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  156
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  187
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  305
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c  23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c  95
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_dpm.c  181
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h  182
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smc.c  857
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h  42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  155
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c  26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c  98
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  127
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smc.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c  58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  120
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi_dpm.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Kconfig  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Makefile  3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_regs.h  11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c  103
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c  249
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c  99
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h  398
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h  1
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h  39
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h  1246
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h  1282
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h  6080
-rw-r--r--  drivers/gpu/drm/amd/include/atom-bits.h (renamed from drivers/gpu/drm/amd/amdgpu/atom-bits.h)  0
-rw-r--r--  drivers/gpu/drm/amd/include/atom-names.h (renamed from drivers/gpu/drm/amd/amdgpu/atom-names.h)  0
-rw-r--r--  drivers/gpu/drm/amd/include/atom-types.h (renamed from drivers/gpu/drm/amd/amdgpu/atom-types.h)  0
-rw-r--r--  drivers/gpu/drm/amd/include/atombios.h (renamed from drivers/gpu/drm/amd/amdgpu/atombios.h)  0
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h  624
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_linux.h  135
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h  3
-rw-r--r--  drivers/gpu/drm/amd/include/pptable.h (renamed from drivers/gpu/drm/amd/amdgpu/pptable.h)  6
-rw-r--r--  drivers/gpu/drm/amd/include/vi_structs.h  417
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  424
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h  134
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c  81
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c  33
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c  48
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c  16
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c  6
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c  216
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c  4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c  4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_fbdev.c  36
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c  16
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig  24
-rw-r--r--  drivers/gpu/drm/bridge/Makefile  4
-rw-r--r--  drivers/gpu/drm/bridge/dw_hdmi.c  387
-rw-r--r--  drivers/gpu/drm/bridge/dw_hdmi.h  8
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c (renamed from drivers/gpu/drm/bridge/ptn3460.c)  0
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c (renamed from drivers/gpu/drm/bridge/ps8622.c)  0
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c  4
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c  41
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c  15
-rw-r--r--  drivers/gpu/drm/drm_atomic.c  90
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c  125
-rw-r--r--  drivers/gpu/drm/drm_context.c  51
-rw-r--r--  drivers/gpu/drm/drm_crtc.c  229
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c  75
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c  10
-rw-r--r--  drivers/gpu/drm/drm_drv.c  19
-rw-r--r--  drivers/gpu/drm/drm_edid.c  4
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c  63
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c  379
-rw-r--r--  drivers/gpu/drm/drm_gem.c  13
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c  10
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c  55
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c  3
-rw-r--r--  drivers/gpu/drm/drm_irq.c  332
-rw-r--r--  drivers/gpu/drm/drm_legacy.h  2
-rw-r--r--  drivers/gpu/drm/drm_lock.c  6
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c  59
-rw-r--r--  drivers/gpu/drm/drm_of.c  2
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c  23
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c  45
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig  2
-rw-r--r--  drivers/gpu/drm/exynos/Makefile  7
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c  113
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c  147
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c  123
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.h  3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c  186
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h  33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c  36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c  86
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h  6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c  286
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.h  20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c  111
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c  224
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h  134
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c  138
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c  174
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h  23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c  164
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.h  16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c  129
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c  182
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c  65
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c  348
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h  58
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c  16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c  20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h  15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c  16
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c  56
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.h  1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c  124
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c  1014
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c  223
-rw-r--r--  drivers/gpu/drm/fsl-dcu/Kconfig  18
-rw-r--r--  drivers/gpu/drm/fsl-dcu/Makefile  7
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c  210
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h  19
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c  404
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h  197
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c  23
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c  43
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h  33
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c  261
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h  17
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c  182
-rw-r--r--  drivers/gpu/drm/gma500/accel_2d.c  6
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c  48
-rw-r--r--  drivers/gpu/drm/i915/Kconfig  24
-rw-r--r--  drivers/gpu/drm/i915/Makefile  21
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c  63
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c  10
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  342
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c  42
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  89
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  298
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  861
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c  94
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  160
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c  787
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  732
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h  64
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c  70
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h  4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c  306
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  303
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c  5
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_reg.h  102
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c  138
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  543
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c  24
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  181
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c  22
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h  16
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c  220
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c  41
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c  3
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c  232
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h  29
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c  49
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c  22
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c  1117
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  4264
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c  499
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c  39
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  163
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c  51
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h  3
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c  97
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c  46
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c  540
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c  110
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c  117
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h  245
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c  446
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c  508
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c  893
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h  21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c  74
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c  335
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.h  57
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c  104
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c  63
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c  94
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  827
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c  81
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  408
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  92
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c  115
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c  47
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c  205
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c  2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  74
-rw-r--r--  drivers/gpu/drm/imx/dw_hdmi-imx.c  5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_cursor.c  22
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c  1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h  1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c  41
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c  1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c  25
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c  221
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c  8
-rw-r--r--  drivers/gpu/drm/msm/Kconfig  15
-rw-r--r--  drivers/gpu/drm/msm/Makefile  15
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h  18
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h  33
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h  206
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h  18
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h  18
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c  58
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h  43
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h  211
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c  92
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h  44
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c  270
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c  216
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c (renamed from drivers/gpu/drm/msm/dsi/dsi_phy.c)  413
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h  89
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c  150
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c  166
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.c  42
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.h  9
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c  31
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.xml.h  22
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c  79
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h  32
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h  28
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_audio.c  1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_bridge.c  16
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c  101
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c  1437
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c  52
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c  32
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c  57
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h  26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h  22
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c  8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c  19
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c  38
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h  24
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c  8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c  9
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h  180
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c  180
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h  13
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c  12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c  139
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c  243
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h  43
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c  18
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c  19
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c  80
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h  57
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c  334
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c  26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h  3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_common.xml.h  28
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_format.c  46
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.c  3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.h  20
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c  82
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h  19
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c  34
-rw-r--r--  drivers/gpu/drm/nouveau/Kbuild  1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/arb.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dac.c  45
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c  23
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.c  29
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.h  26
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv04.c  16
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.c  30
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h  199
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/client.h  27
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/device.h  73
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/ioctl.h  34
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/notify.h  12
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/object.h  70
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/os.h  7
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/client.h  65
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/debug.h  9
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h  274
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h  62
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h  51
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/engine.h  81
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/enum.h  3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h  62
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/handle.h  34
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/memory.h  53
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/mm.h  3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h  53
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/object.h  261
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h  22
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/option.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/parent.h  58
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/pci.h  14
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/printk.h  29
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h  28
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h  139
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h  35
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h  17
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/device.h  30
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h  39
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h  32
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h  26
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h  75
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h  160
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h  118
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h  63
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h  9
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h  7
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h  10
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h  35
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h  50
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h  38
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h  29
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h  15
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h  10
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h  24
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h  44
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h  70
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h  43
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h  139
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h  26
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h  31
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h  151
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h  30
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h  54
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h  37
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h  31
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h  78
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h  30
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h  34
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h  31
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h  106
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h  83
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h  30
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h  48
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c  221
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_agp.c  195
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_agp.h  10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c  22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c  44
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  84
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c  123
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  40
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c  24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c  10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c  17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c  148
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h  33
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c  39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  58
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c  10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_nvif.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c  227
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.h  47
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sysfs.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c  66
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c  9
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c  12
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c  197
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/client.c  68
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/device.c  55
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/notify.c  49
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/object.c  200
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/Kbuild  7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/client.c  188
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/engctx.c  239
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/engine.c  154
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/enum.c  28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c  379
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/handle.c  221
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/ioctl.c  395
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/memory.c  64
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/mm.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/namedb.c  199
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/object.c  400
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/oproxy.c  200
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/option.c  20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/parent.c  159
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/printk.c  103
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/ramht.c  144
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/subdev.c  208
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/Kbuild  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c  79
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc  8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c  180
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c  174
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c  167
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c  144
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h  7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c  189
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild  12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c  2923
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c  82
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h  12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c  358
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c  326
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c  190
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c  89
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c  204
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c  131
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c  153
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c  427
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c  478
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c  1685
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h  54
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c  295
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/user.c  371
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild  86
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c  325
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c  80
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c  114
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c  123
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c  49
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c  301
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h  127
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c  118
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h  61
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c  117
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c  63
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c  244
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c  132
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c  242
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c)  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c)  27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c  68
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c  63
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c  100
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c  247
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h  91
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c  86
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c  275
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c  139
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c  1310
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c  536
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c  265
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c  100
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c  100
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c  109
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c  147
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c  105
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c)  34
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c  30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c  55
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c)  41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c  41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c  55
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c  186
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c  1667
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h  231
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c)  29
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c  68
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c  127
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h  82
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c  202
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h  63
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c  77
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c  101
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c  103
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c  80
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c  111
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c  81
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c  83
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c  165
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h  78
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c)  59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c  58
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c  171
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c  58
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c  58
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c  58
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c  58
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c  58
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c  58
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c  139
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c  399
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h  43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c  95
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c)  83
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c  74
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c  138
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild  11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c  157
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c)  28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c  36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c  36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c  36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h  18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c (renamed from drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c)  96
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h  18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c  149
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c  131
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c  133
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c  156
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild  5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c  176
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c  165
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c  163
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c  195
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h  28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/falcon.c  292
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild  20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c  345
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c  415
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c  285
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h  24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h  29
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h  24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c  270
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h  35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c  93
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c  220
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c  96
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c  97
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c  243
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c  91
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c  481
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c  924
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h  31
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c  1037
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h  89
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c  30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c  30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c  45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c  44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c  94
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c  293
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c  323
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c  34
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c  92
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c  638
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h  170
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c  153
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c  208
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c  335
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c  533
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h  39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h  132
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild  48
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c  136
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c  327
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h  80
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c  52
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c  88
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c  143
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c  80
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c  135
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c  119
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c  103
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c  13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h  9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c  25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c  196
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  1556
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h  128
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c  32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c  45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c  47
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c  34
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c  34
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c  227
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c  43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c  32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c  43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c  349
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c  215
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c  223
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c  32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c  83
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c  47
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c  48
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c  46
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c  48
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c  1213
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c  824
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h  13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c  59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c  59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c  567
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c  220
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c  180
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c  331
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c  218
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c  218
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c  590
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c  108
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c  877
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h  32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c  84
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c  406
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h  27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c  107
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c  248
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c  228
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h  16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c  32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c  100
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c  100
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c  98
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c  43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h  11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c  31
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c  100
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c  100
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c  43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h  9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild  3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c  31
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c  101
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c  100
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c  98
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c  43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c  43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h  11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild  5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c  911
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c  108
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c  126
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c  214
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h  16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c  66
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c  80
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c  154
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c  157
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c  113
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c  97
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h  18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c  152
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h  87
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c  138
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild  5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c  110
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c  111
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c  188
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c  151
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c  106
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c  224
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h  35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c  85
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h  21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h  21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c  79
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c  192
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c  133
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c  56
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c  205
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h  23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c  40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c  287
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c  25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c  20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c  14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c  147
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c  14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c  28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c  30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c  72
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c  36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c  81
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c  16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c  18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c  30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c  57
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c  592
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c  11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c  31
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c  92
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c  174
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c  34
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c  14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c  187
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c  116
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c  18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c  36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c  98
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c  40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c  52
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_family.h)  76
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c  46
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c  71
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c  32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c  78
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h  21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c  81
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c  93
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h  18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c  176
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c  41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c  318
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c  326
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c  356
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c  344
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c  282
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c  56
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c  173
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c  294
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h  24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c  128
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h  5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c  46
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c  44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c  82
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c  125
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c  77
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c  44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c  242
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h  18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c  71
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c  44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c  24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c  44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c  151
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c  197
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c  23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c  18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c  121
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h  25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c  27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c  55
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c  27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c  23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c  23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c  23
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c  60
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h  53
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c  41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c  53
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c  32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c  77
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c  47
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h  14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c  54
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c  57
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c  29
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c  27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c  27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c  27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c  351
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h  24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h  107
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c  100
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h  50
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h  25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c  342
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c  263
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c  304
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c  104
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c  54
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c  39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c  47
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c  176
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h  14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c  51
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c  50
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c  51
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c  35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c  507
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c  6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c  57
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c  40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c  53
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h  9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c  147
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c  41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c)  50
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c  47
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c  41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c  46
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild  30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c  374
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c  151
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h  30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c  181
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c  181
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c  742
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c  149
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c  245
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h  37
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c  95
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c  96
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c  86
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c  113
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c  241
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c  106
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c  40
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c  39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c  199
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c  104
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c  96
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c  109
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h  32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c  119
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h  107
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c  87
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c  51
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c  87
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c  18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c  36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c  36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h  13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h  67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c124
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c301
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c394
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c240
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c247
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c266
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c124
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c202
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c146
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h76
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c178
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h46
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c234
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c138
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c128
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c195
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c171
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c182
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c230
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4 (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h)4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c (renamed from drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c)29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c102
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c305
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c80
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c129
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c106
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h86
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c122
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c158
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c253
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c128
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h20
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c38
-rw-r--r--drivers/gpu/drm/panel/Kconfig16
-rw-r--r--drivers/gpu/drm/panel/Makefile5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c298
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c (renamed from drivers/gpu/drm/panel/panel-ld9040.c)2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c (renamed from drivers/gpu/drm/panel/panel-s6e8aa0.c)2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c99
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c40
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c5
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c47
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c12
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c269
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h88
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/Makefile7
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c141
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h12
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c (renamed from drivers/gpu/drm/sti/sti_drm_crtc.c)211
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h22
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c243
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.h5
-rw-r--r--drivers/gpu/drm/sti/sti_drm_crtc.h22
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.c251
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.h18
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c (renamed from drivers/gpu/drm/sti/sti_drm_drv.c)147
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h (renamed from drivers/gpu/drm/sti/sti_drm_drv.h)6
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c536
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.h7
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c27
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c482
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.h12
-rw-r--r--drivers/gpu/drm/sti/sti_layer.c213
-rw-r--r--drivers/gpu/drm/sti/sti_layer.h131
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c72
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h27
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c122
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h71
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c54
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c72
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h19
-rw-r--r--drivers/gpu/drm/tegra/dc.c300
-rw-r--r--drivers/gpu/drm/tegra/dc.h24
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c63
-rw-r--r--drivers/gpu/drm/tegra/dpaux.h2
-rw-r--r--drivers/gpu/drm/tegra/drm.c16
-rw-r--r--drivers/gpu/drm/tegra/drm.h10
-rw-r--r--drivers/gpu/drm/tegra/dsi.c126
-rw-r--r--drivers/gpu/drm/tegra/dsi.h4
-rw-r--r--drivers/gpu/drm/tegra/fb.c35
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c78
-rw-r--r--drivers/gpu/drm/tegra/output.c20
-rw-r--r--drivers/gpu/drm/tegra/rgb.c49
-rw-r--r--drivers/gpu/drm/tegra/sor.c1606
-rw-r--r--drivers/gpu/drm/tegra/sor.h298
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c41
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/includeCheck.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h110
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h2071
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h457
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h1487
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h99
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h50
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h1204
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h1633
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_escape.h (renamed from drivers/gpu/drm/vmwgfx/svga_escape.h)2
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h (renamed from drivers/gpu/drm/vmwgfx/svga_overlay.h)10
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h (renamed from drivers/gpu/drm/vmwgfx/svga_reg.h)664
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_types.h46
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h2627
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h912
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_types.h45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c1294
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h209
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c1303
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c26
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c786
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c662
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c184
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c508
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h335
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1935
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c575
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c145
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1654
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h194
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c49
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c212
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_reg.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c277
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c556
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c500
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c555
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h160
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c1266
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c315
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c2
-rw-r--r--drivers/gpu/host1x/mipi.c253
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c95
-rw-r--r--drivers/gpu/vga/vgaarb.c142
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c29
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c3
-rw-r--r--drivers/pwm/Kconfig7
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-crc.c143
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--include/drm/bridge/dw_hdmi.h7
-rw-r--r--include/drm/drmP.h57
-rw-r--r--include/drm/drm_atomic.h3
-rw-r--r--include/drm/drm_atomic_helper.h4
-rw-r--r--include/drm/drm_crtc.h83
-rw-r--r--include/drm/drm_crtc_helper.h8
-rw-r--r--include/drm/drm_dp_helper.h3
-rw-r--r--include/drm/drm_fb_helper.h212
-rw-r--r--include/drm/drm_modeset_lock.h1
-rw-r--r--include/drm/drm_plane_helper.h45
-rw-r--r--include/drm/intel-gtt.h4
-rw-r--r--include/linux/gpio/machine.h1
-rw-r--r--include/uapi/drm/drm_fourcc.h7
-rw-r--r--include/uapi/drm/i915_drm.h16
-rw-r--r--include/uapi/drm/vmwgfx_drm.h38
-rw-r--r--include/video/samsung_fimd.h1
1226 files changed, 104731 insertions, 60750 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 2fb9a5457522..9ddf8c6cb887 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3982,7 +3982,6 @@ int num_ioctls;</synopsis>
         <title>Interrupt Handling</title>
 !Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
-!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_fini
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
       </sect2>
@@ -4012,7 +4011,6 @@ int num_ioctls;</synopsis>
         <title>Frontbuffer Tracking</title>
 !Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
 !Idrivers/gpu/drm/i915/intel_frontbuffer.c
-!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
 !Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
       </sect2>
       <sect2>
@@ -4045,6 +4043,11 @@ int num_ioctls;</synopsis>
         </para>
       </sect2>
       <sect2>
+        <title>Hotplug</title>
+!Pdrivers/gpu/drm/i915/intel_hotplug.c Hotplug
+!Idrivers/gpu/drm/i915/intel_hotplug.c
+      </sect2>
+      <sect2>
 	<title>High Definition Audio</title>
 !Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
 !Idrivers/gpu/drm/i915/intel_audio.c
@@ -4195,6 +4198,23 @@ int num_ioctls;</synopsis>
 !Idrivers/gpu/drm/i915/i915_gem_gtt.c
       </sect2>
       <sect2>
+        <title>GTT Fences and Swizzling</title>
+!Idrivers/gpu/drm/i915/i915_gem_fence.c
+        <sect3>
+          <title>Global GTT Fence Handling</title>
+!Pdrivers/gpu/drm/i915/i915_gem_fence.c fence register handling
+        </sect3>
+        <sect3>
+          <title>Hardware Tiling and Swizzling Details</title>
+!Pdrivers/gpu/drm/i915/i915_gem_fence.c tiling swizzling details
+        </sect3>
+      </sect2>
+      <sect2>
+        <title>Object Tiling IOCTLs</title>
+!Idrivers/gpu/drm/i915/i915_gem_tiling.c
+!Pdrivers/gpu/drm/i915/i915_gem_tiling.c buffer object tiling
+      </sect2>
+      <sect2>
         <title>Buffer Object Eviction</title>
 	<para>
 	  This section documents the interface functions for evicting buffer
diff --git a/Documentation/devicetree/bindings/drm/msm/dsi.txt b/Documentation/devicetree/bindings/drm/msm/dsi.txt
index cd8fe6cf536c..d56923cd5590 100644
--- a/Documentation/devicetree/bindings/drm/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/dsi.txt
@@ -30,20 +30,27 @@ Optional properties:
 - panel@0: Node of panel connected to this DSI controller.
   See files in Documentation/devicetree/bindings/panel/ for each supported
   panel.
-- qcom,dual-panel-mode: Boolean value indicating if the DSI controller is
+- qcom,dual-dsi-mode: Boolean value indicating if the DSI controller is
   driving a panel which needs 2 DSI links.
-- qcom,master-panel: Boolean value indicating if the DSI controller is driving
+- qcom,master-dsi: Boolean value indicating if the DSI controller is driving
   the master link of the 2-DSI panel.
-- qcom,sync-dual-panel: Boolean value indicating if the DSI controller is
+- qcom,sync-dual-dsi: Boolean value indicating if the DSI controller is
   driving a 2-DSI panel whose 2 links need to receive commands simultaneously.
 - interrupt-parent: phandle to the MDP block if the interrupt signal is routed
   through MDP block
+- pinctrl-names: the pin control state names; should contain "default"
+- pinctrl-0: the default pinctrl state (active)
+- pinctrl-n: the "sleep" pinctrl state
+- port: DSI controller output port. This contains one endpoint subnode, with its
+  remote-endpoint set to the phandle of the connected panel's endpoint.
+  See Documentation/devicetree/bindings/graph.txt for device graph info.
 
 DSI PHY:
 Required properties:
 - compatible: Could be the following
   * "qcom,dsi-phy-28nm-hpm"
   * "qcom,dsi-phy-28nm-lp"
+  * "qcom,dsi-phy-20nm"
 - reg: Physical base address and length of the registers of PLL, PHY and PHY
   regulator
 - reg-names: The names of register regions. The following regions are required:
@@ -59,6 +66,10 @@ Required properties:
   * "iface_clk"
 - vddio-supply: phandle to vdd-io regulator device node
 
+Optional properties:
+- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
+  regulator should be used.
+
 Example:
 	mdss_dsi0: qcom,mdss_dsi@fd922800 {
 		compatible = "qcom,mdss-dsi-ctrl";
@@ -90,9 +101,13 @@ Example:
 
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 
-		qcom,dual-panel-mode;
-		qcom,master-panel;
-		qcom,sync-dual-panel;
+		qcom,dual-dsi-mode;
+		qcom,master-dsi;
+		qcom,sync-dual-dsi;
+
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&mdss_dsi_active>;
+		pinctrl-1 = <&mdss_dsi_suspend>;
 
 		panel: panel@0 {
 			compatible = "sharp,lq101r1sx01";
@@ -101,6 +116,18 @@ Example:
 
 			power-supply = <...>;
 			backlight = <...>;
+
+			port {
+				panel_in: endpoint {
+					remote-endpoint = <&dsi0_out>;
+				};
+			};
+		};
+
+		port {
+			dsi0_out: endpoint {
+				remote-endpoint = <&panel_in>;
+			};
 		};
 	};
 
@@ -117,4 +144,6 @@ Example:
 		clock-names = "iface_clk";
 		clocks = <&mmcc MDSS_AHB_CLK>;
 		vddio-supply = <&pma8084_l12>;
+
+		qcom,dsi-phy-regulator-ldo-mode;
 	};
diff --git a/Documentation/devicetree/bindings/drm/msm/hdmi.txt b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
index c43aa53debed..e926239e1101 100644
--- a/Documentation/devicetree/bindings/drm/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
@@ -2,8 +2,9 @@ Qualcomm adreno/snapdragon hdmi output
 
 Required properties:
 - compatible: one of the following
+   * "qcom,hdmi-tx-8994"
    * "qcom,hdmi-tx-8084"
-   * "qcom,hdmi-tx-8074"
+   * "qcom,hdmi-tx-8974"
    * "qcom,hdmi-tx-8660"
    * "qcom,hdmi-tx-8960"
 - reg: Physical base address and length of the controller's registers
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
index 009f4bfa1590..e685610d38e2 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -197,9 +197,11 @@ of the following host1x client modules:
 - sor: serial output resource
 
   Required properties:
-  - compatible: For Tegra124, must contain "nvidia,tegra124-sor".  Otherwise,
-    must contain '"nvidia,<chip>-sor", "nvidia,tegra124-sor"', where <chip>
-    is tegra132.
+  - compatible: Should be:
+    - "nvidia,tegra124-sor": for Tegra124 and Tegra132
+    - "nvidia,tegra132-sor": for Tegra132
+    - "nvidia,tegra210-sor": for Tegra210
+    - "nvidia,tegra210-sor1": for Tegra210
   - reg: Physical base address and length of the controller's registers.
   - interrupts: The interrupt outputs from the controller.
   - clocks: Must contain an entry for each entry in clock-names.
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
index 6b1d75f1a529..a36dfce0032e 100644
--- a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -52,10 +52,9 @@ STMicroelectronics stih4xx platforms
     See ../reset/reset.txt for details.
   - reset-names: names of the resets listed in resets property in the same
     order.
-  - ranges: to allow probing of subdevices
 
 - sti-hdmi: hdmi output block
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   Required properties:
   - compatible: "st,stih<chip>-hdmi";
   - reg: Physical base address of the IP registers and length of memory mapped region.
@@ -72,7 +71,7 @@ STMicroelectronics stih4xx platforms
 
 sti-hda:
   Required properties:
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   - compatible: "st,stih<chip>-hda"
   - reg: Physical base address of the IP registers and length of memory mapped region.
   - reg-names: names of the mapped memory regions listed in regs property in
@@ -85,7 +84,7 @@ sti-hda:
 
 sti-dvo:
   Required properties:
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   - compatible: "st,stih<chip>-dvo"
   - reg: Physical base address of the IP registers and length of memory mapped region.
   - reg-names: names of the mapped memory regions listed in regs property in
@@ -195,38 +194,37 @@ Example:
 			reg-names	= "tvout-reg", "hda-reg", "syscfg";
 			reset-names     = "tvout";
 			resets          = <&softreset STIH416_HDTVOUT_SOFTRESET>;
-			ranges;
-
-			sti-hdmi@fe85c000 {
-				compatible	= "st,stih416-hdmi";
-				reg		= <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
-				reg-names	= "hdmi-reg", "syscfg";
-				interrupts	= <GIC_SPI 173 IRQ_TYPE_NONE>;
-				interrupt-names	= "irq";
-				clock-names	= "pix", "tmds", "phy", "audio";
-				clocks          = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
-			};
-
-			sti-hda@fe85a000 {
-				compatible	= "st,stih416-hda";
-				reg		= <0xfe85a000 0x400>, <0xfe83085c 0x4>;
-				reg-names	= "hda-reg", "video-dacs-ctrl";
-				clock-names	= "pix", "hddac";
-				clocks          = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
-			};
-
-			sti-dvo@8d00400 {
-				compatible	= "st,stih407-dvo";
-				reg		= <0x8d00400 0x200>;
-				reg-names	= "dvo-reg";
-				clock-names	= "dvo_pix", "dvo",
-						  "main_parent", "aux_parent";
-				clocks		= <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
-						  <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
-				pinctrl-names	= "default";
-				pinctrl-0	= <&pinctrl_dvo>;
-				sti,panel	= <&panel_dvo>;
-			};
+		};
+
+		sti-hdmi@fe85c000 {
+			compatible	= "st,stih416-hdmi";
+			reg		= <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
+			reg-names	= "hdmi-reg", "syscfg";
+			interrupts	= <GIC_SPI 173 IRQ_TYPE_NONE>;
+			interrupt-names	= "irq";
+			clock-names	= "pix", "tmds", "phy", "audio";
+			clocks          = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
+		};
+
+		sti-hda@fe85a000 {
+			compatible	= "st,stih416-hda";
+			reg		= <0xfe85a000 0x400>, <0xfe83085c 0x4>;
+			reg-names	= "hda-reg", "video-dacs-ctrl";
+			clock-names	= "pix", "hddac";
+			clocks          = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
+		};
+
+		sti-dvo@8d00400 {
+			compatible	= "st,stih407-dvo";
+			reg		= <0x8d00400 0x200>;
+			reg-names	= "dvo-reg";
+			clock-names	= "dvo_pix", "dvo",
+					  "main_parent", "aux_parent";
+			clocks		= <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
+					  <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
+			pinctrl-names	= "default";
+			pinctrl-0	= <&pinctrl_dvo>;
+			sti,panel	= <&panel_dvo>;
 		};
 
 		sti-hqvdp@9c000000 {
@@ -237,7 +235,7 @@ Example:
 				reset-names     = "hqvdp";
 				resets          = <&softreset STIH407_HDQVDP_SOFTRESET>;
 				st,vtg		= <&vtg_main>;
-			};
+		};
 	};
 	...
 };
diff --git a/Documentation/devicetree/bindings/panel/auo,b080uan01.txt b/Documentation/devicetree/bindings/panel/auo,b080uan01.txt
new file mode 100644
index 000000000000..bae0e2b51467
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b080uan01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 8.0" WUXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,b080uan01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/lg,lg4573.txt b/Documentation/devicetree/bindings/panel/lg,lg4573.txt
new file mode 100644
index 000000000000..824441f4e95a
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/lg,lg4573.txt
@@ -0,0 +1,19 @@
+LG LG4573 TFT Liquid Crystal Display with SPI control bus
+
+Required properties:
+  - compatible: "lg,lg4573"
+  - reg: address of the panel on the SPI bus
+
+The panel must obey the rules for SPI slave devices specified in document [1].
+
+[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+	lcd_panel: display@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "lg,lg4573";
+		spi-max-frequency = <10000000>;
+		reg = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt b/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt
new file mode 100644
index 000000000000..8e1914d1edb8
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt
@@ -0,0 +1,7 @@
+NEC LCD Technologies, Ltd. WQVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "nec,nl4827hc19-05b"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt b/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt
new file mode 100644
index 000000000000..ddf8e211d382
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt
@@ -0,0 +1,7 @@
+OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
+
+Required properties:
+- compatible: should be "okaya,rs800480t-7x0gp"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index ab1e493b2be4..ac5f0c34ae00 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -145,6 +145,7 @@ mundoreader	Mundo Reader S.L.
 murata	Murata Manufacturing Co., Ltd.
 mxicy	Macronix International Co., Ltd.
 national	National Semiconductor
+nec	NEC LCD Technologies, Ltd.
 neonode		Neonode Inc.
 netgear	NETGEAR
 netlogic	Broadcom Corporation (formerly NetLogic Microsystems)
@@ -155,6 +156,7 @@ nokia	Nokia
 nuvoton	Nuvoton Technology Corporation
 nvidia	NVIDIA
 nxp	NXP Semiconductors
+okaya	Okaya Electric America, Inc.
 onnn	ON Semiconductor Corp.
 opencores	OpenCores.org
 option	Option NV
diff --git a/Documentation/devicetree/bindings/video/fsl,dcu.txt b/Documentation/devicetree/bindings/video/fsl,dcu.txt
new file mode 100644
index 000000000000..ebf1be9ae393
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/fsl,dcu.txt
@@ -0,0 +1,22 @@
+Device Tree bindings for Freescale DCU DRM Driver
+
+Required properties:
+- compatible:		Should be one of
+	* "fsl,ls1021a-dcu".
+	* "fsl,vf610-dcu".
+
+- reg:			Address and length of the register set for dcu.
+- clocks:		From common clock binding: handle to dcu clock.
+- clock-names:		From common clock binding: Shall be "dcu".
+- big-endian:		Boolean property; the LS1021A DCU registers are big-endian.
+- fsl,panel:		The phandle to panel node.
+
+Examples:
+dcu: dcu@2ce0000 {
+	compatible = "fsl,ls1021a-dcu";
+	reg = <0x0 0x2ce0000 0x0 0x10000>;
+	clocks = <&platform_clk 0>;
+	clock-names = "dcu";
+	big-endian;
+	fsl,panel = <&panel>;
+};
diff --git a/MAINTAINERS b/MAINTAINERS
index 73069e879ed5..a9abe32f8e14 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -643,9 +643,14 @@ M:	Oded Gabbay <oded.gabbay@gmail.com>
 L:	dri-devel@lists.freedesktop.org
 T:	git git://people.freedesktop.org/~gabbayo/linux.git
 S:	Supported
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
 F:	drivers/gpu/drm/amd/amdkfd/
 F:	drivers/gpu/drm/amd/include/cik_structs.h
 F:	drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+F:	drivers/gpu/drm/amd/include/vi_structs.h
 F:	drivers/gpu/drm/radeon/radeon_kfd.c
 F:	drivers/gpu/drm/radeon/radeon_kfd.h
 F:	include/uapi/linux/kfd_ioctl.h
@@ -3577,6 +3582,15 @@ F:	drivers/gpu/drm/exynos/
 F:	include/drm/exynos*
 F:	include/uapi/drm/exynos*
 
+DRM DRIVERS FOR FREESCALE DCU
+M:	Jianwei Wang <jianwei.wang.chn@gmail.com>
+M:	Alison Wang <alison.wang@freescale.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+F:	drivers/gpu/drm/fsl-dcu/
+F:	Documentation/devicetree/bindings/video/fsl,dcu.txt
+F:	Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt
+
 DRM DRIVERS FOR FREESCALE IMX
 M:	Philipp Zabel <p.zabel@pengutronix.de>
 L:	dri-devel@lists.freedesktop.org
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 9504e7790288..3eaf8fbaf603 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -124,14 +124,14 @@ CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_TPS65090=y
 CONFIG_DRM=y
-CONFIG_DRM_PTN3460=y
-CONFIG_DRM_PS8622=y
+CONFIG_DRM_NXP_PTN3460=y
+CONFIG_DRM_PARADE_PS8622=y
 CONFIG_DRM_EXYNOS=y
 CONFIG_DRM_EXYNOS_FIMD=y
 CONFIG_DRM_EXYNOS_DSI=y
 CONFIG_DRM_EXYNOS_HDMI=y
 CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_DRM_PANEL_S6E8AA0=y
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y
 CONFIG_FB_SIMPLE=y
 CONFIG_EXYNOS_VIDEO=y
 CONFIG_EXYNOS_MIPI_DSI=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 824a0cff3998..f84471d5d2db 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -440,8 +440,8 @@ CONFIG_VIDEO_ML86V7667=m
 CONFIG_DRM=y
 # CONFIG_DRM_I2C_CH7006 is not set
 # CONFIG_DRM_I2C_SIL164 is not set
-CONFIG_DRM_PTN3460=m
-CONFIG_DRM_PS8622=m
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_NOUVEAU=m
 CONFIG_DRM_EXYNOS=m
 CONFIG_DRM_EXYNOS_DSI=y
@@ -449,7 +449,7 @@ CONFIG_DRM_EXYNOS_FIMD=y
 CONFIG_DRM_EXYNOS_HDMI=y
 CONFIG_DRM_RCAR_DU=m
 CONFIG_DRM_TEGRA=y
-CONFIG_DRM_PANEL_S6E8AA0=m
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_FB_WM8505=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 962297d244b3..cb5b3ab5beec 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -208,7 +208,6 @@ CONFIG_AGP_AMD64=y
 CONFIG_AGP_INTEL=y
 CONFIG_DRM=y
 CONFIG_DRM_I915=y
-CONFIG_DRM_I915_KMS=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_EFI=y
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c6dea3f6917b..1341a94cc779 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1408,8 +1408,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
-		   phys_addr_t *mappable_base, unsigned long *mappable_end)
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+		   phys_addr_t *mappable_base, u64 *mappable_end)
 {
 	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
 	*stolen_size = intel_private.stolen_size;
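
The widened prototype keeps GTT totals above 4 GiB from truncating where
size_t and unsigned long are 32-bit. A minimal caller sketch under that
assumption (the function and variable names are illustrative, not from this
patch):

	#include <drm/intel-gtt.h>

	static void example_read_gtt_sizes(void)
	{
		u64 gtt_total, mappable_end;	/* previously size_t / unsigned long */
		size_t stolen_size;
		phys_addr_t mappable_base;

		/* fills in the aggregate GTT size, the stolen memory size
		 * and the CPU-mappable aperture range */
		intel_gtt_get(&gtt_total, &stolen_size,
			      &mappable_base, &mappable_end);
	}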
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index b562dd36c4af..980c1f87866a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1686,6 +1686,19 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table)
 	mutex_unlock(&gpio_lookup_lock);
 }
 
+/**
+ * gpiod_remove_lookup_table() - unregister GPIO device consumers
+ * @table: table of consumers to unregister
+ */
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
+{
+	mutex_lock(&gpio_lookup_lock);
+
+	list_del(&table->list);
+
+	mutex_unlock(&gpio_lookup_lock);
+}
+
 static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 				      unsigned int idx,
 				      enum gpio_lookup_flags *flags)
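
The new helper gives board code a symmetric teardown path for GPIO consumer
mappings. A hedged sketch of paired use (the chip label, offset and consumer
name are illustrative):

	#include <linux/gpio/machine.h>

	static struct gpiod_lookup_table example_gpios = {
		.dev_id = "example-device",	/* consumer that will request the GPIO */
		.table = {
			GPIO_LOOKUP("gpiochip0", 4, "reset", GPIO_ACTIVE_LOW),
			{ },
		},
	};

	/* registration at board setup time ... */
	gpiod_add_lookup_table(&example_gpios);
	/* ... and the unregistration the new function makes possible */
	gpiod_remove_lookup_table(&example_gpios);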
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c46ca311d8c3..1a0a8df2eed8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -37,9 +37,29 @@ config DRM_KMS_FB_HELPER
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+	select FB_SYS_FOPS
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
 	help
 	  FBDEV helpers for KMS drivers.
 
+config DRM_FBDEV_EMULATION
+	bool "Enable legacy fbdev support for your modesetting driver"
+	depends on DRM
+	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
+	default y
+	help
+	  Choose this option if you need legacy fbdev support. Note that
+	  this option also provides Linux console support on top of your
+	  modesetting driver.
+
+	  If in doubt, say "Y".
+
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
 	depends on DRM_KMS_HELPER
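
With fbdev emulation split out into DRM_FBDEV_EMULATION above, a driver can
keep its fbdev path optional at compile time. A minimal sketch, assuming a
hypothetical example_fbdev_init() stands in for the driver's fbdev setup:

	#include <drm/drmP.h>

	int example_fbdev_init(struct drm_device *dev);	/* hypothetical hook */

	static int example_driver_load(struct drm_device *dev)
	{
		/* dead-code eliminated when DRM_FBDEV_EMULATION=n */
		if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION))
			return example_fbdev_init(dev);
		return 0;
	}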
@@ -79,8 +99,6 @@ config DRM_KMS_CMA_HELPER
 
 source "drivers/gpu/drm/i2c/Kconfig"
 
-source "drivers/gpu/drm/bridge/Kconfig"
-
 config DRM_TDFX
 	tristate "3dfx Banshee/Voodoo3+"
 	depends on DRM && PCI
@@ -110,6 +128,7 @@ config DRM_RADEON
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
 	help
 	  Choose this option if you have an ATI Radeon graphics card.  There
@@ -133,6 +152,7 @@ config DRM_AMDGPU
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
 	help
 	  Choose this option if you have a recent AMD Radeon graphics card.
@@ -231,10 +251,14 @@ source "drivers/gpu/drm/virtio/Kconfig"
 
 source "drivers/gpu/drm/msm/Kconfig"
 
+source "drivers/gpu/drm/fsl-dcu/Kconfig"
+
 source "drivers/gpu/drm/tegra/Kconfig"
 
 source "drivers/gpu/drm/panel/Kconfig"
 
+source "drivers/gpu/drm/bridge/Kconfig"
+
 source "drivers/gpu/drm/sti/Kconfig"
 
 source "drivers/gpu/drm/amd/amdkfd/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5713d0534504..45e7719846b1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@ drm-$(CONFIG_OF) += drm_of.o
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
+drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -70,3 +70,4 @@ obj-$(CONFIG_DRM_IMX) += imx/
 obj-y			+= i2c/
 obj-y			+= panel/
 obj-y			+= bridge/
+obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 616dfd4a1398..04c270757030 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -3,7 +3,9 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
-	-Idrivers/gpu/drm/amd/include
+	-Idrivers/gpu/drm/amd/include \
+	-Idrivers/gpu/drm/amd/amdgpu \
+	-Idrivers/gpu/drm/amd/scheduler
 
 amdgpu-y := amdgpu_drv.o
 
@@ -21,7 +23,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
-	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
+	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
+	amdgpu_amdkfd_gfx_v7.o
 
 amdgpu-y += \
 	vi.o
@@ -43,6 +46,7 @@ amdgpu-y += \
 	amdgpu_dpm.o \
 	cz_smc.o cz_dpm.o \
 	tonga_smc.o tonga_dpm.o \
+	fiji_smc.o fiji_dpm.o \
 	iceland_smc.o iceland_dpm.o
 
 # add DCE block
@@ -71,6 +75,20 @@ amdgpu-y += \
 	amdgpu_vce.o \
 	vce_v3_0.o
 
+# add amdkfd interfaces
+amdgpu-y += \
+	 amdgpu_amdkfd.o \
+	 amdgpu_amdkfd_gfx_v8.o
+
+# add cgs
+amdgpu-y += amdgpu_cgs.o
+
+# GPU scheduler
+amdgpu-y += \
+	../scheduler/gpu_scheduler.o \
+	../scheduler/sched_fence.o \
+	amdgpu_sched.o
+
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f7b49d5ce4b8..668939a14206 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -42,17 +42,19 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_execbuf_util.h>
 
+#include <drm/drmP.h>
 #include <drm/drm_gem.h>
 #include <drm/amdgpu_drm.h>
 
 #include "amd_shared.h"
-#include "amdgpu_family.h"
 #include "amdgpu_mode.h"
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_gds.h"
 
+#include "gpu_scheduler.h"
+
 /*
  * Modules parameters.
  */
@@ -77,7 +79,11 @@ extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
+extern int amdgpu_enable_scheduler;
+extern int amdgpu_sched_jobs;
+extern int amdgpu_sched_hw_submission;
 
+#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
 /* AMDGPU_IB_POOL_SIZE must be a power of 2 */
@@ -92,6 +98,9 @@ extern int amdgpu_vm_block_size;
 #define AMDGPU_MAX_COMPUTE_RINGS		8
 #define AMDGPU_MAX_VCE_RINGS			2
 
+/* max number of IP instances */
+#define AMDGPU_MAX_SDMA_INSTANCES		2
+
 /* number of hw syncs before falling back on blocking */
 #define AMDGPU_NUM_SYNCS			4
 
@@ -177,7 +186,9 @@ struct amdgpu_vm;
 struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 struct amdgpu_irq_src;
+struct amdgpu_fpriv;
 
 enum amdgpu_cp_irq {
 	AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -239,7 +250,7 @@ struct amdgpu_buffer_funcs {
 	unsigned	copy_num_dw;
 
 	/* used for buffer migration */
-	void (*emit_copy_buffer)(struct amdgpu_ring *ring,
+	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
 				 /* src addr in bytes */
 				 uint64_t src_offset,
 				 /* dst addr in bytes */
@@ -254,7 +265,7 @@ struct amdgpu_buffer_funcs {
 	unsigned	fill_num_dw;
 
 	/* used for buffer clearing */
-	void (*emit_fill_buffer)(struct amdgpu_ring *ring,
+	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
 				 /* value to write to memory */
 				 uint32_t src_data,
 				 /* dst addr in bytes */
@@ -332,6 +343,8 @@ struct amdgpu_ring_funcs {
 	int (*test_ring)(struct amdgpu_ring *ring);
 	int (*test_ib)(struct amdgpu_ring *ring);
 	bool (*is_lockup)(struct amdgpu_ring *ring);
+	/* insert NOP packets */
+	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 };
 
 /*
@@ -381,10 +394,10 @@ struct amdgpu_fence_driver {
 	uint64_t			sync_seq[AMDGPU_MAX_RINGS];
 	atomic64_t			last_seq;
 	bool				initialized;
-	bool				delayed_irq;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
 	struct delayed_work             lockup_work;
+	wait_queue_head_t		fence_queue;
 };
 
 /* some special values for the owner field */
@@ -423,20 +436,20 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 		      struct amdgpu_fence **fence);
-int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
-			  uint64_t seq, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
-int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-			  struct amdgpu_fence **fences,
-			  bool intr);
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+				  struct fence **array,
+				  uint32_t count,
+				  bool intr,
+				  signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
@@ -481,7 +494,7 @@ static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
 	return a->seq < b->seq;
 }
 
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, 
+int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
 			   void *owner, struct amdgpu_fence **fence);
 
 /*
@@ -509,7 +522,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct amdgpu_fence **fence);
+		       struct fence **fence);
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 
 struct amdgpu_bo_list_entry {
@@ -532,14 +545,16 @@ struct amdgpu_bo_va_mapping {
 struct amdgpu_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	uint64_t			addr;
-	struct amdgpu_fence		*last_pt_update;
+	struct fence		        *last_pt_update;
 	unsigned			ref_count;
 
-	/* protected by vm mutex */
-	struct list_head		mappings;
+	/* protected by vm mutex and spinlock */
 	struct list_head		vm_status;
 
+	/* mappings for this bo_va */
+	struct list_head		invalids;
+	struct list_head		valids;
+
 	/* constant after initialization */
 	struct amdgpu_vm		*vm;
 	struct amdgpu_bo		*bo;
@@ -643,7 +658,7 @@ struct amdgpu_sa_bo {
 	struct amdgpu_sa_manager	*manager;
 	unsigned			soffset;
 	unsigned			eoffset;
-	struct amdgpu_fence		*fence;
+	struct fence		        *fence;
 };
 
 /*
@@ -685,7 +700,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
 				struct amdgpu_semaphore *semaphore);
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
 			   struct amdgpu_semaphore **semaphore,
-			   struct amdgpu_fence *fence);
+			   struct fence *fence);
 
 /*
  * Synchronization
@@ -693,20 +708,23 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
 struct amdgpu_sync {
 	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
 	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
-	struct amdgpu_fence	*last_vm_update;
+	DECLARE_HASHTABLE(fences, 4);
+	struct fence	        *last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-		       struct amdgpu_fence *fence);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
 int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		      struct amdgpu_ring *ring);
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct amdgpu_fence *fence);
+		      struct fence *fence);
 
 /*
  * GART structures, functions & helpers
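
The sync object now collects generic struct fence dependencies, hashed per
fence context, instead of only amdgpu fences. A hedged sketch of the updated
kernel-side pattern, with error handling trimmed (adev, bo, owner and f are
assumed to exist in the caller):

	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	/* gather all fences attached to a buffer's reservation object ... */
	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner);
	/* ... or add a single fence, amdgpu-owned or foreign */
	r = amdgpu_sync_fence(adev, &sync, f);
	/* CPU-wait on everything gathered, via the new helper */
	r = amdgpu_sync_wait(&sync);
	amdgpu_sync_free(adev, &sync, NULL);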
@@ -821,7 +839,9 @@ struct amdgpu_flip_work {
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_rbo;
-	struct fence			*fence;
+	struct fence			*excl;
+	unsigned			shared_count;
+	struct fence			**shared;
 };
 
 
@@ -844,6 +864,8 @@ struct amdgpu_ib {
 	uint32_t			gws_base, gws_size;
 	uint32_t			oa_base, oa_size;
 	uint32_t			flags;
+	/* resulting sequence number */
+	uint64_t			sequence;
 };
 
 enum amdgpu_ring_type {
@@ -854,11 +876,23 @@ enum amdgpu_ring_type {
 	AMDGPU_RING_TYPE_VCE
 };
 
+extern struct amd_sched_backend_ops amdgpu_sched_ops;
+
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+					 struct amdgpu_ring *ring,
+					 struct amdgpu_ib *ibs,
+					 unsigned num_ibs,
+					 int (*free_job)(struct amdgpu_job *),
+					 void *owner,
+					 struct fence **fence);
+
 struct amdgpu_ring {
 	struct amdgpu_device		*adev;
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
+	struct amd_gpu_scheduler 	*scheduler;
 
+	spinlock_t              fence_lock;
 	struct mutex		*ring_lock;
 	struct amdgpu_bo	*ring_obj;
 	volatile uint32_t	*ring;
@@ -892,6 +926,7 @@ struct amdgpu_ring {
 	struct amdgpu_ctx	*current_ctx;
 	enum amdgpu_ring_type	type;
 	char			name[16];
+	bool                    is_pte_ring;
 };
 
 /*
@@ -933,7 +968,7 @@ struct amdgpu_vm_id {
 	unsigned		id;
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct amdgpu_fence	*flushed_updates;
+	struct fence	        *flushed_updates;
 	/* last use of vmid */
 	struct amdgpu_fence	*last_id_use;
 };
@@ -943,18 +978,22 @@ struct amdgpu_vm {
 
 	struct rb_root		va;
 
-	/* protecting invalidated and freed */
+	/* protecting invalidated */
 	spinlock_t		status_lock;
 
 	/* BOs moved, but not yet updated in the PT */
 	struct list_head	invalidated;
 
-	/* BOs freed, but not yet updated in the PT */
+	/* BOs cleared in the PT because of a move */
+	struct list_head	cleared;
+
+	/* BO mappings freed, but not yet updated in the PT */
 	struct list_head	freed;
 
 	/* contains the page directory */
 	struct amdgpu_bo	*page_directory;
 	unsigned		max_pde_used;
+	struct fence		*page_directory_fence;
 
 	/* array of page tables, one for each page directory entry */
 	struct amdgpu_vm_pt	*page_tables;
@@ -983,27 +1022,47 @@ struct amdgpu_vm_manager {
  * context related structures
  */
 
-struct amdgpu_ctx_state {
-	uint64_t flags;
-	uint32_t hangs;
+#define AMDGPU_CTX_MAX_CS_PENDING	16
+
+struct amdgpu_ctx_ring {
+	uint64_t		sequence;
+	struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING];
+	struct amd_sched_entity	entity;
 };
 
 struct amdgpu_ctx {
-	/* call kref_get()before CS start and kref_put() after CS fence signaled */
-	struct kref refcount;
-	struct amdgpu_fpriv *fpriv;
-	struct amdgpu_ctx_state state;
-	uint32_t id;
-	unsigned reset_counter;
+	struct kref		refcount;
+	struct amdgpu_device    *adev;
+	unsigned		reset_counter;
+	spinlock_t		ring_lock;
+	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 };
 
 struct amdgpu_ctx_mgr {
-	struct amdgpu_device *adev;
-	struct idr ctx_handles;
-	/* lock for IDR system */
-	struct mutex lock;
+	struct amdgpu_device	*adev;
+	struct mutex		lock;
+	/* protected by lock */
+	struct idr		ctx_handles;
 };
 
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+		    struct amdgpu_ctx *ctx);
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
+
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			      struct fence *fence);
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				   struct amdgpu_ring *ring, uint64_t seq);
+
+int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *filp);
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
 /*
  * file private structure
  */
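
Each context now keeps the last AMDGPU_CTX_MAX_CS_PENDING fences per ring,
so a sequence number handed back to userspace can be mapped to its fence
without an allocation. A sketch of the indexing this layout implies
(simplified, locking omitted; ring->idx is the hardware ring index):

	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence++;	/* sequence number for this submission */

	/* fixed ring of 16 pending fences, slots reused modulo the size */
	cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING] = fence_get(fence);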
@@ -1012,7 +1071,7 @@ struct amdgpu_fpriv {
 	struct amdgpu_vm	vm;
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
-	struct amdgpu_ctx_mgr ctx_mgr;
+	struct amdgpu_ctx_mgr	ctx_mgr;
 };
 
 /*
@@ -1160,6 +1219,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
 void amdgpu_ring_free_size(struct amdgpu_ring *ring);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
 int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
@@ -1207,6 +1267,16 @@ struct amdgpu_cs_parser {
 	struct amdgpu_user_fence uf;
 };
 
+struct amdgpu_job {
+	struct amd_sched_job    base;
+	struct amdgpu_device	*adev;
+	struct amdgpu_ib	*ibs;
+	uint32_t		num_ibs;
+	struct mutex            job_lock;
+	struct amdgpu_user_fence uf;
+	int (*free_job)(struct amdgpu_job *sched_job);
+};
+
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
 	return p->ibs[ib_idx].ptr[idx];
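
Jobs handed to the GPU scheduler now carry their own cleanup callback. A
minimal free_job sketch, assuming the submitter heap-allocated the IB array
(as the amdgpu_vm_free_job prototype later in this header suggests):

	static int example_free_job(struct amdgpu_job *sched_job)
	{
		unsigned i;

		/* release every IB the job owned, then the array itself */
		for (i = 0; i < sched_job->num_ibs; i++)
			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
		kfree(sched_job->ibs);
		return 0;
	}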
@@ -1601,7 +1671,6 @@ struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
-	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -1645,6 +1714,7 @@ struct amdgpu_sdma {
 	uint32_t		feature_version;
 
 	struct amdgpu_ring	ring;
+	bool			burst_nop;
 };
 
 /*
@@ -1849,17 +1919,12 @@ struct amdgpu_atcs {
 	struct amdgpu_atcs_functions functions;
 };
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv,
-							uint32_t *id,uint32_t flags);
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
-						  uint32_t id);
-
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
-struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
-int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+/*
+ * CGS
+ */
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(void *cgs_device);
 
-extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
-						 struct drm_file *filp);
 
 /*
  * Core structure, functions and helpers.
@@ -1883,7 +1948,7 @@ struct amdgpu_device {
 	struct rw_semaphore		exclusive_lock;
 
 	/* ASIC */
-	enum amdgpu_asic_type           asic_type;
+	enum amd_asic_type		asic_type;
 	uint32_t			family;
 	uint32_t			rev_id;
 	uint32_t			external_rev_id;
@@ -1976,7 +2041,6 @@ struct amdgpu_device {
 	struct amdgpu_irq_src		hpd_irq;
 
 	/* rings */
-	wait_queue_head_t		fence_queue;
 	unsigned			fence_context;
 	struct mutex			ring_lock;
 	unsigned			num_rings;
@@ -1999,7 +2063,7 @@ struct amdgpu_device {
 	struct amdgpu_gfx		gfx;
 
 	/* sdma */
-	struct amdgpu_sdma		sdma[2];
+	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	struct amdgpu_irq_src		sdma_trap_irq;
 	struct amdgpu_irq_src		sdma_illegal_inst_irq;
 
@@ -2025,6 +2089,12 @@ struct amdgpu_device {
 	/* tracking pinned memory */
 	u64 vram_pin_size;
 	u64 gart_pin_size;
+
+	/* amdkfd interface */
+	struct kfd_dev          *kfd;
+
+	/* kernel context for IB submission */
+	struct amdgpu_ctx	kernel_ctx;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2132,6 +2202,21 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 	ring->ring_free_dw--;
 }
 
+static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
+		if (&adev->sdma[i].ring == ring)
+			break;
+
+	if (i < AMDGPU_MAX_SDMA_INSTANCES)
+		return &adev->sdma[i];
+	else
+		return NULL;
+}
+
 /*
  * ASICs macro.
  */
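
A hedged usage sketch for the new instance lookup, e.g. from SDMA code that
wants the per-instance burst_nop capability (the padding count is
illustrative):

	/* ring is assumed to be one of the SDMA rings */
	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);

	if (sdma && sdma->burst_nop)
		amdgpu_ring_insert_nop(ring, 8);	/* padded as one NOP burst */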
@@ -2183,8 +2268,8 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
-#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
-#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b))
+#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
@@ -2212,6 +2297,12 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs);
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
@@ -2275,11 +2366,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 					  struct amdgpu_vm *vm,
 					  struct list_head *head);
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates);
+		     struct fence *updates);
 void amdgpu_vm_fence(struct amdgpu_device *adev,
 		     struct amdgpu_vm *vm,
 		     struct amdgpu_fence *fence);
@@ -2309,7 +2400,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
-
+int amdgpu_vm_free_job(struct amdgpu_job *job);
 /*
  * functions used by amdgpu_encoder.c
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
new file mode 100644
index 000000000000..496ed2192eba
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_amdkfd.h"
+#include "amd_shared.h"
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include <linux/module.h>
+
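+/*
+ * amdgpu (KGD, the graphics driver) and amdkfd (KFD, the compute
+ * driver) exchange two function tables: kfd2kgd carries the callbacks
+ * amdkfd makes into amdgpu, and kgd2kfd the callbacks amdgpu makes
+ * into amdkfd.
+ */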
+const struct kfd2kgd_calls *kfd2kgd;
+const struct kgd2kfd_calls *kgd2kfd;
+bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+bool amdgpu_amdkfd_init(void)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+	if (kgd2kfd_init_p == NULL)
+		return false;
+#endif
+	return true;
+}
+
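+/*
+ * Resolve the interface to amdkfd. When amdkfd is built as a module
+ * (CONFIG_HSA_AMD_MODULE), symbol_request() pins a reference to its
+ * kgd2kfd_init symbol, loading the module on demand; when it is built
+ * in (CONFIG_HSA_AMD), kgd2kfd_init() is called directly.
+ */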
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+#endif
+
+	switch (rdev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_KAVERI:
+		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
+		break;
+#endif
+	case CHIP_CARRIZO:
+		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
+		break;
+	default:
+		return false;
+	}
+
+#if defined(CONFIG_HSA_AMD_MODULE)
+	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+	if (kgd2kfd_init_p == NULL) {
+		kfd2kgd = NULL;
+		return false;
+	}
+
+	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+		symbol_put(kgd2kfd_init);
+		kfd2kgd = NULL;
+		kgd2kfd = NULL;
+
+		return false;
+	}
+
+	return true;
+#elif defined(CONFIG_HSA_AMD)
+	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+		kfd2kgd = NULL;
+		kgd2kfd = NULL;
+		return false;
+	}
+
+	return true;
+#else
+	kfd2kgd = NULL;
+	return false;
+#endif
+}
+
+void amdgpu_amdkfd_fini(void)
+{
+	if (kgd2kfd) {
+		kgd2kfd->exit();
+		symbol_put(kgd2kfd_init);
+	}
+}
+
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
+{
+	if (kgd2kfd)
+		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
+					rdev->pdev, kfd2kgd);
+}
+
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd) {
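+		/* Static split of resources: amdgpu keeps VMIDs 0-7 and
+		 * MEC pipe 0 for itself; amdkfd gets VMIDs 8-15 (bitmap
+		 * 0xFF00) and the remaining three compute pipes. */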
+		struct kgd2kfd_shared_resources gpu_resources = {
+			.compute_vmid_bitmap = 0xFF00,
+
+			.first_compute_pipe = 1,
+			.compute_pipe_count = 4 - 1,
+		};
+
+		amdgpu_doorbell_get_kfd_info(rdev,
+				&gpu_resources.doorbell_physical_address,
+				&gpu_resources.doorbell_aperture_size,
+				&gpu_resources.doorbell_start_offset);
+
+		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+	}
+}
+
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd) {
+		kgd2kfd->device_exit(rdev->kfd);
+		rdev->kfd = NULL;
+	}
+}
+
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+		const void *ih_ring_entry)
+{
+	if (rdev->kfd)
+		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+}
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd)
+		kgd2kfd->suspend(rdev->kfd);
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
+{
+	int r = 0;
+
+	if (rdev->kfd)
+		r = kgd2kfd->resume(rdev->kfd);
+
+	return r;
+}
+
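+/* Map a KFD memory pool onto the amdgpu GEM domain backing it. */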
+u32 pool_to_domain(enum kgd_memory_pool p)
+{
+	switch (p) {
+	case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
+	default: return AMDGPU_GEM_DOMAIN_GTT;
+	}
+}
+
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+			void **mem_obj, uint64_t *gpu_addr,
+			void **cpu_ptr)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
+	int r;
+
+	BUG_ON(kgd == NULL);
+	BUG_ON(gpu_addr == NULL);
+	BUG_ON(cpu_ptr == NULL);
+
+	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+	if ((*mem) == NULL)
+		return -ENOMEM;
+
+	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
+			AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+	if (r) {
+		dev_err(rdev->dev,
+			"failed to allocate BO for amdkfd (%d)\n", r);
+		kfree(*mem);
+		return r;
+	}
+
+	/* map the buffer */
+	r = amdgpu_bo_reserve((*mem)->bo, true);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+		goto allocate_mem_reserve_bo_failed;
+	}
+
+	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
+				&(*mem)->gpu_addr);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+		goto allocate_mem_pin_bo_failed;
+	}
+	*gpu_addr = (*mem)->gpu_addr;
+
+	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+	if (r) {
+		dev_err(rdev->dev,
+			"(%d) failed to map bo to kernel for amdkfd\n", r);
+		goto allocate_mem_kmap_bo_failed;
+	}
+	*cpu_ptr = (*mem)->cpu_ptr;
+
+	amdgpu_bo_unreserve((*mem)->bo);
+
+	return 0;
+
+allocate_mem_kmap_bo_failed:
+	amdgpu_bo_unpin((*mem)->bo);
+allocate_mem_pin_bo_failed:
+	amdgpu_bo_unreserve((*mem)->bo);
+allocate_mem_reserve_bo_failed:
+	amdgpu_bo_unref(&(*mem)->bo);
+	kfree(*mem);
+
+	return r;
+}
+
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+{
+	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
+
+	BUG_ON(mem == NULL);
+
+	amdgpu_bo_reserve(mem->bo, true);
+	amdgpu_bo_kunmap(mem->bo);
+	amdgpu_bo_unpin(mem->bo);
+	amdgpu_bo_unreserve(mem->bo);
+	amdgpu_bo_unref(&(mem->bo));
+	kfree(mem);
+}
+
+uint64_t get_vmem_size(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev =
+		(struct amdgpu_device *)kgd;
+
+	BUG_ON(kgd == NULL);
+
+	return rdev->mc.real_vram_size;
+}
+
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+	if (rdev->asic_funcs->get_gpu_clock_counter)
+		return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+	return 0;
+}
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+	/* The sclk is in quanta of 10 kHz */
+	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
new file mode 100644
index 000000000000..a8be765542e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */
+
+#ifndef AMDGPU_AMDKFD_H_INCLUDED
+#define AMDGPU_AMDKFD_H_INCLUDED
+
+#include <linux/types.h>
+#include <kgd_kfd_interface.h>
+
+struct amdgpu_device;
+
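+/* A pinned GTT buffer allocated on amdkfd's behalf, together with its
+ * GPU address and kernel CPU mapping. */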
+struct kgd_mem {
+	struct amdgpu_bo *bo;
+	uint64_t gpu_addr;
+	void *cpu_ptr;
+};
+
+bool amdgpu_amdkfd_init(void);
+void amdgpu_amdkfd_fini(void);
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev);
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+			const void *ih_ring_entry);
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev);
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+
+/* Shared API */
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+			void **mem_obj, uint64_t *gpu_addr,
+			void **cpu_ptr);
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+uint64_t get_vmem_size(struct kgd_dev *kgd);
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
new file mode 100644
index 000000000000..dd2037bc0b4a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "cikd.h"
+#include "cik_sdma.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#include "cik_structs.h"
+
+#define CIK_PIPE_PER_MEC	(4)
+
+enum {
+	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
+	MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+	ADDRESS_WATCH_REG_ADDR_HI = 0,
+	ADDRESS_WATCH_REG_ADDR_LO,
+	ADDRESS_WATCH_REG_CNTL,
+	ADDRESS_WATCH_REG_MAX
+};
+
+/* not defined in the CI/KV reg file */
+enum {
+	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+	/* extend the mask to 26 bits to match the low address field */
+	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
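+/* Each watch point owns a triple of registers (address high/low and
+ * control); entry (watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset)
+ * indexes into this table. */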
+static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
+	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
+	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
+	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
+	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
+};
+
+union TCP_WATCH_CNTL_BITS {
+	struct {
+		uint32_t mask:24;
+		uint32_t vmid:4;
+		uint32_t atc:1;
+		uint32_t mode:2;
+		uint32_t valid:1;
+	} bitfields, bits;
+	uint32_t u32All;
+	signed int i32All;
+	float f32All;
+};
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config,	uint32_t sh_mem_ape1_base,
+		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid);
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+							uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+	.init_gtt_mem_allocation = alloc_gtt_mem,
+	.free_gtt_mem = free_gtt_mem,
+	.get_vmem_size = get_vmem_size,
+	.get_gpu_clock_counter = get_gpu_clock_counter,
+	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_pipeline = kgd_init_pipeline,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
+	.write_vmid_invalidate_request = write_vmid_invalidate_request,
+	.get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+	return (struct amdgpu_device *)kgd;
+}
+
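+/*
+ * SRBM_GFX_CNTL selects which ME/pipe/queue/VMID bank the subsequent
+ * banked register accesses hit; srbm_mutex is held from lock_srbm()
+ * until unlock_srbm() restores the default (zero) selection.
+ */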
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+			uint32_t queue, uint32_t vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+	mutex_lock(&adev->srbm_mutex);
+	WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	WREG32(mmSRBM_GFX_CNTL, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t queue_id)
+{
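+	/* KFD pipe IDs are zero-based within the compute pipes it owns;
+	 * pipe 0 stays with amdgpu (see amdgpu_amdkfd_device_init()), so
+	 * the pre-increment converts to the hardware pipe number. */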
+	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+	unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+					uint32_t sh_mem_config,
+					uint32_t sh_mem_ape1_base,
+					uint32_t sh_mem_ape1_limit,
+					uint32_t sh_mem_bases)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	lock_srbm(kgd, 0, 0, 0, vmid);
+
+	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+	WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+	unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	/*
+	 * We have to assume that there is no outstanding mapping.
+	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+	 * a mapping is in progress or because a mapping finished and the
+	 * SW cleared it. So the protocol is to always wait & clear.
+	 */
+	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+			ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+		cpu_relax();
+	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+	/* Mapping vmid to pasid also for IH block */
+	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+	return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+	WREG32(mmCP_HPD_EOP_BASE_ADDR, lower_32_bits(hpd_gpu_addr >> 8));
+	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(hpd_gpu_addr >> 8));
+	WREG32(mmCP_HPD_EOP_VMID, 0);
+	WREG32(mmCP_HPD_EOP_CONTROL, hpd_size);
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t mec;
+	uint32_t pipe;
+
+	mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
+	pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+
+	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+	uint32_t retval;
+
+	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+
+	pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+	return retval;
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+	return (struct cik_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+	return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t wptr_shadow, is_wptr_shadow_valid;
+	struct cik_mqd *m;
+
+	m = get_mqd(mqd);
+
+	is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+
+	acquire_queue(kgd, pipe_id, queue_id);
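+	/* Restore the hardware queue descriptor (HQD) registers from the
+	 * memory queue descriptor (MQD) snapshot; writing CP_HQD_ACTIVE
+	 * at the end hands the queue back to the CP. */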
+	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+
+	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+
+	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+	WREG32(mmCP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
+	WREG32(mmCP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
+
+	WREG32(mmCP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
+
+	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+	WREG32(mmCP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
+	WREG32(mmCP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+
+	WREG32(mmCP_HQD_ATOMIC0_PREOP_LO, m->cp_hqd_atomic0_preop_lo);
+	WREG32(mmCP_HQD_ATOMIC0_PREOP_HI, m->cp_hqd_atomic0_preop_hi);
+	WREG32(mmCP_HQD_ATOMIC1_PREOP_LO, m->cp_hqd_atomic1_preop_lo);
+	WREG32(mmCP_HQD_ATOMIC1_PREOP_HI, m->cp_hqd_atomic1_preop_hi);
+
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+			m->cp_hqd_pq_rptr_report_addr_hi);
+
+	WREG32(mmCP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+
+	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, m->cp_hqd_pq_wptr_poll_addr_lo);
+	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, m->cp_hqd_pq_wptr_poll_addr_hi);
+
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+
+	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+
+	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+
+	WREG32(mmCP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+
+	if (is_wptr_shadow_valid)
+		WREG32(mmCP_HQD_PQ_WPTR, wptr_shadow);
+
+	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+	release_queue(kgd);
+
+	return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+			m->sdma_rlc_virtual_addr);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
+			m->sdma_rlc_rb_base);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+			m->sdma_rlc_rb_base_hi);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+			m->sdma_rlc_rb_rptr_addr_lo);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+			m->sdma_rlc_rb_rptr_addr_hi);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+			m->sdma_rlc_doorbell);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+			m->sdma_rlc_rb_cntl);
+
+	return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t act;
+	bool retval = false;
+	uint32_t low, high;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	act = RREG32(mmCP_HQD_ACTIVE);
+	if (act) {
+		low = lower_32_bits(queue_address >> 8);
+		high = upper_32_bits(queue_address >> 8);
+
+		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+				high == RREG32(mmCP_HQD_PQ_BASE_HI))
+			retval = true;
+	}
+	release_queue(kgd);
+	return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t sdma_rlc_rb_cntl;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+		return true;
+
+	return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t temp;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+	while (true) {
+		temp = RREG32(mmCP_HQD_ACTIVE);
+		if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+			break;
+		if (timeout == 0) {
+			pr_err("kfd: cp queue preemption timed out\n");
+			release_queue(kgd);
+			return -ETIME;
+		}
+		msleep(20);
+		timeout -= 20;
+	}
+
+	release_queue(kgd);
+	return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t temp;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+	while (true) {
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+			break;
+		if (timeout == 0)
+			return -ETIME;
+		msleep(20);
+		timeout -= 20;
+	}
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+	return 0;
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	union TCP_WATCH_CNTL_BITS cntl;
+	unsigned int i;
+
+	cntl.u32All = 0;
+
+	cntl.bitfields.valid = 0;
+	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
+	cntl.bitfields.atc = 1;
+
+	/* Clear the valid bit on every watch point, disabling them all */
+	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
+		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
+			ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	union TCP_WATCH_CNTL_BITS cntl;
+
+	cntl.u32All = cntl_val;
+
+	/* Turning off this watch point until we set all the registers */
+	cntl.bitfields.valid = 0;
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);
+
+	/* Enable the watch point */
+	cntl.bitfields.valid = 1;
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t data;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+	WREG32(mmSQ_CMD, sq_cmd);
+
+	/* Restore GRBM_GFX_INDEX to broadcast mode */
+
+	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
+		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
+
+	WREG32(mmGRBM_GFX_INDEX, data);
+
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset)
+{
+	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+								uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	const union amdgpu_firmware_header *hdr;
+
+	BUG_ON(kgd == NULL);
+
+	switch (type) {
+	case KGD_ENGINE_PFP:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.pfp_fw->data;
+		break;
+
+	case KGD_ENGINE_ME:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.me_fw->data;
+		break;
+
+	case KGD_ENGINE_CE:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.ce_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec2_fw->data;
+		break;
+
+	case KGD_ENGINE_RLC:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.rlc_fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[0].fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[1].fw->data;
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (hdr == NULL)
+		return 0;
+
+	/* Only 12 bits in use */
+	return hdr->common.ucode_version;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
new file mode 100644
index 000000000000..dfd1d503bccf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "vi_structs.h"
+#include "vid.h"
+
+#define VI_PIPE_PER_MEC	(4)
+
+struct cik_sdma_rlc_registers;
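+/* SDMA queue management is not wired up for VI yet; the CIK register
+ * struct is only referenced by the stubbed SDMA helpers below. */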
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config,
+		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+		uint32_t sh_mem_bases);
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+		unsigned int vmid);
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+		uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+		uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+		uint32_t pipe_id, uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+	.init_gtt_mem_allocation = alloc_gtt_mem,
+	.free_gtt_mem = free_gtt_mem,
+	.get_vmem_size = get_vmem_size,
+	.get_gpu_clock_counter = get_gpu_clock_counter,
+	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_pipeline = kgd_init_pipeline,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid =
+			get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid =
+			get_atc_vmid_pasid_mapping_valid,
+	.write_vmid_invalidate_request = write_vmid_invalidate_request,
+	.get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+	return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+			uint32_t queue, uint32_t vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+	mutex_lock(&adev->srbm_mutex);
+	WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	WREG32(mmSRBM_GFX_CNTL, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+	unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+					uint32_t sh_mem_config,
+					uint32_t sh_mem_ape1_base,
+					uint32_t sh_mem_ape1_limit,
+					uint32_t sh_mem_bases)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	lock_srbm(kgd, 0, 0, 0, vmid);
+
+	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+	WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+	unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	/*
+	 * We have to assume that there is no outstanding mapping.
+	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+	 * a mapping is in progress or because a mapping finished
+	 * and the SW cleared it.
+	 * So the protocol is to always wait & clear.
+	 */
+	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+			ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+		cpu_relax();
+	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+	/* Mapping vmid to pasid also for IH block */
+	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+	return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+	return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t mec;
+	uint32_t pipe;
+
+	mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+	pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+
+	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+	return 0;
+}
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+	return (struct vi_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+	return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr)
+{
+	struct vi_mqd *m;
+	uint32_t shadow_wptr, valid_wptr;
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	m = get_mqd(mqd);
+
+	valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+
+	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+			m->cp_hqd_pq_rptr_report_addr_hi);
+
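+	/* copy_from_user() returns the number of bytes it could NOT copy,
+	 * so zero means the shadow write pointer was fetched successfully. */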
+	if (valid_wptr == 0)
+		WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);
+
+	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+	WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
+	WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
+	WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
+	WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+	WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+	WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);
+
+	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
+	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
+	WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
+	WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
+	WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
+	WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
+	WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);
+
+	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
+	WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
+	WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+	WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);
+
+	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+
+	release_queue(kgd);
+
+	return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+	return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t act;
+	bool retval = false;
+	uint32_t low, high;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	act = RREG32(mmCP_HQD_ACTIVE);
+	if (act) {
+		low = lower_32_bits(queue_address >> 8);
+		high = upper_32_bits(queue_address >> 8);
+
+		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+				high == RREG32(mmCP_HQD_PQ_BASE_HI))
+			retval = true;
+	}
+	release_queue(kgd);
+	return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t sdma_rlc_rb_cntl;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+		return true;
+
+	return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t temp;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+	while (true) {
+		temp = RREG32(mmCP_HQD_ACTIVE);
+		if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+			break;
+		if (timeout == 0) {
+			pr_err("kfd: cp queue preemption timed out\n");
+			release_queue(kgd);
+			return -ETIME;
+		}
+		msleep(20);
+		timeout -= 20;
+	}
+
+	release_queue(kgd);
+	return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t temp;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+	while (true) {
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+			break;
+		if (timeout == 0)
+			return -ETIME;
+		msleep(20);
+		timeout -= 20;
+	}
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+	return 0;
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+								uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+	return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo)
+{
+	return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t data = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+	WREG32(mmSQ_CMD, sq_cmd);
+
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		INSTANCE_BROADCAST_WRITES, 1);
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		SH_BROADCAST_WRITES, 1);
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		SE_BROADCAST_WRITES, 1);
+
+	WREG32(mmGRBM_GFX_INDEX, data);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset)
+{
+	return 0;
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	const union amdgpu_firmware_header *hdr;
+
+	BUG_ON(kgd == NULL);
+
+	switch (type) {
+	case KGD_ENGINE_PFP:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.pfp_fw->data;
+		break;
+
+	case KGD_ENGINE_ME:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.me_fw->data;
+		break;
+
+	case KGD_ENGINE_CE:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.ce_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec2_fw->data;
+		break;
+
+	case KGD_ENGINE_RLC:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.rlc_fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[0].fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[1].fw->data;
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (hdr == NULL)
+		return 0;
+
+	/* Only 12 bits in use */
+	return hdr->common.ucode_version;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 6a588371d54a..77f1d7c6ea3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -897,7 +897,7 @@ bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
 					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
 					    (id == ASIC_INTERNAL_MEMORY_SS))
 						ss->rate /= 100;
-					if (adev->flags & AMDGPU_IS_APU)
+					if (adev->flags & AMD_IS_APU)
 						amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
 					return true;
 				}
@@ -1058,7 +1058,7 @@ void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
 	SET_MEMORY_CLOCK_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 2742b9a35cbc..98d59ee640ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int i, r;
 
 	start_jiffies = jiffies;
@@ -42,17 +42,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
 		if (r)
 			goto exit_do_move;
-		r = amdgpu_fence_wait(fence, false);
+		r = fence_wait(fence, false);
 		if (r)
 			goto exit_do_move;
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 	}
 	end_jiffies = jiffies;
 	r = jiffies_to_msecs(end_jiffies - start_jiffies);
 
 exit_do_move:
 	if (fence)
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index ceb444f6d418..02add0a508cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -48,7 +48,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
 	resource_size_t vram_base;
 	resource_size_t size = 256 * 1024; /* ??? */
 
-	if (!(adev->flags & AMDGPU_IS_APU))
+	if (!(adev->flags & AMD_IS_APU))
 		if (!amdgpu_card_posted(adev))
 			return false;
 
@@ -184,7 +184,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 	bool found = false;
 
 	/* ATRM is for the discrete card only */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return false;
 
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
@@ -246,7 +246,7 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 
 static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
 {
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return igp_read_bios_from_vram(adev);
 	else
 		return amdgpu_asic_read_disabled_bios(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
new file mode 100644
index 000000000000..6b1243f9f86d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <drm/drmP.h>
+#include <linux/firmware.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "cgs_linux.h"
+#include "atom.h"
+#include "amdgpu_ucode.h"
+
+
+struct amdgpu_cgs_device {
+	struct cgs_device base;
+	struct amdgpu_device *adev;
+};
+
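+/* Recover the amdgpu_device behind the opaque cgs_device handle; every
+ * CGS callback below starts with this. */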
+#define CGS_FUNC_ADEV							\
+	struct amdgpu_device *adev =					\
+		((struct amdgpu_cgs_device *)cgs_device)->adev
+
+static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
+				   uint64_t *mc_start, uint64_t *mc_size,
+				   uint64_t *mem_size)
+{
+	CGS_FUNC_ADEV;
+	switch (type) {
+	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
+		*mc_start = 0;
+		*mc_size = adev->mc.visible_vram_size;
+		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
+		break;
+	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
+		*mc_start = adev->mc.visible_vram_size;
+		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
+		*mem_size = *mc_size;
+		break;
+	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
+	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
+		*mc_start = adev->mc.gtt_start;
+		*mc_size = adev->mc.gtt_size;
+		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
+				uint64_t size,
+				uint64_t min_offset, uint64_t max_offset,
+				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
+{
+	CGS_FUNC_ADEV;
+	int ret;
+	struct amdgpu_bo *bo;
+	struct page *kmem_page = vmalloc_to_page(kmem);
+	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
+	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+	if (ret)
+		return ret;
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	/* pin buffer into GTT */
+	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
+				       min_offset, max_offset, mcaddr);
+	amdgpu_bo_unreserve(bo);
+
+	*kmem_handle = (cgs_handle_t)bo;
+	return ret;
+}
+
+static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
+{
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
+
+	if (obj) {
+		int r = amdgpu_bo_reserve(obj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_unpin(obj);
+			amdgpu_bo_unreserve(obj);
+		}
+		amdgpu_bo_unref(&obj);
+
+	}
+	return 0;
+}
+
+static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
+				    enum cgs_gpu_mem_type type,
+				    uint64_t size, uint64_t align,
+				    uint64_t min_offset, uint64_t max_offset,
+				    cgs_handle_t *handle)
+{
+	CGS_FUNC_ADEV;
+	uint16_t flags = 0;
+	int ret = 0;
+	uint32_t domain = 0;
+	struct amdgpu_bo *obj;
+	struct ttm_placement placement;
+	struct ttm_place place;
+
+	if (WARN_ON(min_offset > max_offset))
+		return -EINVAL;
+
+	/* fail if the alignment is not a power of 2 */
+	if (((align != 1) && (align & (align - 1)))
+	    || size == 0 || align == 0)
+		return -EINVAL;
+
+
+	switch (type) {
+	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
+		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (max_offset > adev->mc.real_vram_size)
+			return -EINVAL;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_VRAM;
+		break;
+	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
+		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+			place.fpfn =
+				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
+			place.lpfn =
+				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
+			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_VRAM;
+		}
+
+		break;
+	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
+		domain = AMDGPU_GEM_DOMAIN_GTT;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+		break;
+	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
+		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+		domain = AMDGPU_GEM_DOMAIN_GTT;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+			TTM_PL_FLAG_UNCACHED;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+
+	*handle = 0;
+
+	placement.placement = &place;
+	placement.num_placement = 1;
+	placement.busy_placement = &place;
+	placement.num_busy_placement = 1;
+
+	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
+					  true, domain, flags,
+					  NULL, &placement, &obj);
+	if (ret) {
+		DRM_ERROR("(%d) bo create failed\n", ret);
+		return ret;
+	}
+	*handle = (cgs_handle_t)obj;
+
+	return ret;
+}
+
+static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
+				     cgs_handle_t *handle)
+{
+	CGS_FUNC_ADEV;
+	int r;
+	uint32_t dma_handle;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *bo;
+	struct drm_device *dev = adev->ddev;
+	struct drm_file *file_priv = NULL, *priv;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(priv, &dev->filelist, lhead) {
+		rcu_read_lock();
+		if (priv->pid == get_pid(task_pid(current)))
+			file_priv = priv;
+		rcu_read_unlock();
+		if (file_priv)
+			break;
+	}
+	mutex_unlock(&dev->struct_mutex);
+	if (!file_priv)
+		return -EINVAL;
+	r = dev->driver->prime_fd_to_handle(dev,
+					    file_priv, dmabuf_fd,
+					    &dma_handle);
+	if (r)
+		return r;
+	spin_lock(&file_priv->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&file_priv->object_idr, dma_handle);
+	if (obj == NULL) {
+		spin_unlock(&file_priv->table_lock);
+		return -EINVAL;
+	}
+	spin_unlock(&file_priv->table_lock);
+	bo = gem_to_amdgpu_bo(obj);
+	*handle = (cgs_handle_t)bo;
+	return 0;
+}
+
+static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+
+	if (obj) {
+		int r = amdgpu_bo_reserve(obj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_kunmap(obj);
+			amdgpu_bo_unpin(obj);
+			amdgpu_bo_unreserve(obj);
+		}
+		amdgpu_bo_unref(&obj);
+
+	}
+	return 0;
+}
+
+static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+				   uint64_t *mcaddr)
+{
+	int r;
+	u64 min_offset, max_offset;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+
+	WARN_ON_ONCE(obj->placement.num_placement > 1);
+
+	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
+	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
+
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+				     min_offset, max_offset, mcaddr);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	int r;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_unpin(obj);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+				   void **map)
+{
+	int r;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_kmap(obj, map);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	int r;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	amdgpu_bo_kunmap(obj);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
+{
+	CGS_FUNC_ADEV;
+	return RREG32(offset);
+}
+
+static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
+				      uint32_t value)
+{
+	CGS_FUNC_ADEV;
+	WREG32(offset, value);
+}
+
+static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
+					     enum cgs_ind_reg space,
+					     unsigned index)
+{
+	CGS_FUNC_ADEV;
+	switch (space) {
+	case CGS_IND_REG__MMIO:
+		return RREG32_IDX(index);
+	case CGS_IND_REG__PCIE:
+		return RREG32_PCIE(index);
+	case CGS_IND_REG__SMC:
+		return RREG32_SMC(index);
+	case CGS_IND_REG__UVD_CTX:
+		return RREG32_UVD_CTX(index);
+	case CGS_IND_REG__DIDT:
+		return RREG32_DIDT(index);
+	case CGS_IND_REG__AUDIO_ENDPT:
+		DRM_ERROR("audio endpt register access not implemented.\n");
+		return 0;
+	}
+	WARN(1, "Invalid indirect register space");
+	return 0;
+}
+
+static void amdgpu_cgs_write_ind_register(void *cgs_device,
+					  enum cgs_ind_reg space,
+					  unsigned index, uint32_t value)
+{
+	CGS_FUNC_ADEV;
+	switch (space) {
+	case CGS_IND_REG__MMIO:
+		return WREG32_IDX(index, value);
+	case CGS_IND_REG__PCIE:
+		return WREG32_PCIE(index, value);
+	case CGS_IND_REG__SMC:
+		return WREG32_SMC(index, value);
+	case CGS_IND_REG__UVD_CTX:
+		return WREG32_UVD_CTX(index, value);
+	case CGS_IND_REG__DIDT:
+		return WREG32_DIDT(index, value);
+	case CGS_IND_REG__AUDIO_ENDPT:
+		DRM_ERROR("audio endpt register access not implemented.\n");
+		return;
+	}
+	WARN(1, "Invalid indirect register space");
+}
+
+static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
+{
+	CGS_FUNC_ADEV;
+	uint8_t val;
+	int ret = pci_read_config_byte(adev->pdev, addr, &val);
+	if (WARN(ret, "pci_read_config_byte error"))
+		return 0;
+	return val;
+}
+
+static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
+{
+	CGS_FUNC_ADEV;
+	uint16_t val;
+	int ret = pci_read_config_word(adev->pdev, addr, &val);
+	if (WARN(ret, "pci_read_config_word error"))
+		return 0;
+	return val;
+}
+
+static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
+						 unsigned addr)
+{
+	CGS_FUNC_ADEV;
+	uint32_t val;
+	int ret = pci_read_config_dword(adev->pdev, addr, &val);
+	if (WARN(ret, "pci_read_config_dword error"))
+		return 0;
+	return val;
+}
+
+static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
+					     uint8_t value)
+{
+	CGS_FUNC_ADEV;
+	int ret = pci_write_config_byte(adev->pdev, addr, value);
+	WARN(ret, "pci_write_config_byte error");
+}
+
+static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
+					     uint16_t value)
+{
+	CGS_FUNC_ADEV;
+	int ret = pci_write_config_word(adev->pdev, addr, value);
+	WARN(ret, "pci_write_config_word error");
+}
+
+static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
+					      uint32_t value)
+{
+	CGS_FUNC_ADEV;
+	int ret = pci_write_config_dword(adev->pdev, addr, value);
+	WARN(ret, "pci_write_config_dword error");
+}
+
+static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
+						  unsigned table, uint16_t *size,
+						  uint8_t *frev, uint8_t *crev)
+{
+	CGS_FUNC_ADEV;
+	uint16_t data_start;
+
+	if (amdgpu_atom_parse_data_header(
+		    adev->mode_info.atom_context, table, size,
+		    frev, crev, &data_start))
+		return (uint8_t*)adev->mode_info.atom_context->bios +
+			data_start;
+
+	return NULL;
+}
+
+static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
+					      uint8_t *frev, uint8_t *crev)
+{
+	CGS_FUNC_ADEV;
+
+	if (amdgpu_atom_parse_cmd_header(
+		    adev->mode_info.atom_context, table,
+		    frev, crev))
+		return 0;
+
+	return -EINVAL;
+}
+
+static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
+					  void *args)
+{
+	CGS_FUNC_ADEV;
+
+	return amdgpu_atom_execute_table(
+		adev->mode_info.atom_context, table, args);
+}
+
+static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
+				     int active)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
+				       enum cgs_clock clock, unsigned freq)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
+					enum cgs_engine engine, int powered)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
+					    enum cgs_clock clock,
+					    struct cgs_clock_limits *limits)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
+					  const uint32_t *voltages)
+{
+	DRM_ERROR("not implemented");
+	return -EPERM;
+}
+
+struct cgs_irq_params {
+	unsigned src_id;
+	cgs_irq_source_set_func_t set;
+	cgs_irq_handler_func_t handler;
+	void *private_data;
+};
+
+static int cgs_set_irq_state(struct amdgpu_device *adev,
+			     struct amdgpu_irq_src *src,
+			     unsigned type,
+			     enum amdgpu_interrupt_state state)
+{
+	struct cgs_irq_params *irq_params =
+		(struct cgs_irq_params *)src->data;
+	if (!irq_params)
+		return -EINVAL;
+	if (!irq_params->set)
+		return -EINVAL;
+	return irq_params->set(irq_params->private_data,
+			       irq_params->src_id,
+			       type,
+			       (int)state);
+}
+
+static int cgs_process_irq(struct amdgpu_device *adev,
+			   struct amdgpu_irq_src *source,
+			   struct amdgpu_iv_entry *entry)
+{
+	struct cgs_irq_params *irq_params =
+		(struct cgs_irq_params *)source->data;
+	if (!irq_params)
+		return -EINVAL;
+	if (!irq_params->handler)
+		return -EINVAL;
+	return irq_params->handler(irq_params->private_data,
+				   irq_params->src_id,
+				   entry->iv_entry);
+}
+
+static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
+	.set = cgs_set_irq_state,
+	.process = cgs_process_irq,
+};
+
+static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
+				     unsigned num_types,
+				     cgs_irq_source_set_func_t set,
+				     cgs_irq_handler_func_t handler,
+				     void *private_data)
+{
+	CGS_FUNC_ADEV;
+	int ret = 0;
+	struct cgs_irq_params *irq_params;
+	struct amdgpu_irq_src *source =
+		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
+	if (!source)
+		return -ENOMEM;
+	irq_params =
+		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
+	if (!irq_params) {
+		kfree(source);
+		return -ENOMEM;
+	}
+	source->num_types = num_types;
+	source->funcs = &cgs_irq_funcs;
+	irq_params->src_id = src_id;
+	irq_params->set = set;
+	irq_params->handler = handler;
+	irq_params->private_data = private_data;
+	source->data = (void *)irq_params;
+	ret = amdgpu_irq_add_id(adev, src_id, source);
+	if (ret) {
+		kfree(irq_params);
+		kfree(source);
+	}
+
+	return ret;
+}
+
+static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
+{
+	CGS_FUNC_ADEV;
+	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
+}
+
+static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
+{
+	CGS_FUNC_ADEV;
+	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
+}
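/*
 * Illustrative sketch, not part of the patch: registering an interrupt
 * source through the CGS wrappers above. The src_id (42), both callbacks
 * and my_block_priv are hypothetical; the callback signatures are
 * inferred from how cgs_set_irq_state() and cgs_process_irq() invoke
 * them, not quoted from the cgs headers.
 */
static int my_block_set_irq_state(void *private_data, unsigned src_id,
				  unsigned type, int enabled)
{
	/* program the block's interrupt-enable bit here */
	return 0;
}

static int my_block_irq_handler(void *private_data, unsigned src_id,
				const uint32_t *iv_entry)
{
	/* decode iv_entry and handle the interrupt */
	return 0;
}

static int my_block_irq_init(void *cgs_device, void *my_block_priv)
{
	int r;

	r = amdgpu_cgs_add_irq_source(cgs_device, 42, 1,
				      my_block_set_irq_state,
				      my_block_irq_handler, my_block_priv);
	if (r)
		return r;

	/* enable delivery of type 0 from this source */
	return amdgpu_cgs_irq_get(cgs_device, 42, 0);
}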
+
+int amdgpu_cgs_set_clockgating_state(void *cgs_device,
+				  enum amd_ip_block_type block_type,
+				  enum amd_clockgating_state state)
+{
+	CGS_FUNC_ADEV;
+	int i, r = -1;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+
+		if (adev->ip_blocks[i].type == block_type) {
+			r = adev->ip_blocks[i].funcs->set_clockgating_state(
+								(void *)adev,
+									state);
+			break;
+		}
+	}
+	return r;
+}
+
+int amdgpu_cgs_set_powergating_state(void *cgs_device,
+				  enum amd_ip_block_type block_type,
+				  enum amd_powergating_state state)
+{
+	CGS_FUNC_ADEV;
+	int i, r = -1;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+
+		if (adev->ip_blocks[i].type == block_type) {
+			r = adev->ip_blocks[i].funcs->set_powergating_state(
+								(void *)adev,
+									state);
+			break;
+		}
+	}
+	return r;
+}
+
+
+static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
+{
+	CGS_FUNC_ADEV;
+	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
+
+	switch (fw_type) {
+	case CGS_UCODE_ID_SDMA0:
+		result = AMDGPU_UCODE_ID_SDMA0;
+		break;
+	case CGS_UCODE_ID_SDMA1:
+		result = AMDGPU_UCODE_ID_SDMA1;
+		break;
+	case CGS_UCODE_ID_CP_CE:
+		result = AMDGPU_UCODE_ID_CP_CE;
+		break;
+	case CGS_UCODE_ID_CP_PFP:
+		result = AMDGPU_UCODE_ID_CP_PFP;
+		break;
+	case CGS_UCODE_ID_CP_ME:
+		result = AMDGPU_UCODE_ID_CP_ME;
+		break;
+	case CGS_UCODE_ID_CP_MEC:
+	case CGS_UCODE_ID_CP_MEC_JT1:
+		result = AMDGPU_UCODE_ID_CP_MEC1;
+		break;
+	case CGS_UCODE_ID_CP_MEC_JT2:
+		if (adev->asic_type == CHIP_TONGA)
+			result = AMDGPU_UCODE_ID_CP_MEC2;
+		else if (adev->asic_type == CHIP_CARRIZO)
+			result = AMDGPU_UCODE_ID_CP_MEC1;
+		break;
+	case CGS_UCODE_ID_RLC_G:
+		result = AMDGPU_UCODE_ID_RLC_G;
+		break;
+	default:
+		DRM_ERROR("Firmware type not supported\n");
+	}
+	return result;
+}
+
+static int amdgpu_cgs_get_firmware_info(void *cgs_device,
+					enum cgs_ucode_id type,
+					struct cgs_firmware_info *info)
+{
+	CGS_FUNC_ADEV;
+
+	if (CGS_UCODE_ID_SMU != type) {
+		uint64_t gpu_addr;
+		uint32_t data_size;
+		const struct gfx_firmware_header_v1_0 *header;
+		enum AMDGPU_UCODE_ID id;
+		struct amdgpu_firmware_info *ucode;
+
+		id = fw_type_convert(cgs_device, type);
+		ucode = &adev->firmware.ucode[id];
+		if (ucode->fw == NULL)
+			return -EINVAL;
+
+		gpu_addr  = ucode->mc_addr;
+		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+		data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
+		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
+			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+			data_size = le32_to_cpu(header->jt_size) << 2;
+		}
+		info->mc_addr = gpu_addr;
+		info->image_size = data_size;
+		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
+	} else {
+		char fw_name[30] = {0};
+		int err = 0;
+		uint32_t ucode_size;
+		uint32_t ucode_start_address;
+		const uint8_t *src;
+		const struct smc_firmware_header_v1_0 *hdr;
+
+		switch (adev->asic_type) {
+		case CHIP_TONGA:
+			strcpy(fw_name, "amdgpu/tonga_smc.bin");
+			break;
+		default:
+			DRM_ERROR("SMC firmware not supported\n");
+			return -EINVAL;
+		}
+
+		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+		if (err) {
+			DRM_ERROR("Failed to request firmware\n");
+			return err;
+		}
+
+		err = amdgpu_ucode_validate(adev->pm.fw);
+		if (err) {
+			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+			release_firmware(adev->pm.fw);
+			adev->pm.fw = NULL;
+			return err;
+		}
+
+		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+		src = (const uint8_t *)(adev->pm.fw->data +
+		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+		info->version = adev->pm.fw_version;
+		info->image_size = ucode_size;
+		info->kptr = (void *)src;
+	}
+	return 0;
+}
+
+static const struct cgs_ops amdgpu_cgs_ops = {
+	amdgpu_cgs_gpu_mem_info,
+	amdgpu_cgs_gmap_kmem,
+	amdgpu_cgs_gunmap_kmem,
+	amdgpu_cgs_alloc_gpu_mem,
+	amdgpu_cgs_free_gpu_mem,
+	amdgpu_cgs_gmap_gpu_mem,
+	amdgpu_cgs_gunmap_gpu_mem,
+	amdgpu_cgs_kmap_gpu_mem,
+	amdgpu_cgs_kunmap_gpu_mem,
+	amdgpu_cgs_read_register,
+	amdgpu_cgs_write_register,
+	amdgpu_cgs_read_ind_register,
+	amdgpu_cgs_write_ind_register,
+	amdgpu_cgs_read_pci_config_byte,
+	amdgpu_cgs_read_pci_config_word,
+	amdgpu_cgs_read_pci_config_dword,
+	amdgpu_cgs_write_pci_config_byte,
+	amdgpu_cgs_write_pci_config_word,
+	amdgpu_cgs_write_pci_config_dword,
+	amdgpu_cgs_atom_get_data_table,
+	amdgpu_cgs_atom_get_cmd_table_revs,
+	amdgpu_cgs_atom_exec_cmd_table,
+	amdgpu_cgs_create_pm_request,
+	amdgpu_cgs_destroy_pm_request,
+	amdgpu_cgs_set_pm_request,
+	amdgpu_cgs_pm_request_clock,
+	amdgpu_cgs_pm_request_engine,
+	amdgpu_cgs_pm_query_clock_limits,
+	amdgpu_cgs_set_camera_voltages,
+	amdgpu_cgs_get_firmware_info,
+	amdgpu_cgs_set_powergating_state,
+	amdgpu_cgs_set_clockgating_state
+};
+
+static const struct cgs_os_ops amdgpu_cgs_os_ops = {
+	amdgpu_cgs_import_gpu_mem,
+	amdgpu_cgs_add_irq_source,
+	amdgpu_cgs_irq_get,
+	amdgpu_cgs_irq_put
+};
+
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
+{
+	struct amdgpu_cgs_device *cgs_device =
+		kmalloc(sizeof(*cgs_device), GFP_KERNEL);
+
+	if (!cgs_device) {
+		DRM_ERROR("Couldn't allocate CGS device structure\n");
+		return NULL;
+	}
+
+	cgs_device->base.ops = &amdgpu_cgs_ops;
+	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
+	cgs_device->adev = adev;
+
+	return cgs_device;
+}
+
+void amdgpu_cgs_destroy_device(void *cgs_device)
+{
+	kfree(cgs_device);
+}
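The CGS surface above is normally reached through cgs_* call macros that dispatch via the two ops tables; as a hedged smoke-test sketch, written as if it lived in amdgpu_cgs.c so it can call the static helpers directly. The alloc parameter list (type, size, align, min/max offset, handle) is inferred from the placement code above, not quoted from the cgs header:

static int amdgpu_cgs_smoke_test(struct amdgpu_device *adev)
{
	void *cgs = amdgpu_cgs_create_device(adev);
	cgs_handle_t handle;
	uint64_t mcaddr;
	void *cpu;
	int r;

	if (!cgs)
		return -ENOMEM;

	/* 4 KiB of write-combined GTT, no placement restriction */
	r = amdgpu_cgs_alloc_gpu_mem(cgs, CGS_GPU_MEM_TYPE__GART_WRITECOMBINE,
				     4096, PAGE_SIZE, 0, 0, &handle);
	if (r)
		goto out;

	r = amdgpu_cgs_gmap_gpu_mem(cgs, handle, &mcaddr);	/* pin, GPU address */
	if (!r)
		r = amdgpu_cgs_kmap_gpu_mem(cgs, handle, &cpu);	/* CPU mapping */
	if (!r)
		memset(cpu, 0, 4096);

	/* free_gpu_mem above kunmaps, unpins and unrefs in one go */
	amdgpu_cgs_free_gpu_mem(cgs, handle);
out:
	amdgpu_cgs_destroy_device(cgs);
	return r;
}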
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 27df17a0e620..89c3dd62ba21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -75,6 +75,11 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
 			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+				/* Don't try to start link training before we
+				 * have the DPCD */
+				if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+					return;
+
 				/* set it to OFF so that drm_helper_connector_dpms()
 				 * won't return immediately since the current state
 				 * is ON at this point.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1f040d85ac47..3b355aeb62fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -126,6 +126,30 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs)
+{
+	struct amdgpu_cs_parser *parser;
+	int i;
+
+	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
+	if (!parser)
+		return NULL;
+
+	parser->adev = adev;
+	parser->filp = filp;
+	parser->ctx = ctx;
+	parser->ibs = ibs;
+	parser->num_ibs = num_ibs;
+	for (i = 0; i < num_ibs; i++)
+		ibs[i].ctx = ctx;
+
+	return parser;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -147,13 +171,13 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 	if (chunk_array == NULL) {
 		r = -ENOMEM;
 		goto out;
 	}
 
-	chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		r = -EFAULT;
@@ -161,7 +185,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	}
 
 	p->nchunks = cs->in.num_chunks;
-	p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk),
+	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 			    GFP_KERNEL);
 	if (p->chunks == NULL) {
 		r = -ENOMEM;
@@ -173,7 +197,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+		chunk_ptr = (void __user *)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
 			r = -EFAULT;
@@ -183,7 +207,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		cdata = (void __user *)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
@@ -235,11 +259,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		}
 	}
 
 	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs) {
+	if (!p->ibs)
 		r = -ENOMEM;
-		goto out;
-	}
 
 out:
 	kfree(chunk_array);
@@ -331,7 +354,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
 			 * into account. We don't want to disallow buffer moves
 			 * completely.
 			 */
-			if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
+			if ((lobj->allowed_domains & current_domain) != 0 &&
 			    (domain & current_domain) == 0 && /* will be moved */
 			    bytes_moved > bytes_moved_threshold) {
 				/* don't move it */
@@ -415,18 +438,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-/**
- * cs_parser_fini() - clean parser states
- * @parser:	parser structure holding parsing context.
- * @error:	error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
-	unsigned i;
-
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -447,21 +460,45 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
+}
 
+static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
+{
+	unsigned i;
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
 	if (parser->bo_list)
 		amdgpu_bo_list_put(parser->bo_list);
+
 	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	if (parser->ibs)
-		for (i = 0; i < parser->num_ibs; i++)
-			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-	kfree(parser->ibs);
-	if (parser->uf.bo)
-		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	if (!amdgpu_enable_scheduler) {
+		if (parser->ibs)
+			for (i = 0; i < parser->num_ibs; i++)
+				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+		kfree(parser->ibs);
+		if (parser->uf.bo)
+			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	}
+
+	kfree(parser);
+}
+
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set then unvalidate buffer, otherwise just free memory
+ * used by parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
+	amdgpu_cs_parser_fini_early(parser, error, backoff);
+	amdgpu_cs_parser_fini_late(parser);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -476,12 +513,18 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 	if (r)
 		return r;
 
+	r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+	if (r)
+		return r;
+
 	r = amdgpu_vm_clear_freed(adev, vm);
 	if (r)
 		return r;
 
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
+			struct fence *f;
+
 			/* ignore duplicates */
 			bo = p->bo_list->array[i].robj;
 			if (!bo)
@@ -495,7 +538,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 			if (r)
 				return r;
 
-			amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
+			f = bo_va->last_pt_update;
+			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+			if (r)
+				return r;
 		}
 	}
 
@@ -529,9 +575,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 		goto out;
 	}
 	amdgpu_cs_sync_rings(parser);
-
-	r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-			       parser->filp);
+	if (!amdgpu_enable_scheduler)
+		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
+				       parser->filp);
 
 out:
 	mutex_unlock(&vm->mutex);
@@ -650,7 +696,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			ib->oa_size = amdgpu_bo_size(oa);
 		}
 	}
-
 	/* wrap the last IB with user fence */
 	if (parser->uf.bo) {
 		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
@@ -693,9 +738,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			sizeof(struct drm_amdgpu_cs_chunk_dep);
 
 		for (j = 0; j < num_deps; ++j) {
-			struct amdgpu_fence *fence;
 			struct amdgpu_ring *ring;
 			struct amdgpu_ctx *ctx;
+			struct fence *fence;
 
 			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 					       deps[j].ip_instance,
@@ -707,85 +752,137 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 			if (ctx == NULL)
 				return -EINVAL;
 
-			r = amdgpu_fence_recreate(ring, p->filp,
-						  deps[j].handle,
-						  &fence);
-			if (r) {
+			fence = amdgpu_ctx_get_fence(ctx, ring,
+						     deps[j].handle);
+			if (IS_ERR(fence)) {
+				r = PTR_ERR(fence);
 				amdgpu_ctx_put(ctx);
 				return r;
-			}
 
-			amdgpu_sync_fence(&ib->sync, fence);
-			amdgpu_fence_unref(&fence);
-			amdgpu_ctx_put(ctx);
+			} else if (fence) {
+				r = amdgpu_sync_fence(adev, &ib->sync, fence);
+				fence_put(fence);
+				amdgpu_ctx_put(ctx);
+				if (r)
+					return r;
+			}
 		}
 	}
 
 	return 0;
 }
 
+static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+{
+	int i;
+	if (sched_job->ibs)
+		for (i = 0; i < sched_job->num_ibs; i++)
+			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+	kfree(sched_job->ibs);
+	if (sched_job->uf.bo)
+		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+	return 0;
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 	union drm_amdgpu_cs *cs = data;
-	struct amdgpu_cs_parser parser;
-	int r, i;
+	struct amdgpu_cs_parser *parser;
 	bool reserved_buffers = false;
+	int i, r;
 
 	down_read(&adev->exclusive_lock);
 	if (!adev->accel_working) {
 		up_read(&adev->exclusive_lock);
 		return -EBUSY;
 	}
-	/* initialize parser */
-	memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
-	parser.filp = filp;
-	parser.adev = adev;
-	r = amdgpu_cs_parser_init(&parser, data);
+
+	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
+	if (!parser)
+		return -ENOMEM;
+	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(&parser, r, false);
+		amdgpu_cs_parser_fini(parser, r, false);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
 
-	r = amdgpu_cs_parser_relocs(&parser);
-	if (r) {
-		if (r != -ERESTARTSYS) {
-			if (r == -ENOMEM)
-				DRM_ERROR("Not enough memory for command submission!\n");
-			else
-				DRM_ERROR("Failed to process the buffer list %d!\n", r);
-		}
+	r = amdgpu_cs_parser_relocs(parser);
+	if (r == -ENOMEM)
+		DRM_ERROR("Not enough memory for command submission!\n");
+	else if (r && r != -ERESTARTSYS)
+		DRM_ERROR("Failed to process the buffer list %d!\n", r);
+	else if (!r) {
+		reserved_buffers = true;
+		r = amdgpu_cs_ib_fill(adev, parser);
 	}
 
 	if (!r) {
-		reserved_buffers = true;
-		r = amdgpu_cs_ib_fill(adev, &parser);
+		r = amdgpu_cs_dependencies(adev, parser);
+		if (r)
+			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
 	}
 
-	if (!r)
-		r = amdgpu_cs_dependencies(adev, &parser);
-
-	if (r) {
-		amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
-		up_read(&adev->exclusive_lock);
-		r = amdgpu_cs_handle_lockup(adev, r);
-		return r;
-	}
+	if (r)
+		goto out;
 
-	for (i = 0; i < parser.num_ibs; i++)
-		trace_amdgpu_cs(&parser, i);
+	for (i = 0; i < parser->num_ibs; i++)
+		trace_amdgpu_cs(parser, i);
 
-	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
-	if (r) {
+	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	if (r)
 		goto out;
+
+	if (amdgpu_enable_scheduler && parser->num_ibs) {
+		struct amdgpu_job *job;
+		struct amdgpu_ring *ring = parser->ibs->ring;
+
+		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+		if (!job) {
+			r = -ENOMEM;
+			goto out;
+		}
+		job->base.sched = ring->scheduler;
+		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
+		job->adev = parser->adev;
+		job->ibs = parser->ibs;
+		job->num_ibs = parser->num_ibs;
+		job->base.owner = parser->filp;
+		mutex_init(&job->job_lock);
+		if (job->ibs[job->num_ibs - 1].user) {
+			memcpy(&job->uf, &parser->uf,
+			       sizeof(struct amdgpu_user_fence));
+			job->ibs[job->num_ibs - 1].user = &job->uf;
+		}
+
+		job->free_job = amdgpu_cs_free_job;
+		mutex_lock(&job->job_lock);
+		r = amd_sched_entity_push_job(&job->base);
+		if (r) {
+			mutex_unlock(&job->job_lock);
+			amdgpu_cs_free_job(job);
+			kfree(job);
+			goto out;
+		}
+		cs->out.handle =
+			amdgpu_ctx_add_fence(parser->ctx, ring,
+					     &job->base.s_fence->base);
+		parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+
+		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+		ttm_eu_fence_buffer_objects(&parser->ticket,
+				&parser->validated,
+				&job->base.s_fence->base);
+
+		mutex_unlock(&job->job_lock);
+		amdgpu_cs_parser_fini_late(parser);
+		up_read(&adev->exclusive_lock);
+		return 0;
 	}
 
-	cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
+	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(&parser, r, true);
+	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
 	up_read(&adev->exclusive_lock);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
@@ -806,30 +903,29 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	union drm_amdgpu_wait_cs *wait = data;
 	struct amdgpu_device *adev = dev->dev_private;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
-	struct amdgpu_fence *fence = NULL;
 	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
+	struct fence *fence;
 	long r;
 
-	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
-	if (ctx == NULL)
-		return -EINVAL;
-
 	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
 			       wait->in.ring, &ring);
-	if (r) {
-		amdgpu_ctx_put(ctx);
+	if (r)
 		return r;
-	}
 
-	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
-	if (r) {
-		amdgpu_ctx_put(ctx);
-		return r;
-	}
+	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
+	if (ctx == NULL)
+		return -EINVAL;
+
+	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+	if (IS_ERR(fence)) {
+		r = PTR_ERR(fence);
+	} else if (fence) {
+		r = fence_wait_timeout(fence, true, timeout);
+		fence_put(fence);
+	} else {
+		r = 1;
+	}
 
-	r = fence_wait_timeout(&fence->base, true, timeout);
-	amdgpu_fence_unref(&fence);
 	amdgpu_ctx_put(ctx);
 	if (r < 0)
 		return r;
@@ -864,7 +960,16 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 		if (!reloc->bo_va)
 			continue;
 
-		list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
+		list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+			if (mapping->it.start > addr ||
+			    addr > mapping->it.last)
+				continue;
+
+			*bo = reloc->bo_va->bo;
+			return mapping;
+		}
+
+		list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
 			if (mapping->it.start > addr ||
 			    addr > mapping->it.last)
 				continue;
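The net effect of the scheduler path above is that cs->out.handle is now a per-context, per-ring sequence number rather than a raw fence seq. A hedged sketch of the publish/lookup pair, condensed from amdgpu_cs_ioctl and amdgpu_cs_wait_ioctl (example_* names are illustrative only):

static uint64_t example_publish(struct amdgpu_ctx *ctx,
				struct amdgpu_ring *ring,
				struct fence *hw_fence)
{
	/* this value is what userspace gets back in cs->out.handle */
	return amdgpu_ctx_add_fence(ctx, ring, hw_fence);
}

static long example_wait(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 uint64_t handle, unsigned long timeout)
{
	struct fence *f = amdgpu_ctx_get_fence(ctx, ring, handle);
	long r;

	if (IS_ERR(f))
		return PTR_ERR(f);	/* seq never emitted: -EINVAL */
	if (!f)
		return 1;		/* aged out of the window: signaled */

	r = fence_wait_timeout(f, true, timeout);
	fence_put(f);
	return r;
}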
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6c66ac8a1891..20cbc4eb5a6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,54 +25,107 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static void amdgpu_ctx_do_release(struct kref *ref)
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+		    struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr;
+	unsigned i, j;
+	int r;
 
-	ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	mgr = &ctx->fpriv->ctx_mgr;
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->adev = adev;
+	kref_init(&ctx->refcount);
+	spin_lock_init(&ctx->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		ctx->rings[i].sequence = 1;
 
-	idr_remove(&mgr->ctx_handles, ctx->id);
-	kfree(ctx);
+	if (amdgpu_enable_scheduler) {
+		/* create context entity for each ring */
+		for (i = 0; i < adev->num_rings; i++) {
+			struct amd_sched_rq *rq;
+			if (kernel)
+				rq = &adev->rings[i]->scheduler->kernel_rq;
+			else
+				rq = &adev->rings[i]->scheduler->sched_rq;
+			r = amd_sched_entity_init(adev->rings[i]->scheduler,
+						  &ctx->rings[i].entity,
+						  rq, amdgpu_sched_jobs);
+			if (r)
+				break;
+		}
+
+		if (i < adev->num_rings) {
+			for (j = 0; j < i; j++)
+				amd_sched_entity_fini(adev->rings[j]->scheduler,
+						      &ctx->rings[j].entity);
+			/* ctx may be embedded (e.g. adev->kernel_ctx),
+			 * so the caller owns freeing it */
+			return r;
+		}
+	}
+	return 0;
 }
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+{
+	struct amdgpu_device *adev = ctx->adev;
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+			fence_put(ctx->rings[i].fences[j]);
+
+	if (amdgpu_enable_scheduler) {
+		for (i = 0; i < adev->num_rings; i++)
+			amd_sched_entity_fini(adev->rings[i]->scheduler,
+					      &ctx->rings[i].entity);
+	}
+}
+
+static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+			    struct amdgpu_fpriv *fpriv,
+			    uint32_t *id)
 {
-	int r;
-	struct amdgpu_ctx *ctx;
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
+	int r;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
 	mutex_lock(&mgr->lock);
-	r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
+	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
 	if (r < 0) {
 		mutex_unlock(&mgr->lock);
 		kfree(ctx);
 		return r;
 	}
 	*id = (uint32_t)r;
-
-	memset(ctx, 0, sizeof(*ctx));
-	ctx->id = *id;
-	ctx->fpriv = fpriv;
-	kref_init(&ctx->refcount);
+	r = amdgpu_ctx_init(adev, false, ctx);
+	if (r) {
+		idr_remove(&mgr->ctx_handles, *id);
+		kfree(ctx);
+	}
 	mutex_unlock(&mgr->lock);
 
-	return 0;
+	return r;
 }
 
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
+static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+
+	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+	amdgpu_ctx_fini(ctx);
+
+	kfree(ctx);
+}
+
+static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
+{
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
 
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
 	if (ctx) {
+		idr_remove(&mgr->ctx_handles, id);
 		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
 		mutex_unlock(&mgr->lock);
 		return 0;
@@ -86,9 +139,13 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 			    union drm_amdgpu_ctx_out *out)
 {
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx_mgr *mgr;
 	unsigned reset_counter;
 
+	if (!fpriv)
+		return -EINVAL;
+
+	mgr = &fpriv->ctx_mgr;
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
 	if (!ctx) {
@@ -97,8 +154,8 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 	}
 
 	/* TODO: these two are always zero */
-	out->state.flags = ctx->state.flags;
-	out->state.hangs = ctx->state.hangs;
+	out->state.flags = 0x0;
+	out->state.hangs = 0x0;
 
 	/* determine if a GPU reset has occured since the last call */
 	reset_counter = atomic_read(&adev->gpu_reset_counter);
@@ -113,28 +170,11 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 	return 0;
 }
 
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
-{
-	struct idr *idp;
-	struct amdgpu_ctx *ctx;
-	uint32_t id;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-	idp = &mgr->ctx_handles;
-
-	idr_for_each_entry(idp,ctx,id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
-			DRM_ERROR("ctx (id=%ul) is still alive\n",ctx->id);
-	}
-
-	mutex_destroy(&mgr->lock);
-}
-
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp)
 {
 	int r;
 	uint32_t id;
-	uint32_t flags;
 
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -142,15 +182,14 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 
 	r = 0;
 	id = args->in.ctx_id;
-	flags = args->in.flags;
 
 	switch (args->in.op) {
 		case AMDGPU_CTX_OP_ALLOC_CTX:
-			r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
+			r = amdgpu_ctx_alloc(adev, fpriv, &id);
 			args->out.alloc.ctx_id = id;
 			break;
 		case AMDGPU_CTX_OP_FREE_CTX:
-			r = amdgpu_ctx_free(adev, fpriv, id);
+			r = amdgpu_ctx_free(fpriv, id);
 			break;
 		case AMDGPU_CTX_OP_QUERY_STATE:
 			r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
@@ -165,7 +204,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 {
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx_mgr *mgr;
+
+	if (!fpriv)
+		return NULL;
+
+	mgr = &fpriv->ctx_mgr;
 
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
@@ -177,17 +221,86 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_fpriv *fpriv;
-	struct amdgpu_ctx_mgr *mgr;
-
 	if (ctx == NULL)
 		return -EINVAL;
 
-	fpriv = ctx->fpriv;
-	mgr = &fpriv->ctx_mgr;
-	mutex_lock(&mgr->lock);
 	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
-	mutex_unlock(&mgr->lock);
-
 	return 0;
 }
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			      struct fence *fence)
+{
+	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
+	uint64_t seq = cring->sequence;
+	unsigned idx = 0;
+	struct fence *other = NULL;
+
+	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	other = cring->fences[idx];
+	if (other) {
+		signed long r;
+		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		if (r < 0)
+			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+	}
+
+	fence_get(fence);
+
+	spin_lock(&ctx->ring_lock);
+	cring->fences[idx] = fence;
+	cring->sequence++;
+	spin_unlock(&ctx->ring_lock);
+
+	fence_put(other);
+
+	return seq;
+}
+
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				   struct amdgpu_ring *ring, uint64_t seq)
+{
+	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
+	struct fence *fence;
+
+	spin_lock(&ctx->ring_lock);
+
+	if (seq >= cring->sequence) {
+		spin_unlock(&ctx->ring_lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+		spin_unlock(&ctx->ring_lock);
+		return NULL;
+	}
+
+	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+	spin_unlock(&ctx->ring_lock);
+
+	return fence;
+}
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+{
+	mutex_init(&mgr->lock);
+	idr_init(&mgr->ctx_handles);
+}
+
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+			DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+
+	idr_destroy(&mgr->ctx_handles);
+	mutex_destroy(&mgr->lock);
+}
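The per-ring fence array above only retains the last AMDGPU_CTX_MAX_CS_PENDING fences; anything older must already have signaled, because amdgpu_ctx_add_fence waits for the slot it is about to overwrite. A standalone model of the lookup rule in amdgpu_ctx_get_fence (WINDOW is a stand-in constant, the real value is not shown in this hunk):

#define WINDOW 32	/* stand-in for AMDGPU_CTX_MAX_CS_PENDING */

/* returns -1 for a seq that was never emitted, 0 for "already
 * signaled", otherwise 1 with *slot set to the ring-buffer index
 * that still holds the fence */
static int window_lookup(uint64_t next_seq, uint64_t seq, unsigned *slot)
{
	if (seq >= next_seq)
		return -1;		/* from the future */
	if (seq + WINDOW < next_seq)
		return 0;		/* overwritten, hence signaled */
	*slot = seq % WINDOW;
	return 1;
}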
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 99f158e1baff..6ff6ae945794 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -55,6 +55,7 @@ static const char *amdgpu_asic_name[] = {
 	"MULLINS",
 	"TOPAZ",
 	"TONGA",
+	"FIJI",
 	"CARRIZO",
 	"LAST",
 };
@@ -63,7 +64,7 @@ bool amdgpu_device_is_px(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 
-	if (adev->flags & AMDGPU_IS_PX)
+	if (adev->flags & AMD_IS_PX)
 		return true;
 	return false;
 }
@@ -243,7 +244,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 
 	if (adev->vram_scratch.robj == NULL) {
 		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 				     NULL, &adev->vram_scratch.robj);
 		if (r) {
 			return r;
@@ -1160,6 +1162,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
+	case CHIP_FIJI:
 	case CHIP_CARRIZO:
 		if (adev->asic_type == CHIP_CARRIZO)
 			adev->family = AMDGPU_FAMILY_CZ;
@@ -1377,7 +1380,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->ddev = ddev;
 	adev->pdev = pdev;
 	adev->flags = flags;
-	adev->asic_type = flags & AMDGPU_ASIC_MASK;
+	adev->asic_type = flags & AMD_ASIC_MASK;
 	adev->is_atom_bios = false;
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
 	adev->mc.gtt_size = 512 * 1024 * 1024;
@@ -1523,6 +1526,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		return r;
 	}
 
+	r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
+	if (r) {
+		dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
+		return r;
+	}
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
@@ -1584,6 +1592,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	adev->shutdown = true;
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
+	amdgpu_ctx_fini(&adev->kernel_ctx);
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
@@ -1627,8 +1636,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	struct amdgpu_device *adev;
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
-	int i, r;
-	bool force_completion = false;
+	int r;
 
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
@@ -1667,21 +1675,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 
-	/* wait for gpu to finish processing current batch */
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (!ring)
-			continue;
-
-		r = amdgpu_fence_wait_empty(ring);
-		if (r) {
-			/* delay GPU reset to resume */
-			force_completion = true;
-		}
-	}
-	if (force_completion) {
-		amdgpu_fence_driver_force_completion(adev);
-	}
+	amdgpu_fence_driver_suspend(adev);
 
 	r = amdgpu_suspend(adev);
 
@@ -1739,6 +1733,8 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
 	r = amdgpu_resume(adev);
 
+	amdgpu_fence_driver_resume(adev);
+
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b16b9256883e..e3d70772b531 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,6 +35,36 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
+static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
+				   struct fence **f)
+{
+	struct amdgpu_fence *fence;
+	long r;
+
+	if (*f == NULL)
+		return;
+
+	fence = to_amdgpu_fence(*f);
+	if (fence) {
+		r = fence_wait(&fence->base, false);
+		if (r == -EDEADLK) {
+			up_read(&adev->exclusive_lock);
+			r = amdgpu_gpu_reset(adev);
+			down_read(&adev->exclusive_lock);
+		}
+	} else {
+		r = fence_wait(*f, false);
+	}
+
+	if (r)
+		DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
+
+	/* We continue with the page flip even if we failed to wait on
+	 * the fence, otherwise the DRM core and userspace will be
+	 * confused about which BO the CRTC is scanning out
+	 */
+	fence_put(*f);
+	*f = NULL;
+}
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
 {
@@ -44,34 +74,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
 
 	struct drm_crtc *crtc = &amdgpuCrtc->base;
-	struct amdgpu_fence *fence;
 	unsigned long flags;
-	int r;
+	unsigned i;
 
 	down_read(&adev->exclusive_lock);
-	if (work->fence) {
-		fence = to_amdgpu_fence(work->fence);
-		if (fence) {
-			r = amdgpu_fence_wait(fence, false);
-			if (r == -EDEADLK) {
-				up_read(&adev->exclusive_lock);
-				r = amdgpu_gpu_reset(adev);
-				down_read(&adev->exclusive_lock);
-			}
-		} else
-			r = fence_wait(work->fence, false);
-
-		if (r)
-			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
-
-		/* We continue with the page flip even if we failed to wait on
-		 * the fence, otherwise the DRM core and userspace will be
-		 * confused about which BO the CRTC is scanning out
-		 */
-
-		fence_put(work->fence);
-		work->fence = NULL;
-	}
+	amdgpu_flip_wait_fence(adev, &work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		amdgpu_flip_wait_fence(adev, &work->shared[i]);
 
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -108,6 +117,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	kfree(work->shared);
 	kfree(work);
 }
 
@@ -127,7 +137,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
-	int r;
+	int i, r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
@@ -167,7 +177,19 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+					      &work->shared_count,
+					      &work->shared);
+	if (unlikely(r != 0)) {
+		amdgpu_bo_unreserve(new_rbo);
+		DRM_ERROR("failed to get fences for buffer\n");
+		goto cleanup;
+	}
+
+	fence_get(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_get(work->shared[i]);
+
 	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
 	amdgpu_bo_unreserve(new_rbo);
 
@@ -212,7 +234,10 @@ pflip_cleanup:
 
 cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	fence_put(work->fence);
+	fence_put(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_put(work->shared[i]);
+	kfree(work->shared);
 	kfree(work);
 
 	return r;
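The flip path above now snapshots every fence on the reservation object, not just the exclusive one, and waits on each before programming the flip. A condensed model, assuming (as the cleanup path above implies) that reservation_object_get_fences_rcu hands back references which amdgpu_flip_wait_fence then consumes via fence_put:

static int wait_all_resv_fences(struct amdgpu_device *adev,
				struct reservation_object *resv)
{
	struct fence *excl, **shared;
	unsigned i, shared_count;
	int r;

	r = reservation_object_get_fences_rcu(resv, &excl,
					      &shared_count, &shared);
	if (r)
		return r;

	amdgpu_flip_wait_fence(adev, &excl);	/* writers; NULL is tolerated */
	for (i = 0; i < shared_count; ++i)
		amdgpu_flip_wait_fence(adev, &shared[i]);	/* readers */

	kfree(shared);
	return 0;
}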
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 56da962231fc..0fcc0bd1622c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -44,12 +44,15 @@
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
 
+#include "amdgpu_amdkfd.h"
+
 /*
  * KMS wrapper.
  * - 3.0.0 - initial driver
+ * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	0
+#define KMS_DRIVER_MINOR	1
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -61,7 +64,7 @@ int amdgpu_disp_priority = 0;
 int amdgpu_hw_i2c = 0;
 int amdgpu_pcie_gen2 = -1;
 int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 10000;
+int amdgpu_lockup_timeout = 0;
 int amdgpu_dpm = -1;
 int amdgpu_smc_load_fw = 1;
 int amdgpu_aspm = -1;
@@ -73,6 +76,9 @@ int amdgpu_deep_color = 0;
 int amdgpu_vm_size = 8;
 int amdgpu_vm_block_size = -1;
 int amdgpu_exp_hw_support = 0;
+int amdgpu_enable_scheduler = 0;
+int amdgpu_sched_jobs = 16;
+int amdgpu_sched_hw_submission = 2;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -101,7 +107,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
 MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(msi, amdgpu_msi, int, 0444);
 
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
 module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
 
 MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
@@ -137,36 +143,45 @@ module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
+MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable (default))");
+module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
+
+MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
+module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
+
+MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
+module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
-	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
 	/* Bonaire */
-	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
-	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
-	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
-	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
+	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
 	{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
 	{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
 	{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
@@ -188,39 +203,39 @@ static struct pci_device_id pciidlist[] = {
 	{0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
 	{0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
 	/* Kabini */
-	{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
 	/* mullins */
-	{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
 	/* topaz */
 	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -238,12 +253,14 @@ static struct pci_device_id pciidlist[] = {
 	{0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	/* fiji */
+	{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
 	/* carrizo */
-	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
+	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
 
 	{0, 0, 0}
 };
@@ -279,7 +296,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	unsigned long flags = ent->driver_data;
 	int ret;
 
-	if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
+	if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
 		DRM_INFO("This hardware requires experimental hardware support.\n"
 			 "See modparam exp_hw_support\n");
 		return -ENODEV;
@@ -527,12 +544,15 @@ static int __init amdgpu_init(void)
 	driver->num_ioctls = amdgpu_max_kms_ioctl;
 	amdgpu_register_atpx_handler();
 
+	amdgpu_amdkfd_init();
+
 	/* let modprobe override vga console setting */
 	return drm_pci_init(driver, pdriver);
 }
 
 static void __exit amdgpu_exit(void)
 {
+	amdgpu_amdkfd_fini();
 	drm_pci_exit(driver, pdriver);
 	amdgpu_unregister_atpx_handler();
 }
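
The CHIP_* and AMD_IS_* values above travel in the driver_data field of the PCI ID table and come back to the driver through the probe callback's id argument. A minimal standalone sketch of that pattern follows; the CHIP_FAKE/FAKE_IS_APU names and fake_probe() are hypothetical, not part of this patch.

#include <linux/module.h>
#include <linux/pci.h>

#define CHIP_FAKE	0x0001	/* hypothetical asic-type bits */
#define FAKE_IS_APU	0x0100	/* hypothetical feature flag */

static const struct pci_device_id fake_ids[] = {
	/* vendor, device, subvendor, subdevice, class, class_mask, driver_data */
	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FAKE|FAKE_IS_APU},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, fake_ids);

static int fake_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long flags = ent->driver_data;

	if (flags & FAKE_IS_APU)
		dev_info(&pdev->dev, "probed an APU variant\n");
	return 0;
}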
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index cceeb33c447a..e3a4f7048042 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -31,7 +31,7 @@
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
 
-#include "amdgpu_family.h"
+#include "amd_shared.h"
 
 /* General customization:
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index c1645d21f8e2..8a122b1b7786 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -53,9 +53,9 @@ static struct fb_ops amdgpufb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -126,8 +126,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = amdgpu_gem_object_create(adev, aligned_size, 0,
 				       AMDGPU_GEM_DOMAIN_VRAM,
-				       0, true,
-				       &gobj);
+				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				       true, &gobj);
 	if (ret) {
 		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
 		       aligned_size);
@@ -179,7 +179,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
 	struct amdgpu_bo *rbo = NULL;
-	struct device *device = &adev->pdev->dev;
 	int ret;
 	unsigned long tmp;
 
@@ -201,9 +200,9 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	rbo = gem_to_amdgpu_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
-	info = framebuffer_alloc(0, device);
-	if (info == NULL) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unref;
 	}
 
@@ -212,14 +211,13 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	fb = &rfbdev->rfb.base;
 
 	/* setup helper */
 	rfbdev->helper.fb = fb;
-	rfbdev->helper.fbdev = info;
 
 	memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
 
@@ -239,11 +237,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
 	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = adev->mc.aper_size;
 
@@ -251,13 +244,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 
 	if (info->screen_base == NULL) {
 		ret = -ENOSPC;
-		goto out_unref;
-	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
@@ -269,6 +256,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_unref:
 	if (rbo) {
 
@@ -290,17 +279,10 @@ void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
 
 static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
 {
-	struct fb_info *info;
 	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
 
-	if (rfbdev->helper.fbdev) {
-		info = rfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&rfbdev->helper);
+	drm_fb_helper_release_fbi(&rfbdev->helper);
 
 	if (rfb->obj) {
 		amdgpufb_destroy_pinned_object(rfb->obj);
@@ -395,7 +377,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
 {
 	if (adev->mode_info.rfbdev)
-		fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state);
+		drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
+			state);
 }
 
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
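
The conversion above moves amdgpu_fb.c onto the drm_fb_helper fbi helpers, which own the fb_info lifetime (allocation, cmap, registration) so drivers lose the hand-rolled framebuffer_alloc()/fb_alloc_cmap() error paths. A minimal sketch of the resulting shape, with my_framebuffer_init() and my_get_fb() as hypothetical driver-specific steps:

#include <drm/drm_fb_helper.h>

static int my_fbdev_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;
	int ret;

	info = drm_fb_helper_alloc_fbi(helper);	/* allocates fb_info + cmap */
	if (IS_ERR(info))
		return PTR_ERR(info);

	ret = my_framebuffer_init(helper);	/* hypothetical driver step */
	if (ret)
		goto out_destroy_fbi;

	helper->fb = my_get_fb(helper);		/* hypothetical */
	return 0;

out_destroy_fbi:
	drm_fb_helper_release_fbi(helper);	/* undoes alloc_fbi */
	return ret;
}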
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index a7189a1fa6a1..1be2bd6d07ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -126,7 +126,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	(*fence)->ring = ring;
 	(*fence)->owner = owner;
 	fence_init(&(*fence)->base, &amdgpu_fence_ops,
-		&adev->fence_queue.lock, adev->fence_context + ring->idx,
+		&ring->fence_drv.fence_queue.lock,
+		adev->fence_context + ring->idx,
 		(*fence)->seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       (*fence)->seq,
@@ -136,38 +137,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 }
 
 /**
- * amdgpu_fence_recreate - recreate a fence from an user fence
- *
- * @ring: ring the fence is associated with
- * @owner: creator of the fence
- * @seq: user fence sequence number
- * @fence: resulting amdgpu fence object
- *
- * Recreates a fence command from the user fence sequence number (all asics).
- * Returns 0 on success, -ENOMEM on failure.
- */
-int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
-			  uint64_t seq, struct amdgpu_fence **fence)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	if (seq > ring->fence_drv.sync_seq[ring->idx])
-		return -EINVAL;
-
-	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
-	if ((*fence) == NULL)
-		return -ENOMEM;
-
-	(*fence)->seq = seq;
-	(*fence)->ring = ring;
-	(*fence)->owner = owner;
-	fence_init(&(*fence)->base, &amdgpu_fence_ops,
-		&adev->fence_queue.lock, adev->fence_context + ring->idx,
-		(*fence)->seq);
-	return 0;
-}
-
-/**
  * amdgpu_fence_check_signaled - callback from fence_queue
  *
  * this function is called with fence_queue lock held, which is also used
@@ -196,9 +165,7 @@ static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
 		else
 			FENCE_TRACE(&fence->base, "was already signaled\n");
 
-		amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
-				fence->ring->fence_drv.irq_type);
-		__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
+		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
 		fence_put(&fence->base);
 	} else
 		FENCE_TRACE(&fence->base, "pending\n");
@@ -299,14 +266,9 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
 		return;
 	}
 
-	if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
-		fence_drv->delayed_irq = false;
-		amdgpu_irq_update(ring->adev, fence_drv->irq_src,
-				fence_drv->irq_type);
+	if (amdgpu_fence_activity(ring)) {
+		wake_up_all(&ring->fence_drv.fence_queue);
 	}
-
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->adev->fence_queue);
 	else if (amdgpu_ring_is_lockup(ring)) {
 		/* good news we believe it's a lockup */
 		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
@@ -316,7 +278,7 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
 
 	/* remember that we need a reset */
 		ring->adev->needs_reset = true;
-		wake_up_all(&ring->adev->fence_queue);
+		wake_up_all(&ring->fence_drv.fence_queue);
 	}
 	up_read(&ring->adev->exclusive_lock);
 }
@@ -332,62 +294,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
  */
 void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-	uint64_t seq, last_seq, last_emitted;
-	unsigned count_loop = 0;
-	bool wake = false;
-
-	/* Note there is a scenario here for an infinite loop but it's
-	 * very unlikely to happen. For it to happen, the current polling
-	 * process need to be interrupted by another process and another
-	 * process needs to update the last_seq btw the atomic read and
-	 * xchg of the current process.
-	 *
-	 * More over for this to go in infinite loop there need to be
-	 * continuously new fence signaled ie amdgpu_fence_read needs
-	 * to return a different value each time for both the currently
-	 * polling process and the other process that xchg the last_seq
-	 * btw atomic read and xchg of the current process. And the
-	 * value the other process set as last seq must be higher than
-	 * the seq value we just read. Which means that current process
-	 * need to be interrupted after amdgpu_fence_read and before
-	 * atomic xchg.
-	 *
-	 * To be even more safe we count the number of time we loop and
-	 * we bail after 10 loop just accepting the fact that we might
-	 * have temporarly set the last_seq not to the true real last
-	 * seq but to an older one.
-	 */
-	last_seq = atomic64_read(&ring->fence_drv.last_seq);
-	do {
-		last_emitted = ring->fence_drv.sync_seq[ring->idx];
-		seq = amdgpu_fence_read(ring);
-		seq |= last_seq & 0xffffffff00000000LL;
-		if (seq < last_seq) {
-			seq &= 0xffffffff;
-			seq |= last_emitted & 0xffffffff00000000LL;
-		}
-
-		if (seq <= last_seq || seq > last_emitted) {
-			break;
-		}
-		/* If we loop over we don't want to return without
-		 * checking if a fence is signaled as it means that the
-		 * seq we just read is different from the previous on.
-		 */
-		wake = true;
-		last_seq = seq;
-		if ((count_loop++) > 10) {
-			/* We looped over too many time leave with the
-			 * fact that we might have set an older fence
-			 * seq then the current real last seq as signaled
-			 * by the hw.
-			 */
-			break;
-		}
-	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
-
-	if (wake)
-		wake_up_all(&ring->adev->fence_queue);
+	if (amdgpu_fence_activity(ring))
+		wake_up_all(&ring->fence_drv.fence_queue);
 }
 
 /**
@@ -447,284 +355,49 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
-	struct amdgpu_device *adev = ring->adev;
 
 	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
 		return false;
 
-	if (down_read_trylock(&adev->exclusive_lock)) {
-		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
-			ring->fence_drv.irq_type);
-		if (amdgpu_fence_activity(ring))
-			wake_up_all_locked(&adev->fence_queue);
-
-		/* did fence get signaled after we enabled the sw irq? */
-		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
-			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-				ring->fence_drv.irq_type);
-			up_read(&adev->exclusive_lock);
-			return false;
-		}
-
-		up_read(&adev->exclusive_lock);
-	} else {
-		/* we're probably in a lockup, lets not fiddle too much */
-		if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
-			ring->fence_drv.irq_type))
-			ring->fence_drv.delayed_irq = true;
-		amdgpu_fence_schedule_check(ring);
-	}
-
 	fence->fence_wake.flags = 0;
 	fence->fence_wake.private = NULL;
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
-	__add_wait_queue(&adev->fence_queue, &fence->fence_wake);
+	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_get(f);
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 	return true;
 }
 
-/**
- * amdgpu_fence_signaled - check if a fence has signaled
- *
- * @fence: amdgpu fence object
- *
- * Check if the requested fence has signaled (all asics).
- * Returns true if the fence has signaled or false if it has not.
- */
-bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
-{
-	if (!fence)
-		return true;
-
-	if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
-		if (!fence_signal(&fence->base))
-			FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
-		return true;
-	}
-
-	return false;
-}
-
-/**
- * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
- *
- * @adev: amdgpu device pointer
- * @seq: sequence numbers
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if any has signaled (current value is >= requested value)
- * or false if it has not. Helper function for amdgpu_fence_wait_seq.
- */
-static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
-{
-	unsigned i;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		if (!adev->rings[i] || !seq[i])
-			continue;
-
-		if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
-			return true;
-	}
-
-	return false;
-}
-
-/**
- * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers
- *
- * @adev: amdgpu device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
- *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics).  Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number.  Helper function
- * for amdgpu_fence_wait_*().
- * Returns remaining time if the sequence number has passed, 0 when
- * the wait timeout, or an error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected.
- */
-static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
-					  u64 *target_seq, bool intr,
-					  long timeout)
-{
-	uint64_t last_seq[AMDGPU_MAX_RINGS];
-	bool signaled;
-	int i;
-	long r;
-
-	if (timeout == 0) {
-		return amdgpu_fence_any_seq_signaled(adev, target_seq);
-	}
-
-	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
-
-		/* Save current sequence values, used to check for GPU lockups */
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_ring *ring = adev->rings[i];
-
-			if (!ring || !target_seq[i])
-				continue;
-
-			last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
-			trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
-			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
-				       ring->fence_drv.irq_type);
-		}
-
-		if (intr) {
-			r = wait_event_interruptible_timeout(adev->fence_queue, (
-				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
-				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
-		} else {
-			r = wait_event_timeout(adev->fence_queue, (
-				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
-				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
-		}
-
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_ring *ring = adev->rings[i];
-
-			if (!ring || !target_seq[i])
-				continue;
-
-			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-				       ring->fence_drv.irq_type);
-			trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
-		}
-
-		if (unlikely(r < 0))
-			return r;
-
-		if (unlikely(!signaled)) {
-
-			if (adev->needs_reset)
-				return -EDEADLK;
-
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r)
-				continue;
-
-			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-				struct amdgpu_ring *ring = adev->rings[i];
-
-				if (!ring || !target_seq[i])
-					continue;
-
-				if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
-					break;
-			}
-
-			if (i != AMDGPU_MAX_RINGS)
-				continue;
-
-			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-				if (!adev->rings[i] || !target_seq[i])
-					continue;
-
-				if (amdgpu_ring_is_lockup(adev->rings[i]))
-					break;
-			}
-
-			if (i < AMDGPU_MAX_RINGS) {
-				/* good news we believe it's a lockup */
-				dev_warn(adev->dev, "GPU lockup (waiting for "
-					 "0x%016llx last fence id 0x%016llx on"
-					 " ring %d)\n",
-					 target_seq[i], last_seq[i], i);
-
-				/* remember that we need an reset */
-				adev->needs_reset = true;
-				wake_up_all(&adev->fence_queue);
-				return -EDEADLK;
-			}
-
-			if (timeout < MAX_SCHEDULE_TIMEOUT) {
-				timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
-				if (timeout <= 0) {
-					return 0;
-				}
-			}
-		}
-	}
-	return timeout;
-}
-
-/**
- * amdgpu_fence_wait - wait for a fence to signal
- *
- * @fence: amdgpu fence object
- * @intr: use interruptable sleep
- *
- * Wait for the requested fence to signal (all asics).
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the fence.
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
-{
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
-	long r;
-
-	seq[fence->ring->idx] = fence->seq;
-	r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		return r;
-	}
-
-	r = fence_signal(&fence->base);
-	if (!r)
-		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
-	return 0;
-}
-
-/**
- * amdgpu_fence_wait_any - wait for a fence to signal on any ring
- *
- * @adev: amdgpu device pointer
- * @fences: amdgpu fence object(s)
- * @intr: use interruptable sleep
+/*
+ * amdgpu_fence_ring_wait_seq - wait for a specific ring's seq number to signal
+ * @ring: ring to wait on for the seq number
+ * @seq: seq number to wait for
  *
- * Wait for any requested fence to signal (all asics).  Fence
- * array is indexed by ring id.  @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
+ * Return value:
+ * 0: seq signaled, and the GPU did not hang
+ * -EDEADLK: GPU hang detected
+ * -EINVAL: some parameter is not valid
  */
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-			  struct amdgpu_fence **fences,
-			  bool intr)
+static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 {
-	uint64_t seq[AMDGPU_MAX_RINGS];
-	unsigned i, num_rings = 0;
-	long r;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		seq[i] = 0;
+	struct amdgpu_device *adev = ring->adev;
+	bool signaled = false;
 
-		if (!fences[i]) {
-			continue;
-		}
+	BUG_ON(!ring);
+	if (seq > ring->fence_drv.sync_seq[ring->idx])
+		return -EINVAL;
 
-		seq[i] = fences[i]->seq;
-		++num_rings;
-	}
+	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
+		return 0;
 
-	/* nothing to wait for ? */
-	if (num_rings == 0)
-		return -ENOENT;
+	wait_event(ring->fence_drv.fence_queue, (
+		   (signaled = amdgpu_fence_seq_signaled(ring, seq))
+		   || adev->needs_reset));
 
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		return r;
-	}
-	return 0;
+	if (signaled)
+		return 0;
+	else
+		return -EDEADLK;
 }
 
 /**
@@ -739,19 +412,12 @@ int amdgpu_fence_wait_any(struct amdgpu_device *adev,
  */
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
 {
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
-	long r;
+	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
 
-	seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
-	if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
-		/* nothing to wait for, last_seq is
-		   already the last emited fence */
+	if (seq >= ring->fence_drv.sync_seq[ring->idx])
 		return -ENOENT;
-	}
-	r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0)
-		return r;
-	return 0;
+
+	return amdgpu_fence_ring_wait_seq(ring, seq);
 }
 
 /**
@@ -766,23 +432,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-	struct amdgpu_device *adev = ring->adev;
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
-	long r;
+	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
 
-	seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
-	if (!seq[ring->idx])
+	if (!seq)
 		return 0;
 
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		if (r == -EDEADLK)
-			return -EDEADLK;
-
-		dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
-			ring->idx, r);
-	}
-	return 0;
+	return amdgpu_fence_ring_wait_seq(ring, seq);
 }
 
 /**
@@ -933,9 +588,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
 	}
 	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
-	ring->fence_drv.initialized = true;
+	amdgpu_irq_get(adev, irq_src, irq_type);
+
 	ring->fence_drv.irq_src = irq_src;
 	ring->fence_drv.irq_type = irq_type;
+	ring->fence_drv.initialized = true;
+
 	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
 		 "cpu addr 0x%p\n", ring->idx,
 		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
@@ -966,6 +624,16 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
 			amdgpu_fence_check_lockup);
 	ring->fence_drv.ring = ring;
+
+	if (amdgpu_enable_scheduler) {
+		ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
+						   ring->idx,
+						   amdgpu_sched_hw_submission,
+						   (void *)ring->adev);
+		if (!ring->scheduler)
+			DRM_ERROR("Failed to create scheduler on ring %d.\n",
+				  ring->idx);
+	}
 }
 
 /**
@@ -982,7 +650,6 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
-	init_waitqueue_head(&adev->fence_queue);
 	if (amdgpu_debugfs_fence_init(adev))
 		dev_err(adev->dev, "fence debugfs file creation failed\n");
 
@@ -1011,13 +678,78 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 			/* no need to trigger GPU reset as we are unloading */
 			amdgpu_fence_driver_force_completion(adev);
 		}
-		wake_up_all(&adev->fence_queue);
+		wake_up_all(&ring->fence_drv.fence_queue);
+		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+		if (ring->scheduler)
+			amd_sched_destroy(ring->scheduler);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
 }
 
 /**
+ * amdgpu_fence_driver_suspend - suspend the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Suspend the fence driver for all possible rings (all asics).
+ */
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		/* wait for gpu to finish processing current batch */
+		r = amdgpu_fence_wait_empty(ring);
+		if (r) {
+			/* delay GPU reset to resume */
+			amdgpu_fence_driver_force_completion(adev);
+		}
+
+		/* disable the interrupt */
+		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
+/**
+ * amdgpu_fence_driver_resume - resume the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Resume the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so only rings with an
+ * initialized fence driver get their interrupt re-enabled.
+ */
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+{
+	int i;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		/* enable the interrupt */
+		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
+/**
  * amdgpu_fence_driver_force_completion - force all fence waiter to complete
  *
  * @adev: amdgpu device pointer
@@ -1104,6 +836,21 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
 	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
+{
+	int idx;
+	struct fence *fence;
+
+	for (idx = 0; idx < count; ++idx) {
+		fence = fences[idx];
+		if (fence) {
+			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+				return true;
+		}
+	}
+	return false;
+}
+
 struct amdgpu_wait_cb {
 	struct fence_cb base;
 	struct task_struct *task;
@@ -1121,12 +868,48 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_device *adev = fence->ring->adev;
-	struct amdgpu_wait_cb cb;
 
-	cb.task = current;
+	return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
+}
 
-	if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
-		return t;
+/**
+ * amdgpu_fence_wait_any - wait for any fence in the array to signal
+ *
+ * @adev:     amdgpu device
+ * @array:    array of fence pointers
+ * @count:    number of fences in the array
+ * @intr:     use interruptible sleep while waiting
+ * @t:        timeout to wait
+ *
+ * Returns when any fence is signaled or when the timeout expires.
+ */
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+				  struct fence **array, uint32_t count,
+				  bool intr, signed long t)
+{
+	struct amdgpu_wait_cb *cb;
+	struct fence *fence;
+	unsigned idx;
+
+	BUG_ON(!array);
+
+	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
+	if (cb == NULL) {
+		t = -ENOMEM;
+		goto err_free_cb;
+	}
+
+	for (idx = 0; idx < count; ++idx) {
+		fence = array[idx];
+		if (fence) {
+			cb[idx].task = current;
+			if (fence_add_callback(fence,
+					&cb[idx].base, amdgpu_fence_wait_cb)) {
+				/* The fence is already signaled */
+				goto fence_rm_cb;
+			}
+		}
+	}
 
 	while (t > 0) {
 		if (intr)
@@ -1135,10 +918,10 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
 			set_current_state(TASK_UNINTERRUPTIBLE);
 
 		/*
-		 * amdgpu_test_signaled must be called after
+		 * amdgpu_test_signaled_any must be called after
 		 * set_current_state to prevent a race with wake_up_process
 		 */
-		if (amdgpu_test_signaled(fence))
+		if (amdgpu_test_signaled_any(array, count))
 			break;
 
 		if (adev->needs_reset) {
@@ -1153,7 +936,16 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
 	}
 
 	__set_current_state(TASK_RUNNING);
-	fence_remove_callback(f, &cb.base);
+
+fence_rm_cb:
+	for (idx = 0; idx < count; ++idx) {
+		fence = array[idx];
+		if (fence && cb[idx].base.func)
+			fence_remove_callback(fence, &cb[idx].base);
+	}
+
+err_free_cb:
+	kfree(cb);
 
 	return t;
 }
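
The rewritten wait path above boils down to a standard fence-callback sleep loop: arm a callback that calls wake_up_process(), then re-check the signaled bit after every set_current_state() so a wakeup between the check and the sleep is never lost. A single-fence sketch against the pre-4.10 struct fence API, for illustration only:

#include <linux/fence.h>
#include <linux/sched.h>

struct wake_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void wake_cb_func(struct fence *f, struct fence_cb *cb)
{
	struct wake_cb *wcb = container_of(cb, struct wake_cb, base);

	wake_up_process(wcb->task);
}

static signed long wait_one(struct fence *f, signed long timeout)
{
	struct wake_cb cb = { .task = current };

	if (fence_add_callback(f, &cb.base, wake_cb_func))
		return timeout;		/* already signaled */

	while (timeout > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* must test after set_current_state to avoid a lost wakeup */
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;
		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);
	fence_remove_callback(f, &cb.base);
	return timeout;
}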
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index e02db0b2e839..cbd3a486c5c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -125,7 +125,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 
 	if (adev->gart.robj == NULL) {
 		r = amdgpu_bo_create(adev, adev->gart.table_size,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 				     NULL, &adev->gart.robj);
 		if (r) {
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4afc507820c0..5839fab374bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -615,6 +615,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
 		info.domains = robj->initial_domain;
 		info.domain_flags = robj->flags;
+		amdgpu_bo_unreserve(robj);
 		if (copy_to_user(out, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
@@ -622,17 +623,19 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 	case AMDGPU_GEM_OP_SET_PLACEMENT:
 		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
 			r = -EPERM;
+			amdgpu_bo_unreserve(robj);
 			break;
 		}
 		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
 						      AMDGPU_GEM_DOMAIN_GTT |
 						      AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_bo_unreserve(robj);
 		break;
 	default:
+		amdgpu_bo_unreserve(robj);
 		r = -EINVAL;
 	}
 
-	amdgpu_bo_unreserve(robj);
 out:
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
@@ -653,7 +656,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 
 	r = amdgpu_gem_object_create(adev, args->size, 0,
 				     AMDGPU_GEM_DOMAIN_VRAM,
-				     0, ttm_bo_type_device,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     ttm_bo_type_device,
 				     &gobj);
 	if (r)
 		return -ENOMEM;
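
The unreserve hunks above exist because copy_to_user() may fault: resolving the fault can block on mmap_sem while another task holding mmap_sem waits on the same buffer reservation, so the reservation must be dropped before touching user memory. A sketch of the corrected ordering (bo_report and the out pointer are illustrative, not from this patch):

#include <linux/uaccess.h>

struct bo_report {
	__u64 size;
};

static int report_bo_size(struct amdgpu_bo *robj, void __user *out)
{
	struct bo_report info;

	info.size = amdgpu_bo_size(robj);	/* gather while reserved */
	amdgpu_bo_unreserve(robj);		/* drop the lock first ... */
	if (copy_to_user(out, &info, sizeof(info)))
		return -EFAULT;			/* ... then do the fault-able copy */
	return 0;
}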
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index bc0fac618a3f..c439735ee670 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -73,28 +73,12 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
 
 		if (!vm)
 			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
-		else
-			ib->gpu_addr = 0;
-
-	} else {
-		ib->sa_bo = NULL;
-		ib->ptr = NULL;
-		ib->gpu_addr = 0;
 	}
 
 	amdgpu_sync_create(&ib->sync);
 
 	ib->ring = ring;
-	ib->fence = NULL;
-	ib->user = NULL;
 	ib->vm = vm;
-	ib->gds_base = 0;
-	ib->gds_size = 0;
-	ib->gws_base = 0;
-	ib->gws_size = 0;
-	ib->oa_base = 0;
-	ib->oa_size = 0;
-	ib->flags = 0;
 
 	return 0;
 }
@@ -109,8 +93,8 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
  */
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
 {
-	amdgpu_sync_free(adev, &ib->sync, ib->fence);
-	amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
+	amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
+	amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
 	amdgpu_fence_unref(&ib->fence);
 }
 
@@ -156,7 +140,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 		dev_err(adev->dev, "couldn't schedule ib\n");
 		return -EINVAL;
 	}
-
+	r = amdgpu_sync_wait(&ibs->sync);
+	if (r) {
+		dev_err(adev->dev, "IB sync failed (%d).\n", r);
+		return r;
+	}
 	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
 	if (r) {
 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
@@ -165,9 +153,11 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 
 	if (vm) {
 		/* grab a vm id if necessary */
-		struct amdgpu_fence *vm_id_fence = NULL;
-		vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
-		amdgpu_sync_fence(&ibs->sync, vm_id_fence);
+		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
+		if (r) {
+			amdgpu_ring_unlock_undo(ring);
+			return r;
+		}
 	}
 
 	r = amdgpu_sync_rings(&ibs->sync, ring);
@@ -212,11 +202,15 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 		return r;
 	}
 
+	if (!amdgpu_enable_scheduler && ib->ctx)
+		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
+						    &ib->fence->base);
+
 	/* wrap the last IB with fence */
 	if (ib->user) {
 		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
 		addr += ib->user->offset;
-		amdgpu_ring_emit_fence(ring, addr, ib->fence->seq,
+		amdgpu_ring_emit_fence(ring, addr, ib->sequence,
 				       AMDGPU_FENCE_FLAG_64BIT);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index db5422e65ec5..5c8a803acedc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_ih.h"
+#include "amdgpu_amdkfd.h"
 
 /**
  * amdgpu_ih_ring_alloc - allocate memory for the IH ring
@@ -97,18 +98,12 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
 			/* add 8 bytes for the rptr/wptr shadows and
 			 * add them to the end of the ring allocation.
 			 */
-			adev->irq.ih.ring = kzalloc(adev->irq.ih.ring_size + 8, GFP_KERNEL);
+			adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
+								 adev->irq.ih.ring_size + 8,
+								 &adev->irq.ih.rb_dma_addr);
 			if (adev->irq.ih.ring == NULL)
 				return -ENOMEM;
-			adev->irq.ih.rb_dma_addr = pci_map_single(adev->pdev,
-								  (void *)adev->irq.ih.ring,
-								  adev->irq.ih.ring_size,
-								  PCI_DMA_BIDIRECTIONAL);
-			if (pci_dma_mapping_error(adev->pdev, adev->irq.ih.rb_dma_addr)) {
-				dev_err(&adev->pdev->dev, "Failed to DMA MAP the IH RB page\n");
-				kfree((void *)adev->irq.ih.ring);
-				return -ENOMEM;
-			}
+			memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
 			adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
 			adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
 		}
@@ -148,9 +143,9 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
 			/* add 8 bytes for the rptr/wptr shadows and
 			 * add them to the end of the ring allocation.
 			 */
-			pci_unmap_single(adev->pdev, adev->irq.ih.rb_dma_addr,
-					 adev->irq.ih.ring_size + 8, PCI_DMA_BIDIRECTIONAL);
-			kfree((void *)adev->irq.ih.ring);
+			pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
+					    (void *)adev->irq.ih.ring,
+					    adev->irq.ih.rb_dma_addr);
 			adev->irq.ih.ring = NULL;
 		}
 	} else {
@@ -199,6 +194,14 @@ restart_ih:
 	rmb();
 
 	while (adev->irq.ih.rptr != wptr) {
+		u32 ring_index = adev->irq.ih.rptr >> 2;
+
+		/* Before dispatching irq to IP blocks, send it to amdkfd */
+		amdgpu_amdkfd_interrupt(adev,
+				(const void *) &adev->irq.ih.ring[ring_index]);
+
+		entry.iv_entry = (const uint32_t *)
+			&adev->irq.ih.ring[ring_index];
 		amdgpu_ih_decode_iv(adev, &entry);
 		adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
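
Switching the IH ring from kzalloc()+pci_map_single() to pci_alloc_consistent() trades a streaming mapping, which assumes explicit ownership hand-offs, for a coherent buffer that the GPU can write and the CPU can poll continuously. A minimal sketch of the allocation pair, assuming only the legacy PCI DMA API; the memset mirrors the patch, since consistent allocations were not guaranteed zeroed at the time:

#include <linux/pci.h>
#include <linux/string.h>

static void *alloc_ih_ring(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	void *ring = pci_alloc_consistent(pdev, size, dma);

	if (ring)
		memset(ring, 0, size);	/* zero explicitly, as the patch does */
	return ring;
}

static void free_ih_ring(struct pci_dev *pdev, size_t size, void *ring,
			 dma_addr_t dma)
{
	pci_free_consistent(pdev, size, ring, dma);
}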
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index c62b09e555d6..ba38ae6a1463 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -52,6 +52,7 @@ struct amdgpu_iv_entry {
 	unsigned ring_id;
 	unsigned vm_id;
 	unsigned pas_id;
+	const uint32_t *iv_entry;
 };
 
 int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index b4d36f0f2153..0aba8e9bc8a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -272,6 +272,11 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 
 		kfree(src->enabled_types);
 		src->enabled_types = NULL;
+		if (src->data) {
+			kfree(src->data);
+			kfree(src);
+			adev->irq.sources[i] = NULL;
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 8299795f2b2d..17b01aef4278 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -40,6 +40,7 @@ struct amdgpu_irq_src {
 	unsigned				num_types;
 	atomic_t				*enabled_types;
 	const struct amdgpu_irq_src_funcs	*funcs;
+	void *data;
 };
 
 /* provided by interrupt generating IP blocks */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 3bfe67de8349..22367939ebf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -34,6 +34,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include "amdgpu_amdkfd.h"
 
 #if defined(CONFIG_VGA_SWITCHEROO)
 bool amdgpu_has_atpx(void);
@@ -61,6 +62,8 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
 
 	pm_runtime_get_sync(dev->dev);
 
+	amdgpu_amdkfd_device_fini(adev);
+
 	amdgpu_acpi_fini(adev);
 
 	amdgpu_device_fini(adev);
@@ -93,8 +96,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 
 	if ((amdgpu_runtime_pm != 0) &&
 	    amdgpu_has_atpx() &&
-	    ((flags & AMDGPU_IS_APU) == 0))
-		flags |= AMDGPU_IS_PX;
+	    ((flags & AMD_IS_APU) == 0))
+		flags |= AMD_IS_PX;
 
 	/* amdgpu_device_init should report only fatal error
 	 * like memory allocation failure or iomapping failure,
@@ -118,6 +121,10 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 				"Error during ACPI methods call\n");
 	}
 
+	amdgpu_amdkfd_load_interface(adev);
+	amdgpu_amdkfd_device_probe(adev);
+	amdgpu_amdkfd_device_init(adev);
+
 	if (amdgpu_device_is_px(dev)) {
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -444,11 +451,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
 		dev_info._pad = 0;
 		dev_info.ids_flags = 0;
-		if (adev->flags & AMDGPU_IS_APU)
+		if (adev->flags & AMD_IS_APU)
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
 		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
-		dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL);
+		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
 		dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
 					     AMDGPU_GPU_PAGE_SIZE;
 		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
@@ -520,10 +527,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init(&fpriv->bo_list_handles);
 
-	/* init context manager */
-	mutex_init(&fpriv->ctx_mgr.lock);
-	idr_init(&fpriv->ctx_mgr.ctx_handles);
-	fpriv->ctx_mgr.adev = adev;
+	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
 
 	file_priv->driver_priv = fpriv;
 
@@ -556,6 +560,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	if (!fpriv)
 		return;
 
+	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+
 	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -564,9 +570,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	idr_destroy(&fpriv->bo_list_handles);
 	mutex_destroy(&fpriv->bo_list_lock);
 
-	/* release context */
-	amdgpu_ctx_fini(fpriv);
-
 	kfree(fpriv);
 	file_priv->driver_priv = NULL;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8da64245b31b..08b09d55b96f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -127,7 +127,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 			placements[c].fpfn =
 				adev->mc.visible_vram_size >> PAGE_SHIFT;
 			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_VRAM;
+				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
 		}
 		placements[c].fpfn = 0;
 		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
@@ -223,18 +223,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	size_t acc_size;
 	int r;
 
-	/* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
-	 * do this as a temporary workaround
-	 */
-	if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
-		if (adev->asic_type >= CHIP_TOPAZ) {
-			if (byte_align & 0x7fff)
-				byte_align = ALIGN(byte_align, 0x8000);
-			if (size & 0x7fff)
-				size = ALIGN(size, 0x8000);
-		}
-	}
-
 	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 	size = ALIGN(size, PAGE_SIZE);
 
@@ -462,7 +450,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-	if (0 && (adev->flags & AMDGPU_IS_APU)) {
+	if (0 && (adev->flags & AMD_IS_APU)) {
 		/* Useless to evict on IGP chips */
 		return 0;
 	}
@@ -478,7 +466,6 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
 	}
 	dev_err(adev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
-		mutex_lock(&adev->ddev->struct_mutex);
 		dev_err(adev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -486,8 +473,7 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->adev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&adev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
 
@@ -658,13 +644,13 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
  * @shared: true if fence should be added shared
  *
  */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared)
 {
 	struct reservation_object *resv = bo->tbo.resv;
 
 	if (shared)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		reservation_object_add_shared_fence(resv, fence);
 	else
-		reservation_object_add_excl_fence(resv, &fence->base);
+		reservation_object_add_excl_fence(resv, fence);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 675bdc30e41d..6ea18dcec561 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -161,7 +161,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *new_mem);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared);
 
 /*
@@ -193,7 +193,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 			    unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
 			      struct amdgpu_sa_bo **sa_bo,
-			      struct amdgpu_fence *fence);
+			      struct fence *fence);
 #if defined(CONFIG_DEBUG_FS)
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 					 struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ed13baa7c976..efed11509f4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -82,7 +82,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
 	mutex_unlock(&adev->pm.mutex);
 
 	/* Can't set dpm state when the card is off */
-	if (!(adev->flags & AMDGPU_IS_PX) ||
+	if (!(adev->flags & AMD_IS_PX) ||
 	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
 		amdgpu_pm_compute_clocks(adev);
 fail:
@@ -538,7 +538,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 		/* vce just modifies an existing state so force a change */
 		if (ps->vce_active != adev->pm.dpm.vce_active)
 			goto force;
-		if (adev->flags & AMDGPU_IS_APU) {
+		if (adev->flags & AMD_IS_APU) {
 			/* for APUs if the num crtcs changed but state is the same,
 			 * all we need to do is update the display configuration.
 			 */
@@ -580,7 +580,6 @@ force:
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
 	}
 
-	mutex_lock(&adev->ddev->struct_mutex);
 	mutex_lock(&adev->ring_lock);
 
 	/* update whether vce is active */
@@ -628,7 +627,6 @@ force:
 
 done:
 	mutex_unlock(&adev->ring_lock);
-	mutex_unlock(&adev->ddev->struct_mutex);
 }
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 855e2196657a..9bec91484c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -131,6 +131,21 @@ int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
 	return 0;
 }
 
+/**
+ * amdgpu_ring_insert_nop - insert NOP packets
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @count: the number of NOP packets to insert
+ *
+ * This is the generic insert_nop function for rings except SDMA
+ */
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * amdgpu_ring_commit - tell the GPU to execute the new
  * commands on the ring buffer
@@ -143,10 +158,13 @@ int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw)
  */
 void amdgpu_ring_commit(struct amdgpu_ring *ring)
 {
+	uint32_t count;
+
 	/* We pad to match fetch size */
-	while (ring->wptr & ring->align_mask) {
-		amdgpu_ring_write(ring, ring->nop);
-	}
+	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
+	count %= ring->align_mask + 1;
+	ring->funcs->insert_nop(ring, count);
+
 	mb();
 	amdgpu_ring_set_wptr(ring);
 }
@@ -342,6 +360,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		amdgpu_fence_driver_init_ring(ring);
 	}
 
+	init_waitqueue_head(&ring->fence_drv.fence_queue);
+
 	r = amdgpu_wb_get(adev, &ring->rptr_offs);
 	if (r) {
 		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -367,7 +387,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	}
 	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
 	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
+	spin_lock_init(&ring->fence_lock);
 	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 	if (r) {
 		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
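
The padding math in the amdgpu_ring_commit() hunk is easy to check by hand: with align_mask = 7 (an 8-dword fetch), wptr = 13 needs 8 - (13 & 7) = 3 NOPs to reach 16, while an already aligned wptr yields 8, which the final modulo folds to 0. A standalone check:

#include <assert.h>
#include <stdint.h>

static uint32_t pad_count(uint32_t wptr, uint32_t align_mask)
{
	uint32_t count = align_mask + 1 - (wptr & align_mask);

	return count % (align_mask + 1);	/* aligned wptr -> 0, not 8 */
}

int main(void)
{
	assert(pad_count(13, 7) == 3);
	assert(pad_count(16, 7) == 0);
	return 0;
}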
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index eb20987ce18d..74dad270362c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -139,6 +139,20 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
 	return r;
 }
 
+static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
+{
+	struct amdgpu_fence *a_fence;
+	struct amd_sched_fence *s_fence;
+
+	s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return s_fence->scheduler->ring_id;
+	a_fence = to_amdgpu_fence(f);
+	if (a_fence)
+		return a_fence->ring->idx;
+	return 0;
+}
+
 static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 {
 	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
@@ -147,7 +161,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 	}
 	list_del_init(&sa_bo->olist);
 	list_del_init(&sa_bo->flist);
-	amdgpu_fence_unref(&sa_bo->fence);
+	fence_put(sa_bo->fence);
 	kfree(sa_bo);
 }
 
@@ -160,7 +174,8 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
 
 	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
 	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
-		if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) {
+		if (sa_bo->fence == NULL ||
+		    !fence_is_signaled(sa_bo->fence)) {
 			return;
 		}
 		amdgpu_sa_bo_remove_locked(sa_bo);
@@ -245,7 +260,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-				   struct amdgpu_fence **fences,
+				   struct fence **fences,
 				   unsigned *tries)
 {
 	struct amdgpu_sa_bo *best_bo = NULL;
@@ -274,7 +289,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					 struct amdgpu_sa_bo, flist);
 
-		if (!amdgpu_fence_signaled(sa_bo->fence)) {
+		if (!fence_is_signaled(sa_bo->fence)) {
 			fences[i] = sa_bo->fence;
 			continue;
 		}
@@ -298,7 +313,8 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
 	}
 
 	if (best_bo) {
-		++tries[best_bo->fence->ring->idx];
+		uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
+		++tries[idx];
 		sa_manager->hole = best_bo->olist.prev;
 
 		/* we knew that this one is signaled,
@@ -314,9 +330,10 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
+	struct fence *fences[AMDGPU_MAX_RINGS];
 	unsigned tries[AMDGPU_MAX_RINGS];
 	int i, r;
+	signed long t;
 
 	BUG_ON(align > sa_manager->align);
 	BUG_ON(size > sa_manager->size);
@@ -350,7 +367,9 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
 		spin_unlock(&sa_manager->wq.lock);
-		r = amdgpu_fence_wait_any(adev, fences, false);
+		t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
+					  false, MAX_SCHEDULE_TIMEOUT);
+		r = (t > 0) ? 0 : t;
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for block */
 		if (r == -ENOENT) {
@@ -369,7 +388,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-		       struct amdgpu_fence *fence)
+		       struct fence *fence)
 {
 	struct amdgpu_sa_manager *sa_manager;
 
@@ -379,10 +398,11 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
 	sa_manager = (*sa_bo)->manager;
 	spin_lock(&sa_manager->wq.lock);
-	if (fence && !amdgpu_fence_signaled(fence)) {
-		(*sa_bo)->fence = amdgpu_fence_ref(fence);
-		list_add_tail(&(*sa_bo)->flist,
-			      &sa_manager->flist[fence->ring->idx]);
+	if (fence && !fence_is_signaled(fence)) {
+		uint32_t idx;
+		(*sa_bo)->fence = fence_get(fence);
+		idx = amdgpu_sa_get_ring_from_fence(fence);
+		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
 		amdgpu_sa_bo_remove_locked(*sa_bo);
 	}
@@ -409,8 +429,16 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
 		if (i->fence) {
-			seq_printf(m, " protected by 0x%016llx on ring %d",
-				   i->fence->seq, i->fence->ring->idx);
+			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
+			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
+			if (a_fence)
+				seq_printf(m, " protected by 0x%016llx on ring %d",
+					   a_fence->seq, a_fence->ring->idx);
+			if (s_fence)
+				seq_printf(m, " protected by 0x%016x on ring %d",
+					   s_fence->base.seqno,
+					   s_fence->scheduler->ring_id);
+
 		}
 		seq_printf(m, "\n");
 	}
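
amdgpu_sa_get_ring_from_fence() above works because each to_*_fence() helper compares f->ops against its own ops table and returns NULL on a mismatch, so a caller can probe which fence flavor it holds. The idiom, sketched with my_fence/my_fence_ops as hypothetical stand-ins:

#include <linux/fence.h>

static const struct fence_ops my_fence_ops;	/* assumed defined elsewhere */

struct my_fence {
	struct fence base;
	int ring_idx;
};

static struct my_fence *to_my_fence(struct fence *f)
{
	if (f->ops != &my_fence_ops)
		return NULL;			/* some other fence type */
	return container_of(f, struct my_fence, base);
}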
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 000000000000..de98fbd2971e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+{
+	struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
+	return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+}
+
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+{
+	struct amdgpu_job *sched_job;
+	struct amdgpu_fence *fence;
+	int r;
+
+	if (!job) {
+		DRM_ERROR("job is null\n");
+		return NULL;
+	}
+	sched_job = (struct amdgpu_job *)job;
+	mutex_lock(&sched_job->job_lock);
+	r = amdgpu_ib_schedule(sched_job->adev,
+			       sched_job->num_ibs,
+			       sched_job->ibs,
+			       sched_job->base.owner);
+	if (r)
+		goto err;
+	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
+
+	if (sched_job->free_job)
+		sched_job->free_job(sched_job);
+
+	mutex_unlock(&sched_job->job_lock);
+	return &fence->base;
+
+err:
+	DRM_ERROR("Run job error\n");
+	mutex_unlock(&sched_job->job_lock);
+	job->sched->ops->process_job(job);
+	return NULL;
+}
+
+static void amdgpu_sched_process_job(struct amd_sched_job *job)
+{
+	struct amdgpu_job *sched_job;
+
+	if (!job) {
+		DRM_ERROR("job is null\n");
+		return;
+	}
+	sched_job = (struct amdgpu_job *)job;
+	/* after processing job, free memory */
+	fence_put(&sched_job->base.s_fence->base);
+	kfree(sched_job);
+}
+
+struct amd_sched_backend_ops amdgpu_sched_ops = {
+	.dependency = amdgpu_sched_dependency,
+	.run_job = amdgpu_sched_run_job,
+	.process_job = amdgpu_sched_process_job
+};
+
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+					 struct amdgpu_ring *ring,
+					 struct amdgpu_ib *ibs,
+					 unsigned num_ibs,
+					 int (*free_job)(struct amdgpu_job *),
+					 void *owner,
+					 struct fence **f)
+{
+	int r = 0;
+	if (amdgpu_enable_scheduler) {
+		struct amdgpu_job *job =
+			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+		if (!job)
+			return -ENOMEM;
+		job->base.sched = ring->scheduler;
+		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+		job->adev = adev;
+		job->ibs = ibs;
+		job->num_ibs = num_ibs;
+		job->base.owner = owner;
+		mutex_init(&job->job_lock);
+		job->free_job = free_job;
+		mutex_lock(&job->job_lock);
+		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+		if (r) {
+			mutex_unlock(&job->job_lock);
+			kfree(job);
+			return r;
+		}
+		*f = fence_get(&job->base.s_fence->base);
+		mutex_unlock(&job->job_lock);
+	} else {
+		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
+		if (r)
+			return r;
+		*f = fence_get(&ibs[num_ibs - 1].fence->base);
+	}
+
+	return 0;
+}
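
Callers of the new helper never need to know whether amdgpu_enable_scheduler routed the IB through the GPU scheduler or straight to the ring; either way they get back a struct fence to wait on. A hypothetical kernel-internal caller, using only the signature introduced above (free_job omitted for brevity):

static int submit_ib_and_wait(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring,
			      struct amdgpu_ib *ib)
{
	struct fence *f = NULL;
	int r;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 NULL, /* no free_job */
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		return r;

	r = fence_wait(f, false);	/* block until the IB retires */
	fence_put(f);
	return r;
}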
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
index d6d41a42ab65..ff3ca52ec6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
@@ -87,7 +87,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
 
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
 			   struct amdgpu_semaphore **semaphore,
-			   struct amdgpu_fence *fence)
+			   struct fence *fence)
 {
 	if (semaphore == NULL || *semaphore == NULL) {
 		return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 21accbdd0a1a..068aeaff7183 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -32,6 +32,11 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+struct amdgpu_sync_entry {
+	struct hlist_node	node;
+	struct fence		*fence;
+};
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -49,36 +54,104 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		sync->sync_to[i] = NULL;
 
+	hash_init(sync->fences);
 	sync->last_vm_update = NULL;
 }
 
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+	if (a_fence)
+		return a_fence->ring->adev == adev;
+	if (s_fence)
+		return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+	return false;
+}
+
+static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return s_fence->owner == owner;
+	if (a_fence)
+		return a_fence->owner == owner;
+	return false;
+}
+
 /**
- * amdgpu_sync_fence - use the semaphore to sync to a fence
+ * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
  * @fence: fence to sync to
  *
- * Sync to the fence using the semaphore objects
  */
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-		       struct amdgpu_fence *fence)
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f)
 {
+	struct amdgpu_sync_entry *e;
+	struct amdgpu_fence *fence;
 	struct amdgpu_fence *other;
+	struct fence *tmp, *later;
 
-	if (!fence)
-		return;
+	if (!f)
+		return 0;
+
+	if (amdgpu_sync_same_dev(adev, f) &&
+	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) {
+		if (sync->last_vm_update) {
+			tmp = sync->last_vm_update;
+			BUG_ON(f->context != tmp->context);
+			later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
+			sync->last_vm_update = fence_get(later);
+			fence_put(tmp);
+		} else
+			sync->last_vm_update = fence_get(f);
+	}
+
+	fence = to_amdgpu_fence(f);
+	if (!fence || fence->ring->adev != adev) {
+		hash_for_each_possible(sync->fences, e, node, f->context) {
+			struct fence *new;
+			if (unlikely(e->fence->context != f->context))
+				continue;
+			new = fence_get(fence_later(e->fence, f));
+			if (new) {
+				fence_put(e->fence);
+				e->fence = new;
+			}
+			return 0;
+		}
+
+		e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+		if (!e)
+			return -ENOMEM;
+
+		hash_add(sync->fences, &e->node, f->context);
+		e->fence = fence_get(f);
+		return 0;
+	}
 
 	other = sync->sync_to[fence->ring->idx];
 	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
 		amdgpu_fence_later(fence, other));
 	amdgpu_fence_unref(&other);
 
-	if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
-		other = sync->last_vm_update;
-		sync->last_vm_update = amdgpu_fence_ref(
-			amdgpu_fence_later(fence, other));
-		amdgpu_fence_unref(&other);
-	}
+	return 0;
+}
+
+static void *amdgpu_sync_get_owner(struct fence *f)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+	if (s_fence)
+		return s_fence->owner;
+	else if (a_fence)
+		return a_fence->owner;
+	return AMDGPU_FENCE_OWNER_UNDEFINED;
 }
 
 /**
@@ -97,7 +170,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 {
 	struct reservation_object_list *flist;
 	struct fence *f;
-	struct amdgpu_fence *fence;
+	void *fence_owner;
 	unsigned i;
 	int r = 0;
 
@@ -106,11 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 
 	/* always sync to the exclusive fence */
 	f = reservation_object_get_excl(resv);
-	fence = f ? to_amdgpu_fence(f) : NULL;
-	if (fence && fence->ring->adev == adev)
-		amdgpu_sync_fence(sync, fence);
-	else if (f)
-		r = fence_wait(f, true);
+	r = amdgpu_sync_fence(adev, sync, f);
 
 	flist = reservation_object_get_list(resv);
 	if (!flist || r)
@@ -119,20 +188,72 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 	for (i = 0; i < flist->shared_count; ++i) {
 		f = rcu_dereference_protected(flist->shared[i],
 					      reservation_object_held(resv));
-		fence = f ? to_amdgpu_fence(f) : NULL;
-		if (fence && fence->ring->adev == adev) {
-			if (fence->owner != owner ||
-			    fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
-				amdgpu_sync_fence(sync, fence);
-		} else if (f) {
-			r = fence_wait(f, true);
-			if (r)
-				break;
+		if (amdgpu_sync_same_dev(adev, f)) {
+			/* VM updates are only interesting
+			 * for other VM updates and moves.
+			 */
+			fence_owner = amdgpu_sync_get_owner(f);
+			if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
+			    (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
+			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
+			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
+				continue;
+
+			/* Ignore fence from the same owner as
+			 * long as it isn't undefined.
+			 */
+			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+			    fence_owner == owner)
+				continue;
 		}
+
+		r = amdgpu_sync_fence(adev, sync, f);
+		if (r)
+			break;
 	}
 	return r;
 }
 
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+{
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	struct fence *f;
+	int i;
+
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+
+		f = e->fence;
+
+		hash_del(&e->node);
+		kfree(e);
+
+		if (!fence_is_signaled(f))
+			return f;
+
+		fence_put(f);
+	}
+	return NULL;
+}
+
+int amdgpu_sync_wait(struct amdgpu_sync *sync)
+{
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	int i, r;
+
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+		r = fence_wait(e->fence, false);
+		if (r)
+			return r;
+
+		hash_del(&e->node);
+		fence_put(e->fence);
+		kfree(e);
+	}
+	return 0;
+}
+
 /**
  * amdgpu_sync_rings - sync ring to all registered fences
  *
@@ -164,9 +285,9 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 			return -EINVAL;
 		}
 
-		if (count >= AMDGPU_NUM_SYNCS) {
+		if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
 			/* not enough room, wait manually */
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -186,7 +307,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
 		/* signaling wasn't successful, wait manually */
 			amdgpu_ring_undo(other);
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -196,7 +317,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
 		/* waiting wasn't successful, wait manually */
 			amdgpu_ring_undo(other);
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -220,15 +341,23 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
  */
 void amdgpu_sync_free(struct amdgpu_device *adev,
 		      struct amdgpu_sync *sync,
-		      struct amdgpu_fence *fence)
+		      struct fence *fence)
 {
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
 	unsigned i;
 
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+		hash_del(&e->node);
+		fence_put(e->fence);
+		kfree(e);
+	}
+
 	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
 		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		amdgpu_fence_unref(&sync->sync_to[i]);
 
-	amdgpu_fence_unref(&sync->last_vm_update);
+	fence_put(sync->last_vm_update);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index df202999fbfe..f80b1a43be8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -77,7 +77,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		void *gtt_map, *vram_map;
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
-		struct amdgpu_fence *fence = NULL;
+		struct fence *fence = NULL;
 
 		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
@@ -116,13 +116,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		r = amdgpu_fence_wait(fence, false);
+		r = fence_wait(fence, false);
 		if (r) {
 			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 
 		r = amdgpu_bo_kmap(vram_obj, &vram_map);
 		if (r) {
@@ -161,13 +161,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		r = amdgpu_fence_wait(fence, false);
+		r = fence_wait(fence, false);
 		if (r) {
 			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 
 		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
@@ -214,7 +214,7 @@ out_lclean:
 			amdgpu_bo_unref(&gtt_obj[i]);
 		}
 		if (fence)
-			amdgpu_fence_unref(&fence);
+			fence_put(fence);
 		break;
 	}
 
@@ -238,7 +238,7 @@ void amdgpu_test_moves(struct amdgpu_device *adev)
 
 static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
 					     struct amdgpu_ring *ring,
-					     struct amdgpu_fence **fence)
+					     struct fence **fence)
 {
 	uint32_t handle = ring->idx ^ 0xdeafbeef;
 	int r;
@@ -269,15 +269,16 @@ static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
 			DRM_ERROR("Failed to get dummy destroy msg\n");
 			return r;
 		}
-
 	} else {
+		struct amdgpu_fence *a_fence = NULL;
 		r = amdgpu_ring_lock(ring, 64);
 		if (r) {
 			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
 			return r;
 		}
-		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence);
 		amdgpu_ring_unlock_commit(ring);
+		*fence = &a_fence->base;
 	}
 	return 0;
 }
@@ -286,7 +287,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
 			   struct amdgpu_ring *ringA,
 			   struct amdgpu_ring *ringB)
 {
-	struct amdgpu_fence *fence1 = NULL, *fence2 = NULL;
+	struct fence *fence1 = NULL, *fence2 = NULL;
 	struct amdgpu_semaphore *semaphore = NULL;
 	int r;
 
@@ -322,7 +323,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
 
 	mdelay(1000);
 
-	if (amdgpu_fence_signaled(fence1)) {
+	if (fence_is_signaled(fence1)) {
 		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
@@ -335,7 +336,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
 	amdgpu_semaphore_emit_signal(ringB, semaphore);
 	amdgpu_ring_unlock_commit(ringB);
 
-	r = amdgpu_fence_wait(fence1, false);
+	r = fence_wait(fence1, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence 1\n");
 		goto out_cleanup;
@@ -343,7 +344,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
 
 	mdelay(1000);
 
-	if (amdgpu_fence_signaled(fence2)) {
+	if (fence_is_signaled(fence2)) {
 		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
@@ -356,7 +357,7 @@ void amdgpu_test_ring_sync(struct amdgpu_device *adev,
 	amdgpu_semaphore_emit_signal(ringB, semaphore);
 	amdgpu_ring_unlock_commit(ringB);
 
-	r = amdgpu_fence_wait(fence2, false);
+	r = fence_wait(fence2, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence 1\n");
 		goto out_cleanup;
@@ -366,10 +367,10 @@ out_cleanup:
 	amdgpu_semaphore_free(adev, &semaphore, NULL);
 
 	if (fence1)
-		amdgpu_fence_unref(&fence1);
+		fence_put(fence1);
 
 	if (fence2)
-		amdgpu_fence_unref(&fence2);
+		fence_put(fence2);
 
 	if (r)
 		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
@@ -380,7 +381,7 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
 			    struct amdgpu_ring *ringB,
 			    struct amdgpu_ring *ringC)
 {
-	struct amdgpu_fence *fenceA = NULL, *fenceB = NULL;
+	struct fence *fenceA = NULL, *fenceB = NULL;
 	struct amdgpu_semaphore *semaphore = NULL;
 	bool sigA, sigB;
 	int i, r;
@@ -416,11 +417,11 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
 
 	mdelay(1000);
 
-	if (amdgpu_fence_signaled(fenceA)) {
+	if (fence_is_signaled(fenceA)) {
 		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
-	if (amdgpu_fence_signaled(fenceB)) {
+	if (fence_is_signaled(fenceB)) {
 		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
@@ -435,8 +436,8 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
 
 	for (i = 0; i < 30; ++i) {
 		mdelay(100);
-		sigA = amdgpu_fence_signaled(fenceA);
-		sigB = amdgpu_fence_signaled(fenceB);
+		sigA = fence_is_signaled(fenceA);
+		sigB = fence_is_signaled(fenceB);
 		if (sigA || sigB)
 			break;
 	}
@@ -461,12 +462,12 @@ static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
 
 	mdelay(1000);
 
-	r = amdgpu_fence_wait(fenceA, false);
+	r = fence_wait(fenceA, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence A\n");
 		goto out_cleanup;
 	}
-	r = amdgpu_fence_wait(fenceB, false);
+	r = fence_wait(fenceB, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence B\n");
 		goto out_cleanup;
@@ -476,10 +477,10 @@ out_cleanup:
 	amdgpu_semaphore_free(adev, &semaphore, NULL);
 
 	if (fenceA)
-		amdgpu_fence_unref(&fenceA);
+		fence_put(fenceA);
 
 	if (fenceB)
-		amdgpu_fence_unref(&fenceB);
+		fence_put(fenceB);
 
 	if (r)
 		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dd3415d2e45d..b5abd5cde413 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -228,7 +228,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	struct amdgpu_device *adev;
 	struct amdgpu_ring *ring;
 	uint64_t old_start, new_start;
-	struct amdgpu_fence *fence;
+	struct fence *fence;
 	int r;
 
 	adev = amdgpu_get_adev(bo->bdev);
@@ -269,9 +269,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 			       new_mem->num_pages * PAGE_SIZE, /* bytes */
 			       bo->resv, &fence);
 	/* FIXME: handle copy error */
-	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
+	r = ttm_bo_move_accel_cleanup(bo, fence,
 				      evict, no_wait_gpu, new_mem);
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	return r;
 }
 
@@ -859,7 +859,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
 	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 			     NULL, &adev->stollen_vga_memory);
 	if (r) {
 		return r;
@@ -987,46 +988,48 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct amdgpu_fence **fence)
+		       struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_sync sync;
 	uint32_t max_bytes;
 	unsigned num_loops, num_dw;
+	struct amdgpu_ib *ib;
 	unsigned i;
 	int r;
 
-	/* sync other rings */
-	amdgpu_sync_create(&sync);
-	if (resv) {
-		r = amdgpu_sync_resv(adev, &sync, resv, false);
-		if (r) {
-			DRM_ERROR("sync failed (%d).\n", r);
-			amdgpu_sync_free(adev, &sync, NULL);
-			return r;
-		}
-	}
-
 	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
 	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
 	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
 
-	/* for fence and sync */
-	num_dw += 64 + AMDGPU_NUM_SYNCS * 8;
+	/* for IB padding */
+	while (num_dw & 0x7)
+		num_dw++;
+
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
 
-	r = amdgpu_ring_lock(ring, num_dw);
+	r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib);
 	if (r) {
-		DRM_ERROR("ring lock failed (%d).\n", r);
-		amdgpu_sync_free(adev, &sync, NULL);
+		kfree(ib);
 		return r;
 	}
 
-	amdgpu_sync_rings(&sync, ring);
+	ib->length_dw = 0;
+
+	if (resv) {
+		r = amdgpu_sync_resv(adev, &ib->sync, resv,
+				     AMDGPU_FENCE_OWNER_UNDEFINED);
+		if (r) {
+			DRM_ERROR("sync failed (%d).\n", r);
+			goto error_free;
+		}
+	}
 
 	for (i = 0; i < num_loops; i++) {
 		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
-		amdgpu_emit_copy_buffer(adev, ring, src_offset, dst_offset,
+		amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset,
 					cur_size_in_bytes);
 
 		src_offset += cur_size_in_bytes;
@@ -1034,17 +1037,24 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		byte_count -= cur_size_in_bytes;
 	}
 
-	r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_MOVE, fence);
-	if (r) {
-		amdgpu_ring_unlock_undo(ring);
-		amdgpu_sync_free(adev, &sync, NULL);
-		return r;
-	}
-
-	amdgpu_ring_unlock_commit(ring);
-	amdgpu_sync_free(adev, &sync, *fence);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > num_dw);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_MOVE,
+						 fence);
+	if (r)
+		goto error_free;
 
+	if (!amdgpu_enable_scheduler) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
+	}
 	return 0;
+error_free:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return r;
 }
 
 #if defined(CONFIG_DEBUG_FS)
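The padding loop in the rewritten amdgpu_copy_buffer ("while (num_dw & 0x7) num_dw++;") is simply a round-up of the dword count to the next multiple of 8, matching what the kernel's ALIGN(num_dw, 8) would produce. A quick standalone check (userspace C, illustrative only):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (unsigned n = 0; n < 64; n++) {
		unsigned dw = n;

		while (dw & 0x7)	/* the loop from amdgpu_copy_buffer */
			dw++;
		assert(dw == ((n + 7) & ~7u));	/* same as ALIGN(n, 8) */
	}
	printf("padding loop == round-up to a multiple of 8\n");
	return 0;
}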
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index f5c22556ec2c..2cf6c6b06e3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -52,6 +52,7 @@
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
+#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -81,6 +82,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
 #endif
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+MODULE_FIRMWARE(FIRMWARE_FIJI);
 
 static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -116,6 +118,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	case CHIP_TONGA:
 		fw_name = FIRMWARE_TONGA;
 		break;
+	case CHIP_FIJI:
+		fw_name = FIRMWARE_FIJI;
+		break;
 	case CHIP_CARRIZO:
 		fw_name = FIRMWARE_CARRIZO;
 		break;
@@ -149,7 +154,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
 		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &adev->uvd.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 		return r;
@@ -216,31 +223,32 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-	unsigned size;
-	void *ptr;
-	const struct common_firmware_header *hdr;
-	int i;
+	struct amdgpu_ring *ring = &adev->uvd.ring;
+	int i, r;
 
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			break;
-
-	if (i == AMDGPU_MAX_UVD_HANDLES)
-		return 0;
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+		if (handle != 0) {
+			struct fence *fence;
 
-	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+			amdgpu_uvd_note_usage(adev);
 
-	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	size -= le32_to_cpu(hdr->ucode_size_bytes);
+			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD (%d)!\n", r);
+				continue;
+			}
 
-	ptr = adev->uvd.cpu_addr;
-	ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			fence_wait(fence, false);
+			fence_put(fence);
 
-	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-	memcpy(adev->uvd.saved_bo, ptr, size);
+			adev->uvd.filp[i] = NULL;
+			atomic_set(&adev->uvd.handles[i], 0);
+		}
+	}
 
 	return 0;
 }
@@ -265,12 +273,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 	ptr = adev->uvd.cpu_addr;
 	ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-	if (adev->uvd.saved_bo != NULL) {
-		memcpy(ptr, adev->uvd.saved_bo, size);
-		kfree(adev->uvd.saved_bo);
-		adev->uvd.saved_bo = NULL;
-	} else
-		memset(ptr, 0, size);
+	memset(ptr, 0, size);
 
 	return 0;
 }
@@ -283,7 +286,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 		if (handle != 0 && adev->uvd.filp[i] == filp) {
-			struct amdgpu_fence *fence;
+			struct fence *fence;
 
 			amdgpu_uvd_note_usage(adev);
 
@@ -293,8 +296,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 				continue;
 			}
 
-			amdgpu_fence_wait(fence, false);
-			amdgpu_fence_unref(&fence);
+			fence_wait(fence, false);
+			fence_put(fence);
 
 			adev->uvd.filp[i] = NULL;
 			atomic_set(&adev->uvd.handles[i], 0);
@@ -374,7 +377,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 	unsigned height_in_mb = ALIGN(height / 16, 2);
 	unsigned fs_in_mb = width_in_mb * height_in_mb;
 
-	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
+	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+	unsigned min_ctx_size = 0;
 
 	image_size = width * height;
 	image_size += image_size / 2;
@@ -507,28 +511,25 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 {
 	struct amdgpu_device *adev = ctx->parser->adev;
 	int32_t *msg, msg_type, handle;
-	struct fence *f;
 	void *ptr;
-
-	int i, r;
+	long r;
+	int i;
 
 	if (offset & 0x3F) {
 		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
 		return -EINVAL;
 	}
 
-	f = reservation_object_get_excl(bo->tbo.resv);
-	if (f) {
-		r = amdgpu_fence_wait((struct amdgpu_fence *)f, false);
-		if (r) {
-			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
-			return r;
-		}
+	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+						MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
+		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
+		return r;
 	}
 
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
-		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
 		return r;
 	}
 
@@ -803,14 +804,24 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 	return 0;
 }
 
+static int amdgpu_uvd_free_job(
+	struct amdgpu_job *sched_job)
+{
+	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct fence *f = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
 	int i, r;
 
@@ -832,34 +843,49 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 	if (r)
 		goto err;
-
-	r = amdgpu_ib_get(ring, NULL, 64, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib) {
+		r = -ENOMEM;
 		goto err;
+	}
+	r = amdgpu_ib_get(ring, NULL, 64, ib);
+	if (r)
+		goto err1;
 
 	addr = amdgpu_bo_gpu_offset(bo);
-	ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
-	ib.ptr[1] = addr;
-	ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
-	ib.ptr[3] = addr >> 32;
-	ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
-	ib.ptr[5] = 0;
+	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
+	ib->ptr[1] = addr;
+	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
+	ib->ptr[3] = addr >> 32;
+	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
+	ib->ptr[5] = 0;
 	for (i = 6; i < 16; ++i)
-		ib.ptr[i] = PACKET2(0);
-	ib.length_dw = 16;
+		ib->ptr[i] = PACKET2(0);
+	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_uvd_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
 	if (r)
-		goto err;
-	ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
+		goto err2;
 
-	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
+	ttm_eu_fence_buffer_objects(&ticket, &head, f);
 
-	amdgpu_ib_free(ring->adev, &ib);
+	if (fence)
+		*fence = fence_get(f);
 	amdgpu_bo_unref(&bo);
-	return 0;
+	fence_put(f);
+	if (amdgpu_enable_scheduler)
+		return 0;
 
+	amdgpu_ib_free(ring->adev, ib);
+	kfree(ib);
+	return 0;
+err2:
+	amdgpu_ib_free(ring->adev, ib);
+err1:
+	kfree(ib);
 err:
 	ttm_eu_backoff_reservation(&ticket, &head);
 	return r;
@@ -869,7 +895,7 @@ err:
    crash the vcpu so just try to emit a dummy create/destroy msg to
    avoid this */
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence)
+			      struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -877,7 +903,9 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	int r, i;
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &bo);
 	if (r)
 		return r;
 
@@ -916,7 +944,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -924,7 +952,9 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	int r, i;
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &bo);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 2255aa710e33..1724c2c86151 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence);
+			      struct fence **fence);
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence);
+			       struct fence **fence);
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
 			     struct drm_file *filp);
 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index d3ca73090e39..3cab96c42aa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -48,6 +48,7 @@
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
+#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
 
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -58,6 +59,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
 #endif
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+MODULE_FIRMWARE(FIRMWARE_FIJI);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
@@ -101,6 +103,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	case CHIP_CARRIZO:
 		fw_name = FIRMWARE_CARRIZO;
 		break;
+	case CHIP_FIJI:
+		fw_name = FIRMWARE_FIJI;
+		break;
 
 	default:
 		return -EINVAL;
@@ -136,7 +141,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	/* allocate firmware, stack and heap BO */
 
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &adev->vce.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
 		return r;
@@ -334,6 +341,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	}
 }
 
+static int amdgpu_vce_free_job(
+	struct amdgpu_job *sched_job)
+{
+	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 /**
  * amdgpu_vce_get_create_msg - generate a VCE create msg
  *
@@ -345,59 +360,69 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
  * Open up a stream for HW test
  */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence)
+			      struct fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct fence *f = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t dummy;
 	int i, r;
 
-	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+		kfree(ib);
 		return r;
 	}
 
-	dummy = ib.gpu_addr + 1024;
+	dummy = ib->gpu_addr + 1024;
 
 	/* stitch together a VCE create msg */
-	ib.length_dw = 0;
-	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
-	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
-	ib.ptr[ib.length_dw++] = handle;
-
-	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
-	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
-	ib.ptr[ib.length_dw++] = 0x00000000;
-	ib.ptr[ib.length_dw++] = 0x00000042;
-	ib.ptr[ib.length_dw++] = 0x0000000a;
-	ib.ptr[ib.length_dw++] = 0x00000001;
-	ib.ptr[ib.length_dw++] = 0x00000080;
-	ib.ptr[ib.length_dw++] = 0x00000060;
-	ib.ptr[ib.length_dw++] = 0x00000100;
-	ib.ptr[ib.length_dw++] = 0x00000100;
-	ib.ptr[ib.length_dw++] = 0x0000000c;
-	ib.ptr[ib.length_dw++] = 0x00000000;
-
-	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
-	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
-	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
-	ib.ptr[ib.length_dw++] = dummy;
-	ib.ptr[ib.length_dw++] = 0x00000001;
-
-	for (i = ib.length_dw; i < ib_size_dw; ++i)
-		ib.ptr[i] = 0x0;
-
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-	}
-
+	ib->length_dw = 0;
+	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
+	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
+	ib->ptr[ib->length_dw++] = handle;
+
+	ib->ptr[ib->length_dw++] = 0x00000030; /* len */
+	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
+	ib->ptr[ib->length_dw++] = 0x00000000;
+	ib->ptr[ib->length_dw++] = 0x00000042;
+	ib->ptr[ib->length_dw++] = 0x0000000a;
+	ib->ptr[ib->length_dw++] = 0x00000001;
+	ib->ptr[ib->length_dw++] = 0x00000080;
+	ib->ptr[ib->length_dw++] = 0x00000060;
+	ib->ptr[ib->length_dw++] = 0x00000100;
+	ib->ptr[ib->length_dw++] = 0x00000100;
+	ib->ptr[ib->length_dw++] = 0x0000000c;
+	ib->ptr[ib->length_dw++] = 0x00000000;
+
+	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
+	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
+	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = 0x00000001;
+
+	for (i = ib->length_dw; i < ib_size_dw; ++i)
+		ib->ptr[i] = 0x0;
+
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vce_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err;
 	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
-
-	amdgpu_ib_free(ring->adev, &ib);
-
+		*fence = fence_get(f);
+	fence_put(f);
+	if (amdgpu_enable_scheduler)
+		return 0;
+err:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 	return r;
 }
 
@@ -412,49 +437,59 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
  * Close up a stream for HW test or if userspace failed to do so
  */
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct fence *f = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t dummy;
 	int i, r;
 
-	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
 	if (r) {
+		kfree(ib);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 		return r;
 	}
 
-	dummy = ib.gpu_addr + 1024;
+	dummy = ib->gpu_addr + 1024;
 
 	/* stitch together a VCE destroy msg */
-	ib.length_dw = 0;
-	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
-	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
-	ib.ptr[ib.length_dw++] = handle;
-
-	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
-	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
-	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
-	ib.ptr[ib.length_dw++] = dummy;
-	ib.ptr[ib.length_dw++] = 0x00000001;
-
-	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
-	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
-
-	for (i = ib.length_dw; i < ib_size_dw; ++i)
-		ib.ptr[i] = 0x0;
-
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-	}
-
+	ib->length_dw = 0;
+	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
+	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
+	ib->ptr[ib->length_dw++] = handle;
+
+	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
+	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
+	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = 0x00000001;
+
+	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
+	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
+
+	for (i = ib->length_dw; i < ib_size_dw; ++i)
+		ib->ptr[i] = 0x0;
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vce_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err;
 	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
-
-	amdgpu_ib_free(ring->adev, &ib);
-
+		*fence = fence_get(f);
+	fence_put(f);
+	if (amdgpu_enable_scheduler)
+		return 0;
+err:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 	return r;
 }
 
@@ -800,9 +835,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
  */
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
 {
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
+	/* skip vce ring1 ib test for now, since it's not reliable */
+	if (ring == &ring->adev->vce.ring[1])
+		return 0;
+
 	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
@@ -815,13 +854,13 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 	} else {
 		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 	}
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 7ccdb5927da5..ba2da8ee5906 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,9 +29,9 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence);
+			      struct fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence);
+			       struct fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9a4e3b63f1cb..f68b7cdc370a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -127,16 +127,16 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
- * @ring: ring we want to submit job to
  * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
  *
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
  *
- * Global and local mutex must be locked!
+ * Global mutex must be locked!
  */
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm)
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync)
 {
 	struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -148,7 +148,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
 	/* check if the id is still valid */
 	if (vm_id->id && vm_id->last_id_use &&
 	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
-		return NULL;
+		return 0;
 
 	/* we definitely need to flush */
 	vm_id->pd_gpu_addr = ~0ll;
@@ -161,7 +161,7 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
 			/* found a free one */
 			vm_id->id = i;
 			trace_amdgpu_vm_grab_id(i, ring->idx);
-			return NULL;
+			return 0;
 		}
 
 		if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
@@ -172,15 +172,19 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
 
 	for (i = 0; i < 2; ++i) {
 		if (choices[i]) {
+			struct amdgpu_fence *fence;
+
+			fence  = adev->vm_manager.active[choices[i]];
 			vm_id->id = choices[i];
+
 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-			return adev->vm_manager.active[choices[i]];
+			return amdgpu_sync_fence(ring->adev, sync, &fence->base);
 		}
 	}
 
 	/* should never happen */
 	BUG();
-	return NULL;
+	return -EINVAL;
 }
 
 /**
@@ -196,17 +200,29 @@ struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates)
+		     struct fence *updates)
 {
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+	struct fence *flushed_updates = vm_id->flushed_updates;
+	bool is_earlier = false;
 
-	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
-	    amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) {
+	if (flushed_updates && updates) {
+		BUG_ON(flushed_updates->context != updates->context);
+		is_earlier = (updates->seqno - flushed_updates->seqno <=
+			      INT_MAX) ? true : false;
+	}
+
+	if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
+	    is_earlier) {
 
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		amdgpu_fence_unref(&vm_id->flushed_updates);
-		vm_id->flushed_updates = amdgpu_fence_ref(updates);
+		if (is_earlier) {
+			vm_id->flushed_updates = fence_get(updates);
+			fence_put(flushed_updates);
+		}
+		if (!flushed_updates)
+			vm_id->flushed_updates = fence_get(updates);
 		vm_id->pd_gpu_addr = pd_addr;
 		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
 	}
@@ -300,6 +316,15 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 	}
 }
 
+int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+{
+	int i;
+	for (i = 0; i < sched_job->num_ibs; i++)
+		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -310,7 +335,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_bo *bo)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
-	struct amdgpu_ib ib;
+	struct fence *fence = NULL;
+	struct amdgpu_ib *ib;
 	unsigned entries;
 	uint64_t addr;
 	int r;
@@ -330,24 +356,33 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
-	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
 		goto error_unreserve;
 
-	ib.length_dw = 0;
-
-	amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
-	amdgpu_vm_pad_ib(adev, &ib);
-	WARN_ON(ib.length_dw > 64);
-
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
 	if (r)
 		goto error_free;
 
-	amdgpu_bo_fence(bo, ib.fence, true);
-
+	ib->length_dw = 0;
+
+	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > 64);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_VM,
+						 &fence);
+	if (!r)
+		amdgpu_bo_fence(bo, fence, true);
+	fence_put(fence);
+	if (amdgpu_enable_scheduler) {
+		amdgpu_bo_unreserve(bo);
+		return 0;
+	}
 error_free:
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 
 error_unreserve:
 	amdgpu_bo_unreserve(bo);
@@ -400,7 +435,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib;
+	struct fence *fence = NULL;
+
 	int r;
 
 	/* padding, etc. */
@@ -413,10 +450,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
 	if (r)
 		return r;
-	ib.length_dw = 0;
+	ib->length_dw = 0;
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -436,7 +477,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, &ib, last_pde,
+				amdgpu_vm_update_pages(adev, ib, last_pde,
 						       last_pt, count, incr,
 						       AMDGPU_PTE_VALID, 0);
 			}
@@ -450,23 +491,37 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
+		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
 				       incr, AMDGPU_PTE_VALID, 0);
 
-	if (ib.length_dw != 0) {
-		amdgpu_vm_pad_ib(adev, &ib);
-		amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(ib.length_dw > ndw);
-		r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
-		if (r) {
-			amdgpu_ib_free(adev, &ib);
-			return r;
-		}
-		amdgpu_bo_fence(pd, ib.fence, true);
+	if (ib->length_dw != 0) {
+		amdgpu_vm_pad_ib(adev, ib);
+		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+		WARN_ON(ib->length_dw > ndw);
+		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+							 &amdgpu_vm_free_job,
+							 AMDGPU_FENCE_OWNER_VM,
+							 &fence);
+		if (r)
+			goto error_free;
+
+		amdgpu_bo_fence(pd, fence, true);
+		fence_put(vm->page_directory_fence);
+		vm->page_directory_fence = fence_get(fence);
+		fence_put(fence);
+	}
+
+	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
 	}
-	amdgpu_ib_free(adev, &ib);
 
 	return 0;
+
+error_free:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return r;
 }
 
 /**
@@ -572,9 +627,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 {
 	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 	uint64_t last_pte = ~0, last_dst = ~0;
+	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned count = 0;
 	uint64_t addr;
 
+	/* sync to everything on unmapping */
+	if (!(flags & AMDGPU_PTE_VALID))
+		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
 	/* walk over the address space and update the page tables */
 	for (addr = start; addr < end; ) {
 		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
@@ -583,8 +643,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 		uint64_t pte;
 		int r;
 
-		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
-				 AMDGPU_FENCE_OWNER_VM);
+		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
 		r = reservation_object_reserve_shared(pt->tbo.resv);
 		if (r)
 			return r;
@@ -640,7 +699,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 				uint64_t start, uint64_t end,
-				struct amdgpu_fence *fence)
+				struct fence *fence)
 {
 	unsigned i;
 
@@ -670,12 +729,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 				       struct amdgpu_vm *vm,
 				       struct amdgpu_bo_va_mapping *mapping,
 				       uint64_t addr, uint32_t gtt_flags,
-				       struct amdgpu_fence **fence)
+				       struct fence **fence)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	unsigned nptes, ncmds, ndw;
 	uint32_t flags = gtt_flags;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib;
+	struct fence *f = NULL;
 	int r;
 
 	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -722,46 +782,54 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
-	if (r)
-		return r;
-	ib.length_dw = 0;
-
-	if (!(flags & AMDGPU_PTE_VALID)) {
-		unsigned i;
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
 
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_fence *f = vm->ids[i].last_id_use;
-			amdgpu_sync_fence(&ib.sync, f);
-		}
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
+	if (r) {
+		kfree(ib);
+		return r;
 	}
 
-	r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
+	ib->length_dw = 0;
+
+	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
 				  mapping->it.last + 1, addr + mapping->offset,
 				  flags, gtt_flags);
 
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
 		return r;
 	}
 
-	amdgpu_vm_pad_ib(adev, &ib);
-	WARN_ON(ib.length_dw > ndw);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > ndw);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_VM,
+						 &f);
+	if (r)
+		goto error_free;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		return r;
-	}
 	amdgpu_vm_fence_pts(vm, mapping->it.start,
-			    mapping->it.last + 1, ib.fence);
+			    mapping->it.last + 1, f);
 	if (fence) {
-		amdgpu_fence_unref(fence);
-		*fence = amdgpu_fence_ref(ib.fence);
+		fence_put(*fence);
+		*fence = fence_get(f);
+	}
+	fence_put(f);
+	if (!amdgpu_enable_scheduler) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
 	}
-	amdgpu_ib_free(adev, &ib);
-
 	return 0;
+
+error_free:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return r;
 }
 
 /**
@@ -794,21 +862,25 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		addr = 0;
 	}
 
-	if (addr == bo_va->addr)
-		return 0;
-
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
 
-	list_for_each_entry(mapping, &bo_va->mappings, list) {
+	spin_lock(&vm->status_lock);
+	if (!list_empty(&bo_va->vm_status))
+		list_splice_init(&bo_va->valids, &bo_va->invalids);
+	spin_unlock(&vm->status_lock);
+
+	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
 						flags, &bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
 
-	bo_va->addr = addr;
 	spin_lock(&vm->status_lock);
+	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	list_del_init(&bo_va->vm_status);
+	if (!mem)
+		list_add(&bo_va->vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
 	return 0;
@@ -861,7 +933,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = NULL;
-	int r;
+	int r = 0;
 
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->invalidated)) {
@@ -878,8 +950,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 	spin_unlock(&vm->status_lock);
 
 	if (bo_va)
-		amdgpu_sync_fence(sync, bo_va->last_pt_update);
-	return 0;
+		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
+
+	return r;
 }
 
 /**
@@ -907,10 +980,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	}
 	bo_va->vm = vm;
 	bo_va->bo = bo;
-	bo_va->addr = 0;
 	bo_va->ref_count = 1;
 	INIT_LIST_HEAD(&bo_va->bo_list);
-	INIT_LIST_HEAD(&bo_va->mappings);
+	INIT_LIST_HEAD(&bo_va->valids);
+	INIT_LIST_HEAD(&bo_va->invalids);
 	INIT_LIST_HEAD(&bo_va->vm_status);
 
 	mutex_lock(&vm->mutex);
@@ -999,12 +1072,10 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	mapping->offset = offset;
 	mapping->flags = flags;
 
-	list_add(&mapping->list, &bo_va->mappings);
+	list_add(&mapping->list, &bo_va->invalids);
 	interval_tree_insert(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
-	bo_va->addr = 0;
-
 	/* Make sure the page tables are allocated */
 	saddr >>= amdgpu_vm_block_size;
 	eaddr >>= amdgpu_vm_block_size;
@@ -1028,7 +1099,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+				     AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+				     NULL, &pt);
 		if (r)
 			goto error_free;
 
@@ -1085,17 +1158,27 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 {
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_vm *vm = bo_va->vm;
+	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 
-	list_for_each_entry(mapping, &bo_va->mappings, list) {
+	list_for_each_entry(mapping, &bo_va->valids, list) {
 		if (mapping->it.start == saddr)
 			break;
 	}
 
-	if (&mapping->list == &bo_va->mappings) {
-		amdgpu_bo_unreserve(bo_va->bo);
-		return -ENOENT;
+	if (&mapping->list == &bo_va->valids) {
+		valid = false;
+
+		list_for_each_entry(mapping, &bo_va->invalids, list) {
+			if (mapping->it.start == saddr)
+				break;
+		}
+
+		if (&mapping->list == &bo_va->invalids) {
+			amdgpu_bo_unreserve(bo_va->bo);
+			return -ENOENT;
+		}
 	}
 
 	mutex_lock(&vm->mutex);
@@ -1103,12 +1186,10 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	interval_tree_remove(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-	if (bo_va->addr) {
-		/* clear the old address */
+	if (valid)
 		list_add(&mapping->list, &vm->freed);
-	} else {
+	else
 		kfree(mapping);
-	}
 	mutex_unlock(&vm->mutex);
 	amdgpu_bo_unreserve(bo_va->bo);
 
@@ -1139,16 +1220,19 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 	list_del(&bo_va->vm_status);
 	spin_unlock(&vm->status_lock);
 
-	list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
+	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
 		interval_tree_remove(&mapping->it, &vm->va);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
-		if (bo_va->addr)
-			list_add(&mapping->list, &vm->freed);
-		else
-			kfree(mapping);
+		list_add(&mapping->list, &vm->freed);
+	}
+	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
+		list_del(&mapping->list);
+		interval_tree_remove(&mapping->it, &vm->va);
+		kfree(mapping);
 	}
-	amdgpu_fence_unref(&bo_va->last_pt_update);
+
+	fence_put(bo_va->last_pt_update);
 	kfree(bo_va);
 
 	mutex_unlock(&vm->mutex);
@@ -1169,12 +1253,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	struct amdgpu_bo_va *bo_va;
 
 	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->addr) {
-			spin_lock(&bo_va->vm->status_lock);
-			list_del(&bo_va->vm_status);
+		spin_lock(&bo_va->vm->status_lock);
+		if (list_empty(&bo_va->vm_status))
 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-			spin_unlock(&bo_va->vm->status_lock);
-		}
+		spin_unlock(&bo_va->vm->status_lock);
 	}
 }
 
@@ -1202,6 +1284,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	vm->va = RB_ROOT;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
+	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 
 	pd_size = amdgpu_vm_directory_size(adev);
@@ -1215,8 +1298,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		return -ENOMEM;
 	}
 
+	vm->page_directory_fence = NULL;
+
 	r = amdgpu_bo_create(adev, pd_size, align, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 			     NULL, &vm->page_directory);
 	if (r)
 		return r;
@@ -1263,9 +1349,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	kfree(vm->page_tables);
 
 	amdgpu_bo_unref(&vm->page_directory);
+	fence_put(vm->page_directory_fence);
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		amdgpu_fence_unref(&vm->ids[i].flushed_updates);
+		fence_put(vm->ids[i].flushed_updates);
 		amdgpu_fence_unref(&vm->ids[i].last_id_use);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 9ba0a7d5bc8e..92b6acadfc52 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -139,7 +139,8 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m
 
 	tx_buf[0] = msg->address & 0xff;
 	tx_buf[1] = msg->address >> 8;
-	tx_buf[2] = msg->request << 4;
+	tx_buf[2] = (msg->request << 4) |
+		((msg->address >> 16) & 0xf);
 	tx_buf[3] = msg->size ? (msg->size - 1) : 0;
 
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index ae8caca61e04..cd6edc40c9cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -812,7 +812,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
 			else
 				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 
-			if ((adev->flags & AMDGPU_IS_APU) &&
+			if ((adev->flags & AMD_IS_APU) &&
 			    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
 				if (is_dp ||
 				    !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 341c56681841..4b6ce74753cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -64,6 +64,8 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
+#include "amdgpu_amdkfd.h"
+
 /*
  * Indirect registers accessor
  */
@@ -836,7 +838,7 @@ static u32 cik_get_xclk(struct amdgpu_device *adev)
 {
 	u32 reference_clock = adev->clock.spll.reference_freq;
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK)
 			return reference_clock / 2;
 	} else {
@@ -1233,7 +1235,7 @@ static void cik_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
 	if (reset_mask & AMDGPU_RESET_VMC)
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		if (reset_mask & AMDGPU_RESET_MC)
 			srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
 	}
@@ -1409,7 +1411,7 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
 	}
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		kv_save_regs_for_reset(adev, &kv_save);
 
 	/* disable BM */
@@ -1427,7 +1429,7 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 	}
 
 	/* does asic init need to be run first??? */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		kv_restore_regs_for_reset(adev, &kv_save);
 }
 
@@ -1568,7 +1570,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
@@ -1728,7 +1730,7 @@ static void cik_program_aspm(struct amdgpu_device *adev)
 		return;
 
 	/* XXX double check APUs */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
@@ -2448,14 +2450,21 @@ static int cik_common_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_amdkfd_suspend(adev);
+
 	return cik_common_hw_fini(adev);
 }
 
 static int cik_common_resume(void *handle)
 {
+	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return cik_common_hw_init(adev);
+	r = cik_common_hw_init(adev);
+	if (r)
+		return r;
+
+	return amdgpu_amdkfd_resume(adev);
 }
 
 static bool cik_common_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 15df46c93f0a..9ea9de457da3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -188,6 +188,19 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 }
 
+static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	int i;
+
+	for (i = 0; i < count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			amdgpu_ring_write(ring, ring->nop |
+					  SDMA_NOP_COUNT(count - 1));
+		else
+			amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -213,8 +226,8 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* IB packet must end on a 8 DW boundary */
-	while ((ring->wptr & 7) != 4)
-		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+	cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
+
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
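[Editor's note] Two things change in the hunks above: padding is computed in one shot instead of a write-per-iteration loop, and cik_sdma_ring_insert_nop can fold the whole run into a burst NOP whose first packet carries the count in bits 29:16 (see SDMA_NOP_COUNT added to cikd.h later in this patch). The 4-DW INDIRECT_BUFFER packet must end on an 8-DW boundary, so padding has to leave wptr congruent to 4 mod 8. A standalone check of that arithmetic (illustrative):

#include <assert.h>

int main(void)
{
	unsigned w, pad;

	for (w = 0; w < 8; w++) {
		pad = (12 - (w & 7)) % 8;	/* NOPs before the IB packet */
		assert(((w + pad) & 7) == 4);	/* +4 header DWs ends on an 8-DW boundary */
	}
	return 0;
}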
@@ -501,6 +514,8 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma[i].feature_version >= 20)
+			adev->sdma[i].burst_nop = true;
 		fw_data = (const __le32 *)
 			(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
@@ -614,6 +629,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	unsigned i;
 	unsigned index;
 	int r;
@@ -629,12 +645,11 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
-
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		return r;
+		goto err0;
 	}
 
 	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -643,20 +658,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err1;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = fence_wait(f, false);
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		return r;
+		goto err1;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -666,12 +677,17 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err1;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
+
+err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err0:
 	amdgpu_wb_free(adev, index);
 	return r;
 }
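[Editor's note] The rewritten test routes every failure through shared labels so teardown runs in reverse order of setup: the fence is dropped first, then the IB, then the writeback slot. fence_put() is a no-op on NULL, which is why initializing f to NULL lets err1 be reached before any fence exists. A generic sketch of the shape, with hypothetical acquire/release helpers:

static int demo_submit(void)
{
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	int r;

	r = get_slot();			/* hypothetical */
	if (r)
		return r;
	r = get_ib(&ib);		/* hypothetical */
	if (r)
		goto err0;
	r = submit(&ib, &f);		/* hypothetical; may fail before f is set */
	if (r)
		goto err1;
	/* ... wait on f, check results, fall through to tear down ... */
err1:
	fence_put(f);			/* no-op when f is still NULL */
	free_ib(&ib);			/* hypothetical */
err0:
	free_slot();			/* hypothetical */
	return r;
}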
@@ -814,8 +830,19 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	u32 pad_count;
+	int i;
+
+	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	for (i = 0; i < pad_count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			ib->ptr[ib->length_dw++] =
+					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
+					SDMA_NOP_COUNT(pad_count - 1);
+		else
+			ib->ptr[ib->length_dw++] =
+					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
 }
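[Editor's note] Same idea as the ring padding earlier, applied to IB contents: (8 - (len & 7)) % 8 rounds the IB length up to the next multiple of 8 and emits nothing when it is already aligned, with the run again collapsible into one burst NOP. A one-liner sanity check (illustrative):

/* pad_to_8(21) == 3 -> 24 DWs; pad_to_8(16) == 0 -> stays 16 */
static unsigned pad_to_8(unsigned len)
{
	return (8 - (len & 7)) % 8;
}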
 
 /**
@@ -1302,6 +1329,7 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.test_ring = cik_sdma_ring_test_ring,
 	.test_ib = cik_sdma_ring_test_ib,
 	.is_lockup = cik_sdma_ring_is_lockup,
+	.insert_nop = cik_sdma_ring_insert_nop,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1338,18 +1366,18 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
  * Used by the amdgpu ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring,
+static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
 				      uint64_t src_offset,
 				      uint64_t dst_offset,
 				      uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
-	amdgpu_ring_write(ring, byte_count);
-	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
-	amdgpu_ring_write(ring, lower_32_bits(src_offset));
-	amdgpu_ring_write(ring, upper_32_bits(src_offset));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
+	ib->ptr[ib->length_dw++] = byte_count;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
 }
 
 /**
@@ -1362,16 +1390,16 @@ static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring,
  *
  * Fill GPU buffers using the DMA engine (CIK).
  */
-static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring,
+static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
 				      uint32_t src_data,
 				      uint64_t dst_offset,
 				      uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-	amdgpu_ring_write(ring, src_data);
-	amdgpu_ring_write(ring, byte_count);
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
@@ -1404,5 +1432,6 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
 		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index d19085a97064..7f6d457f250a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -487,6 +487,7 @@
 					 (((op) & 0xFF) << 0))
 /* sDMA opcodes */
 #define	SDMA_OPCODE_NOP					  0
+#	define SDMA_NOP_COUNT(x)			  (((x) & 0x3FFF) << 16)
 #define	SDMA_OPCODE_COPY				  1
 #       define SDMA_COPY_SUB_OPCODE_LINEAR                0
 #       define SDMA_COPY_SUB_OPCODE_TILED                 1
@@ -552,6 +553,12 @@
 #define VCE_CMD_IB_AUTO		0x00000005
 #define VCE_CMD_SEMAPHORE	0x00000006
 
+/* if PTR32, these are the bases for scratch and lds */
+#define	PRIVATE_BASE(x)	((x) << 0) /* scratch */
+#define	SHARED_BASE(x)	((x) << 16) /* LDS */
+
+#define KFD_CIK_SDMA_QUEUE_OFFSET	0x200
+
 /* valid for both DEFAULT_MTYPE and APE1_MTYPE */
 enum {
 	MTYPE_CACHED = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index ace870afc7d4..44fa96ad4709 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1596,9 +1596,9 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
 
 	if (pi->sys_info.nb_dpm_enable) {
 		if (ps->force_high)
-			cz_dpm_nbdpm_lm_pstate_enable(adev, true);
-		else
 			cz_dpm_nbdpm_lm_pstate_enable(adev, false);
+		else
+			cz_dpm_nbdpm_lm_pstate_enable(adev, true);
 	}
 
 	return ret;
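[Editor's note] The swap above fixes inverted logic: the NB low-memory pstate should be enabled only when the power state does not force high clocks. Equivalently, the two branches collapse into a single call (sketch):

	cz_dpm_nbdpm_lm_pstate_enable(adev, !ps->force_high);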
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index e70a26f587a0..e4d101b1252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -126,9 +126,31 @@ static const u32 tonga_mgcg_cgcg_init[] =
 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
 };
 
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+	mmFBC_MISC, 0x1f311fff, 0x12300000,
+	mmHDMI_CONTROL, 0x31000111, 0x00000011,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+};
+
 static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
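[Editor's note] The golden-register tables are consumed three entries at a time. A hedged sketch of how such a table is applied, assuming amdgpu_program_register_sequence reads (reg, and_mask, or_mask) triplets and does a read-modify-write per register; not necessarily the helper's exact body:

static void apply_golden_regs(struct amdgpu_device *adev,
			      const u32 *regs, u32 array_size)
{
	u32 i, tmp;

	for (i = 0; i < array_size; i += 3) {
		tmp = RREG32(regs[i]);
		tmp &= ~regs[i + 1];	/* clear the masked bits */
		tmp |= regs[i + 2];	/* program the golden value */
		WREG32(regs[i], tmp);
	}
}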
@@ -803,11 +825,11 @@ static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
 			buffer_alloc = 2;
 		} else if (mode->crtc_hdisplay < 4096) {
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		} else {
 			DRM_DEBUG_KMS("Mode too big for LB!\n");
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		}
 	} else {
 		mem_cfg = 1;
@@ -1331,7 +1353,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
-	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	/* restore original selection */
@@ -2888,6 +2910,7 @@ static int dce_v10_0_early_init(void *handle)
 	dce_v10_0_set_irq_funcs(adev);
 
 	switch (adev->asic_type) {
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 		adev->mode_info.num_crtc = 6; /* XXX 7??? */
 		adev->mode_info.num_hpd = 6;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index dcb402ee048a..6411e8244671 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -801,11 +801,11 @@ static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
 			buffer_alloc = 2;
 		} else if (mode->crtc_hdisplay < 4096) {
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		} else {
 			DRM_DEBUG_KMS("Mode too big for LB!\n");
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		}
 	} else {
 		mem_cfg = 1;
@@ -1329,7 +1329,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
-	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	/* restore original selection */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index cc050a329c49..c86911c2ea2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -770,11 +770,11 @@ static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
 			buffer_alloc = 2;
 		} else if (mode->crtc_hdisplay < 4096) {
 			tmp = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		} else {
 			DRM_DEBUG_KMS("Mode too big for LB!\n");
 			tmp = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		}
 	} else {
 		tmp = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
new file mode 100644
index 000000000000..8f9845d9a986
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "fiji_smumgr.h"
+
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
+
+static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
+
+static int fiji_dpm_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	fiji_dpm_set_funcs(adev);
+
+	return 0;
+}
+
+static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
+{
+	char fw_name[30] = "amdgpu/fiji_smc.bin";
+	int err;
+
+	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+	if (err) {
+		DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+		release_firmware(adev->pm.fw);
+		adev->pm.fw = NULL;
+	}
+	return err;
+}
+
+static int fiji_dpm_sw_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	ret = fiji_dpm_init_microcode(adev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int fiji_dpm_sw_fini(void *handle)
+{
+	return 0;
+}
+
+static int fiji_dpm_hw_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	mutex_lock(&adev->pm.mutex);
+
+	ret = fiji_smu_init(adev);
+	if (ret) {
+		DRM_ERROR("SMU initialization failed\n");
+		goto fail;
+	}
+
+	ret = fiji_smu_start(adev);
+	if (ret) {
+		DRM_ERROR("SMU start failed\n");
+		goto fail;
+	}
+
+	mutex_unlock(&adev->pm.mutex);
+	return 0;
+
+fail:
+	adev->firmware.smu_load = false;
+	mutex_unlock(&adev->pm.mutex);
+	return -EINVAL;
+}
+
+static int fiji_dpm_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	mutex_lock(&adev->pm.mutex);
+	fiji_smu_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+	return 0;
+}
+
+static int fiji_dpm_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	fiji_dpm_hw_fini(adev);
+
+	return 0;
+}
+
+static int fiji_dpm_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	fiji_dpm_hw_init(adev);
+
+	return 0;
+}
+
+static int fiji_dpm_set_clockgating_state(void *handle,
+			enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int fiji_dpm_set_powergating_state(void *handle,
+			enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs fiji_dpm_ip_funcs = {
+	.early_init = fiji_dpm_early_init,
+	.late_init = NULL,
+	.sw_init = fiji_dpm_sw_init,
+	.sw_fini = fiji_dpm_sw_fini,
+	.hw_init = fiji_dpm_hw_init,
+	.hw_fini = fiji_dpm_hw_fini,
+	.suspend = fiji_dpm_suspend,
+	.resume = fiji_dpm_resume,
+	.is_idle = NULL,
+	.wait_for_idle = NULL,
+	.soft_reset = NULL,
+	.print_status = NULL,
+	.set_clockgating_state = fiji_dpm_set_clockgating_state,
+	.set_powergating_state = fiji_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
+	.get_temperature = NULL,
+	.pre_set_power_state = NULL,
+	.set_power_state = NULL,
+	.post_set_power_state = NULL,
+	.display_configuration_changed = NULL,
+	.get_sclk = NULL,
+	.get_mclk = NULL,
+	.print_power_state = NULL,
+	.debugfs_print_current_performance_level = NULL,
+	.force_performance_level = NULL,
+	.vblank_too_short = NULL,
+	.powergate_uvd = NULL,
+};
+
+static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
+{
+	if (NULL == adev->pm.funcs)
+		adev->pm.funcs = &fiji_dpm_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
new file mode 100644
index 000000000000..3c4824082990
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef FIJI_PP_SMC_H
+#define FIJI_PP_SMC_H
+
+#pragma pack(push, 1)
+
+#define PPSMC_SWSTATE_FLAG_DC                           0x01
+#define PPSMC_SWSTATE_FLAG_UVD                          0x02
+#define PPSMC_SWSTATE_FLAG_VCE                          0x04
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL             0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL             0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE                 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC                        0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC                       0x02
+#define PPSMC_SYSTEMFLAG_GDDR5                          0x04
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP               0x08
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT                  0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG           0x20
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK              0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK     0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE   0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE  0x01
+
+#define PPSMC_DPM2FLAGS_TDPCLMP                         0x01
+#define PPSMC_DPM2FLAGS_PWRSHFT                         0x02
+#define PPSMC_DPM2FLAGS_OCP                             0x04
+
+#define PPSMC_DISPLAY_WATERMARK_LOW                     0
+#define PPSMC_DISPLAY_WATERMARK_HIGH                    1
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP    		0x01
+#define PPSMC_STATEFLAG_POWERBOOST         		0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 		0x04
+#define PPSMC_STATEFLAG_POWERSHIFT         		0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN   		0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 		0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS   		0x40
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+	FAN_CONTROL_FUZZY,
+	FAN_CONTROL_TABLE
+};
+
+/* Gemini Modes */
+#define PPSMC_GeminiModeNone   0  /* Single GPU board */
+#define PPSMC_GeminiModeMaster 1  /* Master GPU on a Gemini board */
+#define PPSMC_GeminiModeSlave  2  /* Slave GPU on a Gemini board */
+
+#define PPSMC_Result_OK             			((uint16_t)0x01)
+#define PPSMC_Result_NoMore         			((uint16_t)0x02)
+#define PPSMC_Result_NotNow         			((uint16_t)0x03)
+#define PPSMC_Result_Failed         			((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd     			((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT      			((uint16_t)0xFD)
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
+
+#define PPSMC_MSG_Halt                      		((uint16_t)0x10)
+#define PPSMC_MSG_Resume                    		((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel            		((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled        		((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled         		((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled         		((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt    		((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC               		((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp                   		((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown                 		((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters          		((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState           		((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast       		((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState      		((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel             		((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh                 		((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh         		((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower      		((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower    		((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac                 		((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac                		((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart          		((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop           		((uint16_t)0x56)
+#define PPSMC_CACHistoryStart               		((uint16_t)0x57)
+#define PPSMC_CACHistoryStop                		((uint16_t)0x58)
+#define PPSMC_TDPClampingActive             		((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive           		((uint16_t)0x5A)
+#define PPSMC_StartFanControl               		((uint16_t)0x5B)
+#define PPSMC_StopFanControl                		((uint16_t)0x5C)
+#define PPSMC_NoDisplay                     		((uint16_t)0x5D)
+#define PPSMC_HasDisplay                    		((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF               		((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON                		((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV                 		((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV                		((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV                  		((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV                   		((uint16_t)0x65)
+#define PPSMC_PowerShiftActive              		((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive            		((uint16_t)0x6B)
+#define PPSMC_OCPActive                     		((uint16_t)0x6C)
+#define PPSMC_OCPInactive                   		((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable          		((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable         		((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start  		((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop   		((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState    		((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState       		((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start       		((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop        		((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState   		((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState  		((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest        		((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping          		((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln   		((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib    		((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly         		((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr 		((uint16_t)0x7D)
+#define PPSMC_MSG_ExtremitiesTest_Start     		((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop      		((uint16_t)0x7F)
+#define PPSMC_FlushDataCache                		((uint16_t)0x80)
+#define PPSMC_FlushInstrCache               		((uint16_t)0x81)
+#define PPSMC_MSG_SetEnabledLevels          		((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels           		((uint16_t)0x83)
+#define PPSMC_MSG_ResetToDefaults           		((uint16_t)0x84)
+#define PPSMC_MSG_SetForcedLevelsAndJump    		((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode         		((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE                 		((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE                		((uint16_t)0x88)
+#define PPSMC_MSG_SmcSpaceSetAddress        		((uint16_t)0x89)
+#define PPSMC_MSG_SmcSpaceWriteDWordInc     		((uint16_t)0x8A)
+#define PPSMC_MSG_SmcSpaceWriteWordInc      		((uint16_t)0x8B)
+#define PPSMC_MSG_SmcSpaceWriteByteInc      		((uint16_t)0x8C)
+
+#define PPSMC_MSG_BREAK                     		((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test                      		((uint16_t)0x100)
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI            		((uint16_t)0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO            		((uint16_t)0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI            		((uint16_t)0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO            		((uint16_t)0x253)
+#define PPSMC_MSG_LoadUcodes                  		((uint16_t)0x254)
+
+typedef uint16_t PPSMC_Msg;
+
+#define PPSMC_EVENT_STATUS_THERMAL          		0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT     		0x00000002
+#define PPSMC_EVENT_STATUS_DC               		0x00000004
+#define PPSMC_EVENT_STATUS_GPIO17           		0x00000008
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
new file mode 100644
index 000000000000..322edea65857
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -0,0 +1,857 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "fiji_ppsmc.h"
+#include "fiji_smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "amdgpu_ucode.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+#define FIJI_SMC_SIZE 0x20000
+
+static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
+{
+	uint32_t val;
+
+	if (smc_address & 3)
+		return -EINVAL;
+
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
+	WREG32(mmSMC_IND_INDEX_0, smc_address);
+
+	val = RREG32(mmSMC_IND_ACCESS_CNTL);
+	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+	WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+	return 0;
+}
+
+static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+	uint32_t addr;
+	uint32_t data, orig_data;
+	int result = 0;
+	uint32_t extra_shift;
+	unsigned long flags;
+
+	if (smc_start_address & 3)
+		return -EINVAL;
+
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	while (byte_count >= 4) {
+		/* Bytes are written into the SMC address space MSB first */
+		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+		result = fiji_set_smc_sram_address(adev, addr, limit);
+
+		if (result)
+			goto out;
+
+		WREG32(mmSMC_IND_DATA_0, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	if (0 != byte_count) {
+		/* Now write odd bytes left, do a read modify write cycle */
+		data = 0;
+
+		result = fiji_set_smc_sram_address(adev, addr, limit);
+		if (result)
+			goto out;
+
+		orig_data = RREG32(mmSMC_IND_DATA_0);
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+		data |= (orig_data & ~((~0UL) << extra_shift));
+
+		result = fiji_set_smc_sram_address(adev, addr, limit);
+		if (result)
+			goto out;
+
+		WREG32(mmSMC_IND_DATA_0, data);
+	}
+
+out:
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return result;
+}
+
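[Editor's note] The DW loop above writes whole words MSB-first; a 1-3 byte tail instead does a read-modify-write so the untouched low bytes of the last SMC word survive. A host-side model of that merge (illustrative, standalone):

#include <stdint.h>

/* Merge 'byte_count' (1..3) new bytes into the high end of 'orig',
 * preserving the remaining low bytes, as in the tail path above. */
static uint32_t merge_tail(uint32_t orig, const uint8_t *src,
			   uint32_t byte_count)
{
	uint32_t data = 0;
	uint32_t extra_shift = 8 * (4 - byte_count);

	while (byte_count--)
		data = (data << 8) + *src++;

	data <<= extra_shift;
	return data | (orig & ~(~0u << extra_shift));
}
/* merge_tail(0x11223344, "\xAA", 1) == 0xAA223344 */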
+static int fiji_program_jump_on_start(struct amdgpu_device *adev)
+{
+	static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
+	fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+
+	return 0;
+}
+
+static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
+{
+	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
+
+	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
+}
+
+static int wait_smu_response(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t val;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32(mmSMC_RESP_0);
+		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, 0x20000);
+	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send message\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
+{
+	if (!fiji_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MESSAGE_0, msg);
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send message\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
+						PPSMC_Msg msg)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MESSAGE_0, msg);
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+						PPSMC_Msg msg,
+						uint32_t parameter)
+{
+	if (!fiji_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, parameter);
+
+	return fiji_send_msg_to_smc(adev, msg);
+}
+
+static int fiji_send_msg_to_smc_with_parameter_without_waiting(
+					struct amdgpu_device *adev,
+					PPSMC_Msg msg, uint32_t parameter)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, parameter);
+
+	return fiji_send_msg_to_smc_without_waiting(adev, msg);
+}
+
+#if 0 /* not used yet */
+static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t val;
+
+	if (!fiji_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout)
+		return -EINVAL;
+
+	return 0;
+}
+#endif
+
+static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
+{
+	const struct smc_firmware_header_v1_0 *hdr;
+	uint32_t ucode_size;
+	uint32_t ucode_start_address;
+	const uint8_t *src;
+	uint32_t val;
+	uint32_t byte_count;
+	uint32_t *data;
+	unsigned long flags;
+
+	if (!adev->pm.fw)
+		return -EINVAL;
+
+	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+	amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+	src = (const uint8_t *)
+		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+	if (ucode_size & 3) {
+		DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
+		return -EINVAL;
+	}
+
+	if (ucode_size > FIJI_SMC_SIZE) {
+		DRM_ERROR("SMC address is beyond the SMC RAM area\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+
+	val = RREG32(mmSMC_IND_ACCESS_CNTL);
+	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+	WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+	byte_count = ucode_size;
+	data = (uint32_t *)src;
+	for (; byte_count >= 4; data++, byte_count -= 4)
+		WREG32(mmSMC_IND_DATA_0, data[0]);
+
+	val = RREG32(mmSMC_IND_ACCESS_CNTL);
+	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+	WREG32(mmSMC_IND_ACCESS_CNTL, val);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return 0;
+}
+
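[Editor's note] Firmware upload uses the classic indexed-register window: program the SRAM start into SMC_IND_INDEX_0, turn on auto-increment, then stream DWs through SMC_IND_DATA_0 while the hardware advances the address; smc_idx_lock keeps another thread from moving the window mid-stream, and the real path also clears the auto-increment bit afterwards. A hedged sketch of the pattern (register names as used above, helper name hypothetical):

static void stream_to_smc_sram(struct amdgpu_device *adev, u32 start,
			       const u32 *data, u32 ndw)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, start);		/* window base */
	val = RREG32(mmSMC_IND_ACCESS_CNTL);
	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL,
			    AUTO_INCREMENT_IND_0, 1);	/* address advances */
	WREG32(mmSMC_IND_ACCESS_CNTL, val);
	while (ndw--)
		WREG32(mmSMC_IND_DATA_0, *data++);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}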
+#if 0 /* not used yet */
+static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
+				uint32_t smc_address,
+				uint32_t *value,
+				uint32_t limit)
+{
+	int result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	result = fiji_set_smc_sram_address(adev, smc_address, limit);
+	if (result == 0)
+		*value = RREG32(mmSMC_IND_DATA_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return result;
+}
+
+static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
+				uint32_t smc_address,
+				uint32_t value,
+				uint32_t limit)
+{
+	int result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	result = fiji_set_smc_sram_address(adev, smc_address, limit);
+	if (result == 0)
+		WREG32(mmSMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return result;
+}
+
+static int fiji_smu_stop_smc(struct amdgpu_device *adev)
+{
+	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+	return 0;
+}
+#endif
+
+static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
+{
+	switch (fw_type) {
+	case UCODE_ID_SDMA0:
+		return AMDGPU_UCODE_ID_SDMA0;
+	case UCODE_ID_SDMA1:
+		return AMDGPU_UCODE_ID_SDMA1;
+	case UCODE_ID_CP_CE:
+		return AMDGPU_UCODE_ID_CP_CE;
+	case UCODE_ID_CP_PFP:
+		return AMDGPU_UCODE_ID_CP_PFP;
+	case UCODE_ID_CP_ME:
+		return AMDGPU_UCODE_ID_CP_ME;
+	case UCODE_ID_CP_MEC:
+	case UCODE_ID_CP_MEC_JT1:
+	case UCODE_ID_CP_MEC_JT2:
+		return AMDGPU_UCODE_ID_CP_MEC1;
+	case UCODE_ID_RLC_G:
+		return AMDGPU_UCODE_ID_RLC_G;
+	default:
+		DRM_ERROR("ucode type is out of range!\n");
+		return AMDGPU_UCODE_ID_MAXIMUM;
+	}
+}
+
+static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
+						uint32_t fw_type,
+						struct SMU_Entry *entry)
+{
+	enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
+	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
+	const struct gfx_firmware_header_v1_0 *header = NULL;
+	uint64_t gpu_addr;
+	uint32_t data_size;
+
+	if (ucode->fw == NULL)
+		return -EINVAL;
+	gpu_addr  = ucode->mc_addr;
+	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+	data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+	if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
+		(fw_type == UCODE_ID_CP_MEC_JT2)) {
+		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+		data_size = le32_to_cpu(header->jt_size) << 2;
+	}
+
+	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+	entry->id = (uint16_t)fw_type;
+	entry->image_addr_high = upper_32_bits(gpu_addr);
+	entry->image_addr_low = lower_32_bits(gpu_addr);
+	entry->meta_data_addr_high = 0;
+	entry->meta_data_addr_low = 0;
+	entry->data_size_byte = data_size;
+	entry->num_register_entries = 0;
+
+	if (fw_type == UCODE_ID_RLC_G)
+		entry->flags = 1;
+	else
+		entry->flags = 0;
+
+	return 0;
+}
+
+static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
+{
+	struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
+	struct SMU_DRAMData_TOC *toc;
+	uint32_t fw_to_load;
+
+	WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
+
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
+
+	toc = (struct SMU_DRAMData_TOC *)private->header;
+	toc->num_entries = 0;
+	toc->structure_version = 1;
+
+	if (!adev->firmware.smu_load)
+		return 0;
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for RLC\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for CE\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for PFP\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for ME\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for MEC\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for SDMA1\n");
+		return -EINVAL;
+	}
+
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
+
+	fw_to_load = UCODE_ID_RLC_G_MASK |
+			UCODE_ID_SDMA0_MASK |
+			UCODE_ID_SDMA1_MASK |
+			UCODE_ID_CP_CE_MASK |
+			UCODE_ID_CP_ME_MASK |
+			UCODE_ID_CP_PFP_MASK |
+			UCODE_ID_CP_MEC_MASK;
+
+	if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
+		DRM_ERROR("Failed to request SMU to load ucode\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
+{
+	switch (fw_type) {
+	case AMDGPU_UCODE_ID_SDMA0:
+		return UCODE_ID_SDMA0_MASK;
+	case AMDGPU_UCODE_ID_SDMA1:
+		return UCODE_ID_SDMA1_MASK;
+	case AMDGPU_UCODE_ID_CP_CE:
+		return UCODE_ID_CP_CE_MASK;
+	case AMDGPU_UCODE_ID_CP_PFP:
+		return UCODE_ID_CP_PFP_MASK;
+	case AMDGPU_UCODE_ID_CP_ME:
+		return UCODE_ID_CP_ME_MASK;
+	case AMDGPU_UCODE_ID_CP_MEC1:
+	case AMDGPU_UCODE_ID_CP_MEC2:
+		return UCODE_ID_CP_MEC_MASK;
+	case AMDGPU_UCODE_ID_RLC_G:
+		return UCODE_ID_RLC_G_MASK;
+	default:
+		DRM_ERROR("ucode type is out of range!\n");
+		return 0;
+	}
+}
+
+static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
+					uint32_t fw_type)
+{
+	uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
+	int i;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("firmware load completion check timed out\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
+{
+	int result;
+	uint32_t val;
+	int i;
+
+	/* Assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	result = fiji_smu_upload_firmware_image(adev);
+	if (result)
+		return result;
+
+	/* Clear status */
+	WREG32_SMC(ixSMU_STATUS, 0);
+
+	/* Enable clock */
+	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+	/* De-assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	/* Set SMU Auto Start */
+	val = RREG32_SMC(ixSMU_INPUT_DATA);
+	val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
+	WREG32_SMC(ixSMU_INPUT_DATA, val);
+
+	/* Clear firmware interrupt enable flag */
+	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixRCU_UC_EVENTS);
+		if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("Interrupt is not enabled by firmware\n");
+		return -EINVAL;
+	}
+
+	/* Call Test SMU message with 0x20000 offset
+	 * to trigger SMU start
+	 */
+	fiji_send_msg_to_smc_offset(adev);
+	DRM_INFO("[FM]trying to trigger SMU start\n");
+	/* Wait for done bit to be set */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixSMU_STATUS);
+		if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("Timeout for SMU start\n");
+		return -EINVAL;
+	}
+
+	/* Check pass/failed indicator */
+	val = RREG32_SMC(ixSMU_STATUS);
+	if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
+		DRM_ERROR("SMU Firmware start failed\n");
+		return -EINVAL;
+	}
+	DRM_INFO("[FM]smu started\n");
+	/* Wait for firmware to initialize */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixFIRMWARE_FLAGS);
+		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("SMU firmware initialization failed\n");
+		return -EINVAL;
+	}
+	DRM_INFO("[FM]smu initialized\n");
+
+	return 0;
+}
+
+static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
+{
+	int i, result;
+	uint32_t val;
+
+	/* wait for smc boot up */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixRCU_UC_EVENTS);
+		val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
+		if (val)
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("SMC boot sequence is not completed\n");
+		return -EINVAL;
+	}
+
+	/* Clear firmware interrupt enable flag */
+	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
+
+	/* Assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	result = fiji_smu_upload_firmware_image(adev);
+	if (result)
+		return result;
+
+	/* Set SMC instruction start point at 0x0 */
+	fiji_program_jump_on_start(adev);
+
+	/* Enable clock */
+	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+	/* De-assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	/* Wait for firmware to initialize */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixFIRMWARE_FLAGS);
+		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("Timeout for SMC firmware initialization\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int fiji_smu_start(struct amdgpu_device *adev)
+{
+	int result;
+	uint32_t val;
+
+	if (!fiji_is_smc_ram_running(adev)) {
+		val = RREG32_SMC(ixSMU_FIRMWARE);
+		if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
+			DRM_INFO("[FM]start smu in non-protection mode\n");
+			result = fiji_smu_start_in_non_protection_mode(adev);
+			if (result)
+				return result;
+		} else {
+			DRM_INFO("[FM]start smu in protection mode\n");
+			result = fiji_smu_start_in_protection_mode(adev);
+			if (result)
+				return result;
+		}
+	}
+
+	return fiji_smu_request_load_fw(adev);
+}
+
+static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
+	.check_fw_load_finish = fiji_smu_check_fw_load_finish,
+	.request_smu_load_fw = NULL,
+	.request_smu_specific_fw = NULL,
+};
+
+int fiji_smu_init(struct amdgpu_device *adev)
+{
+	struct fiji_smu_private_data *private;
+	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+	uint32_t smu_internal_buffer_size = 200*4096;
+	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
+	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
+	uint64_t mc_addr;
+	void *toc_buf_ptr;
+	void *smu_buf_ptr;
+	int ret;
+
+	private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
+	if (NULL == private)
+		return -ENOMEM;
+
+	/* allocate firmware buffers */
+	if (adev->firmware.smu_load)
+		amdgpu_ucode_init_bo(adev);
+
+	adev->smu.priv = private;
+	adev->smu.fw_flags = 0;
+
+	/* Allocate FW image data structure and header buffer */
+	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, toc_buf);
+	if (ret) {
+		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate buffer for SMU internal buffer */
+	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, smu_buf);
+	if (ret) {
+		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Retrieve GPU address for header buffer and internal buffer */
+	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
+	if (ret) {
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to reserve the TOC buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.toc_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to pin the TOC buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.toc_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to map the TOC buffer\n");
+		return -EINVAL;
+	}
+
+	amdgpu_bo_unreserve(adev->smu.toc_buf);
+	private->header_addr_low = lower_32_bits(mc_addr);
+	private->header_addr_high = upper_32_bits(mc_addr);
+	private->header = toc_buf_ptr;
+
+	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
+	if (ret) {
+		amdgpu_bo_unref(&adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to reserve the SMU internal buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to pin the SMU internal buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to map the SMU internal buffer\n");
+		return -EINVAL;
+	}
+
+	amdgpu_bo_unreserve(adev->smu.smu_buf);
+	private->smu_buffer_addr_low = lower_32_bits(mc_addr);
+	private->smu_buffer_addr_high = upper_32_bits(mc_addr);
+
+	adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
+
+	return 0;
+}
+
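[Editor's note] fiji_smu_init repeats the same four-step dance for both buffers: create, reserve, pin (which yields the MC address), kmap; each failure unwinds the earlier steps. Condensed shape of one iteration (sketch of the calls above, error unwinding elided):

r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
		     AMDGPU_GEM_DOMAIN_VRAM,
		     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, NULL, &bo);
if (r)
	return r;
r = amdgpu_bo_reserve(bo, false);		/* lock the BO */
/* ... */
r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
/* ... */
r = amdgpu_bo_kmap(bo, &cpu_ptr);		/* CPU-visible mapping */
amdgpu_bo_unreserve(bo);			/* mapping stays valid while pinned */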
+int fiji_smu_fini(struct amdgpu_device *adev)
+{
+	amdgpu_bo_unref(&adev->smu.toc_buf);
+	amdgpu_bo_unref(&adev->smu.smu_buf);
+	kfree(adev->smu.priv);
+	adev->smu.priv = NULL;
+	if (adev->firmware.fw_buf)
+		amdgpu_ucode_fini_bo(adev);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
new file mode 100644
index 000000000000..1cef03deeac3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef FIJI_SMUMGR_H
+#define FIJI_SMUMGR_H
+
+#include "fiji_ppsmc.h"
+
+int fiji_smu_init(struct amdgpu_device *adev);
+int fiji_smu_fini(struct amdgpu_device *adev);
+int fiji_smu_start(struct amdgpu_device *adev);
+
+struct fiji_smu_private_data
+{
+	uint8_t *header;
+	uint32_t smu_buffer_addr_high;
+	uint32_t smu_buffer_addr_low;
+	uint32_t header_addr_high;
+	uint32_t header_addr_low;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0d8bf2cb1956..4bd1e5cf65ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2173,7 +2173,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
 
 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
 	adev->gfx.config.mem_max_burst_length_bytes = 256;
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		/* Get memory bank mapping mode. */
 		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
 		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
@@ -2648,6 +2648,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -2659,29 +2660,27 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
+		goto err1;
 	}
 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
+
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err2;
+
+	r = fence_wait(f, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		return r;
+		goto err2;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -2691,14 +2690,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err2;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 		r = -EINVAL;
 	}
-	amdgpu_gfx_scratch_free(adev, scratch);
+
+err2:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err1:
+	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
 
@@ -3758,7 +3762,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	int r;
 
 	/* allocate rlc buffers */
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		if (adev->asic_type == CHIP_KAVERI) {
 			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
 			adev->gfx.rlc.reg_list_size =
@@ -3782,7 +3786,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 		/* save restore block */
 		if (adev->gfx.rlc.save_restore_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.save_restore_obj);
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, &adev->gfx.rlc.save_restore_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
 				return r;
@@ -3823,7 +3829,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 
 		if (adev->gfx.rlc.clear_state_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.clear_state_obj);
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, &adev->gfx.rlc.clear_state_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
 				gfx_v7_0_rlc_fini(adev);
@@ -3860,7 +3868,9 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	if (adev->gfx.rlc.cp_table_size) {
 		if (adev->gfx.rlc.cp_table_obj == NULL) {
 			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.cp_table_obj);
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, &adev->gfx.rlc.cp_table_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
 				gfx_v7_0_rlc_fini(adev);
@@ -5594,6 +5604,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.is_lockup = gfx_v7_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5610,6 +5621,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.is_lockup = gfx_v7_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 20e2cfd521d5..53f07439a512 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -87,6 +87,13 @@ MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
 MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
 MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
+MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
+MODULE_FIRMWARE("amdgpu/fiji_me.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
+MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
+
 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 {
 	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -217,6 +224,71 @@ static const u32 tonga_mgcg_cgcg_init[] =
 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 };
 
+static const u32 fiji_golden_common_all[] =
+{
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
+	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
+	mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
+	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+};
+
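+/*
+ * Each entry in these golden register tables is a { reg, and_mask,
+ * or_mask } triple; as a rough sketch, amdgpu_program_register_sequence()
+ * applies one as
+ *
+ *	tmp = RREG32(reg);
+ *	tmp &= ~and_mask;
+ *	tmp |= or_mask;
+ *	WREG32(reg, tmp);
+ *
+ * with an and_mask of 0xffffffff treated as a plain write of or_mask.
+ */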
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x00000100,
+	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+	mmTCC_CTRL, 0x00100000, 0xf30fff7f,
+	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
+	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x7d6cf5e4,
+	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x3928b1a0,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffc0,
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
+	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
+	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
+	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
+	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
+	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
+};
+
 static const u32 golden_settings_iceland_a11[] =
 {
 	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
@@ -439,6 +511,18 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 						 iceland_golden_common_all,
 						 (const u32)ARRAY_SIZE(iceland_golden_common_all));
 		break;
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		amdgpu_program_register_sequence(adev,
+						 fiji_golden_common_all,
+						 (const u32)ARRAY_SIZE(fiji_golden_common_all));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -526,6 +610,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -537,29 +622,27 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
+		goto err1;
 	}
 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
+
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err2;
+
+	r = fence_wait(f, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		return r;
+		goto err2;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -569,14 +652,18 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err2;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 		r = -EINVAL;
 	}
-	amdgpu_gfx_scratch_free(adev, scratch);
+err2:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err1:
+	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
 
@@ -601,6 +688,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_CARRIZO:
 		chip_name = "carrizo";
 		break;
+	case CHIP_FIJI:
+		chip_name = "fiji";
+		break;
 	default:
 		BUG();
 	}
@@ -1236,6 +1326,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
 			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
 		}
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
 			switch (reg_offset) {
@@ -1914,7 +2005,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v8_0_init_compute_vmid - gart enable
+ * gfx_v8_0_init_compute_vmid - initialize the compute VMIDs
  *
  * @adev: amdgpu_device pointer
  *
@@ -1924,7 +2015,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
 #define DEFAULT_SH_MEM_BASES	(0x6000)
 #define FIRST_COMPUTE_VMID	(8)
 #define LAST_COMPUTE_VMID	(16)
-static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev)
+static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 {
 	int i;
 	uint32_t sh_mem_config;
@@ -1984,6 +2075,23 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
 		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
 		break;
+	case CHIP_FIJI:
+		adev->gfx.config.max_shader_engines = 4;
+		adev->gfx.config.max_tile_pipes = 16;
+		adev->gfx.config.max_cu_per_sh = 16;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 4;
+		adev->gfx.config.max_texture_channel_caches = 8;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+		break;
 	case CHIP_TONGA:
 		adev->gfx.config.max_shader_engines = 4;
 		adev->gfx.config.max_tile_pipes = 8;
@@ -2078,7 +2186,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 
 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
 	adev->gfx.config.mem_max_burst_length_bytes = 256;
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		/* Get memory bank mapping mode. */
 		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
 		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
@@ -2174,7 +2282,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 	vi_srbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 
-	gmc_v8_0_init_compute_vmid(adev);
+	gfx_v8_0_init_compute_vmid(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	/*
@@ -2490,6 +2598,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
 	switch (adev->asic_type) {
 	case CHIP_TONGA:
+	case CHIP_FIJI:
 		amdgpu_ring_write(ring, 0x16000012);
 		amdgpu_ring_write(ring, 0x0000002A);
 		break;
@@ -3131,7 +3240,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
 
 		/* enable the doorbell if requested */
 		if (use_doorbell) {
-			if (adev->asic_type == CHIP_CARRIZO) {
+			if ((adev->asic_type == CHIP_CARRIZO) ||
+			    (adev->asic_type == CHIP_FIJI)) {
 				WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
 				       AMDGPU_DOORBELL_KIQ << 2);
 				WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -3875,7 +3985,8 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
 	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
 	if (ring->adev->asic_type == CHIP_TOPAZ ||
-	    ring->adev->asic_type == CHIP_TONGA)
+	    ring->adev->asic_type == CHIP_TONGA ||
+	    ring->adev->asic_type == CHIP_FIJI)
 		/* there is a hw semaphore bug on some VI parts (Topaz/Tonga/Fiji); return false to fall back to the sw fence wait */
 		return false;
 	else {
@@ -4268,6 +4379,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.test_ring = gfx_v8_0_ring_test_ring,
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.is_lockup = gfx_v8_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -4284,6 +4396,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.test_ring = gfx_v8_0_ring_test_ring,
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.is_lockup = gfx_v8_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ae37fce36520..774528ab8704 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -523,17 +523,11 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 			    amdgpu_vm_block_size - 9);
@@ -636,7 +630,7 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
 	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
 
 	/* base offset of vram pages */
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 		tmp <<= 22;
 		adev->vm_manager.vram_base_offset = tmp;
@@ -841,7 +835,7 @@ static int gmc_v7_0_early_init(void *handle)
 	gmc_v7_0_set_gart_funcs(adev);
 	gmc_v7_0_set_irq_funcs(adev);
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
@@ -852,6 +846,13 @@ static int gmc_v7_0_early_init(void *handle)
 	return 0;
 }
 
+static int gmc_v7_0_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
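+	/* enable the VM fault interrupt now that interrupt handling is up */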
+	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
 static int gmc_v7_0_sw_init(void *handle)
 {
 	int r;
@@ -957,7 +958,7 @@ static int gmc_v7_0_hw_init(void *handle)
 
 	gmc_v7_0_mc_program(adev);
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		r = gmc_v7_0_mc_load_microcode(adev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
@@ -976,6 +977,7 @@ static int gmc_v7_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
 	gmc_v7_0_gart_disable(adev);
 
 	return 0;
@@ -1172,7 +1174,7 @@ static int gmc_v7_0_soft_reset(void *handle)
 
 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
-		if (!(adev->flags & AMDGPU_IS_APU))
+		if (!(adev->flags & AMD_IS_APU))
 			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
@@ -1282,7 +1284,7 @@ static int gmc_v7_0_set_clockgating_state(void *handle,
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		gmc_v7_0_enable_mc_mgcg(adev, gate);
 		gmc_v7_0_enable_mc_ls(adev, gate);
 	}
@@ -1301,7 +1303,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
 
 const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 	.early_init = gmc_v7_0_early_init,
-	.late_init = NULL,
+	.late_init = gmc_v7_0_late_init,
 	.sw_init = gmc_v7_0_sw_init,
 	.sw_fini = gmc_v7_0_sw_fini,
 	.hw_init = gmc_v7_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 8135963a66be..9a07742620d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -44,6 +44,7 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 
 MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -61,6 +62,19 @@ static const u32 tonga_mgcg_cgcg_init[] =
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
 static const u32 golden_settings_iceland_a11[] =
 {
 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
@@ -90,6 +104,14 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 						 golden_settings_iceland_a11,
 						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
 		break;
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -202,6 +224,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_TONGA:
 		chip_name = "tonga";
 		break;
+	case CHIP_FIJI:
+		chip_name = "fiji";
+		break;
 	case CHIP_CARRIZO:
 		return 0;
 	default: BUG();
@@ -628,19 +653,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 			    amdgpu_vm_block_size - 9);
@@ -737,7 +755,7 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
 	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
 
 	/* base offset of vram pages */
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 		tmp <<= 22;
 		adev->vm_manager.vram_base_offset = tmp;
@@ -816,7 +834,7 @@ static int gmc_v8_0_early_init(void *handle)
 	gmc_v8_0_set_gart_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
@@ -827,6 +845,13 @@ static int gmc_v8_0_early_init(void *handle)
 	return 0;
 }
 
+static int gmc_v8_0_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
 static int gmc_v8_0_sw_init(void *handle)
 {
 	int r;
@@ -934,7 +959,7 @@ static int gmc_v8_0_hw_init(void *handle)
 
 	gmc_v8_0_mc_program(adev);
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		r = gmc_v8_0_mc_load_microcode(adev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
@@ -953,6 +978,7 @@ static int gmc_v8_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
 	gmc_v8_0_gart_disable(adev);
 
 	return 0;
@@ -1147,7 +1173,7 @@ static int gmc_v8_0_soft_reset(void *handle)
 
 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
-		if (!(adev->flags & AMDGPU_IS_APU))
+		if (!(adev->flags & AMD_IS_APU))
 			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
@@ -1263,7 +1289,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
 
 const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.early_init = gmc_v8_0_early_init,
-	.late_init = NULL,
+	.late_init = gmc_v8_0_late_init,
 	.sw_init = gmc_v8_0_sw_init,
 	.sw_fini = gmc_v8_0_sw_fini,
 	.hw_init = gmc_v8_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
index c723602c7b0c..ee6a041cb288 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
@@ -2163,5 +2163,10 @@
 #define SDMA_PKT_NOP_HEADER_sub_op_shift  8
 #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
 
+/* define for count field */
+#define SDMA_PKT_NOP_HEADER_count_offset 0
+#define SDMA_PKT_NOP_HEADER_count_mask   0x00003FFF
+#define SDMA_PKT_NOP_HEADER_count_shift  16
+#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)
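+
+/*
+ * Sketch of typical use: one burst NOP header announces how many of the
+ * following dwords belong to the same packet, e.g.
+ *
+ *	ring->nop | SDMA_PKT_NOP_HEADER_COUNT(count - 1)
+ *
+ * pads count dwords with a single packet on firmware that supports it.
+ */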
 
 #endif /* __ICELAND_SDMA_PKT_OPEN_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c6f1e2f12b5f..c900aa942ade 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -623,7 +623,9 @@ int iceland_smu_init(struct amdgpu_device *adev)
 
 	/* Allocate FW image data structure and header buffer */
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-			       true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a988dfb1d394..14e87234171a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -146,6 +146,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
 		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
 		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
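+		/* firmware with feature version 20+ understands the burst
+		 * NOP count field
+		 */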
+		if (adev->sdma[i].feature_version >= 20)
+			adev->sdma[i].burst_nop = true;
 
 		if (adev->firmware.smu_load) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -218,6 +220,19 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 }
 
+static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	int i;
+
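+	/* with burst-NOP capable firmware the first dword carries a count of
+	 * the remaining count - 1 padding dwords, so the engine consumes the
+	 * whole run as one packet; otherwise plain NOPs go out one by one
+	 */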
+	for (i = 0; i < count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			amdgpu_ring_write(ring, ring->nop |
+				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+		else
+			amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -245,8 +260,8 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* IB packet must end on an 8 DW boundary */
-	while ((ring->wptr & 7) != 2)
-		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
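+	/* pad to wptr % 8 == 2: the 6-dword INDIRECT packet below then ends
+	 * on the boundary, since (10 - (wptr & 7)) % 8 == (2 - wptr) mod 8
+	 */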
+	sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
+
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
 	/* base must be 32 byte aligned */
@@ -673,6 +688,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	unsigned i;
 	unsigned index;
 	int r;
@@ -688,12 +704,11 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
-
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		return r;
+		goto err0;
 	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -707,19 +722,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err1;
+
+	r = fence_wait(f, false);
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		return r;
+		goto err1;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -729,12 +741,17 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err1;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
+
+err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err0:
 	amdgpu_wb_free(adev, index);
 	return r;
 }
@@ -877,8 +894,19 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	u32 pad_count;
+	int i;
+
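+	/* round length_dw up to a multiple of 8; with burst NOP support the
+	 * first padding dword carries the count of the remaining ones
+	 */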
+	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	for (i = 0; i < pad_count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+		else
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 }
 
 /**
@@ -1312,6 +1340,7 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 	.test_ring = sdma_v2_4_ring_test_ring,
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.is_lockup = sdma_v2_4_ring_is_lockup,
+	.insert_nop = sdma_v2_4_ring_insert_nop,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1348,19 +1377,19 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
  * Used by the amdgpu ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring,
+static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
 				       uint64_t src_offset,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
-	amdgpu_ring_write(ring, byte_count);
-	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
-	amdgpu_ring_write(ring, lower_32_bits(src_offset));
-	amdgpu_ring_write(ring, upper_32_bits(src_offset));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
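+	/* COPY_LINEAR packet: header, byte count, flags, then the source and
+	 * destination addresses as lo/hi pairs
+	 */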
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = byte_count;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
 }
 
 /**
@@ -1373,16 +1402,16 @@ static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring,
  *
  * Fill GPU buffers using the DMA engine (VI).
  */
-static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring,
+static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
 				       uint32_t src_data,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-	amdgpu_ring_write(ring, src_data);
-	amdgpu_ring_write(ring, byte_count);
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
@@ -1415,5 +1444,6 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
 		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2b86569b18d3..9bfe92df15f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -53,6 +53,8 @@ MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
 MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
+MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
 
 static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
 {
@@ -80,6 +82,24 @@ static const u32 tonga_mgcg_cgcg_init[] =
 	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
 };
 
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
+	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
+};
+
 static const u32 cz_golden_settings_a11[] =
 {
 	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
@@ -122,6 +142,14 @@ static const u32 cz_mgcg_cgcg_init[] =
 static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -167,6 +195,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_TONGA:
 		chip_name = "tonga";
 		break;
+	case CHIP_FIJI:
+		chip_name = "fiji";
+		break;
 	case CHIP_CARRIZO:
 		chip_name = "carrizo";
 		break;
@@ -187,6 +218,8 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
 		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma[i].feature_version >= 20)
+			adev->sdma[i].burst_nop = true;
 
 		if (adev->firmware.smu_load) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -273,6 +306,19 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 	}
 }
 
+static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	int i;
+
+	for (i = 0; i < count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			amdgpu_ring_write(ring, ring->nop |
+				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+		else
+			amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -299,8 +345,7 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* IB packet must end on an 8 DW boundary */
-	while ((ring->wptr & 7) != 2)
-		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+	sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
@@ -763,6 +808,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	unsigned i;
 	unsigned index;
 	int r;
@@ -778,12 +824,11 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
-
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		return r;
+		goto err0;
 	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -797,19 +842,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err1;
+
+	r = fence_wait(f, false);
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		return r;
+		goto err1;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -819,12 +861,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err1;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
+err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err0:
 	amdgpu_wb_free(adev, index);
 	return r;
 }
@@ -967,8 +1013,19 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	u32 pad_count;
+	int i;
+
+	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	for (i = 0; i < pad_count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+		else
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 }
 
 /**
@@ -1406,6 +1463,7 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 	.test_ring = sdma_v3_0_ring_test_ring,
 	.test_ib = sdma_v3_0_ring_test_ib,
 	.is_lockup = sdma_v3_0_ring_is_lockup,
+	.insert_nop = sdma_v3_0_ring_insert_nop,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1442,19 +1500,19 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
  * Used by the amdgpu ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring,
+static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
 				       uint64_t src_offset,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
-	amdgpu_ring_write(ring, byte_count);
-	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
-	amdgpu_ring_write(ring, lower_32_bits(src_offset));
-	amdgpu_ring_write(ring, upper_32_bits(src_offset));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = byte_count;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
 }
 
 /**
@@ -1467,16 +1525,16 @@ static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring,
  *
  * Fill GPU buffers using the DMA engine (VI).
  */
-static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ring *ring,
+static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
 				       uint32_t src_data,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-	amdgpu_ring_write(ring, src_data);
-	amdgpu_ring_write(ring, byte_count);
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
@@ -1509,5 +1567,6 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
 		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
index 099b7b56113c..e5ebd084288d 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
@@ -2236,5 +2236,10 @@
 #define SDMA_PKT_NOP_HEADER_sub_op_shift  8
 #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
 
+/* define for count field */
+#define SDMA_PKT_NOP_HEADER_count_offset 0
+#define SDMA_PKT_NOP_HEADER_count_mask   0x00003FFF
+#define SDMA_PKT_NOP_HEADER_count_shift  16
+#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)
 
 #endif /* __TONGA_SDMA_PKT_OPEN_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 5fc53a40c7ac..1f5ac941a610 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -761,7 +761,9 @@ int tonga_smu_init(struct amdgpu_device *adev)
 
 	/* Allocate FW image data structure and header buffer */
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-				true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
@@ -769,7 +771,9 @@ int tonga_smu_init(struct amdgpu_device *adev)
 
 	/* Allocate buffer for SMU internal buffer */
 	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-				true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf);
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, smu_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 4efd671d7a9b..5fac5da694f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -534,7 +534,7 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
 static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
 	r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -555,14 +555,14 @@ static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto error;
 	}
 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 	return r;
 }
@@ -886,6 +886,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
 	.test_ring = uvd_v4_2_ring_test_ring,
 	.test_ib = uvd_v4_2_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index b756bd99c0fd..2d5c59c318af 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -580,7 +580,7 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
 	r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -601,14 +601,14 @@ static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto error;
 	}
 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 	return r;
 }
@@ -825,6 +825,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
 	.test_ring = uvd_v5_0_ring_test_ring,
 	.test_ib = uvd_v5_0_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 49aa931b2cb4..d9f553fce531 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -575,7 +575,7 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
  */
 static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
 {
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -590,14 +590,14 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto error;
 	}
 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	return r;
 }
 
@@ -805,6 +805,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
 	.test_ring = uvd_v6_0_ring_test_ring,
 	.test_ib = uvd_v6_0_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 303d961d57bd..cd16df543f64 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -643,6 +643,7 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
 	.test_ring = amdgpu_vce_ring_test_ring,
 	.test_ib = amdgpu_vce_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d1064ca3670e..f0656dfb53f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -32,8 +32,8 @@
 #include "vid.h"
 #include "vce/vce_3_0_d.h"
 #include "vce/vce_3_0_sh_mask.h"
-#include "oss/oss_2_0_d.h"
-#include "oss/oss_2_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
@@ -205,7 +205,14 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
 	u32 tmp;
 	unsigned ret;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	/* Fiji is single pipe */
+	if (adev->asic_type == CHIP_FIJI) {
+		ret = AMDGPU_VCE_HARVEST_VCE1;
+		return ret;
+	}
+
+	/* Tonga and CZ are dual or single pipe */
+	if (adev->flags & AMD_IS_APU)
 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
 			VCE_HARVEST_FUSE_MACRO__SHIFT;
@@ -419,17 +426,41 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
 static bool vce_v3_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 mask = 0;
+	int idx;
+
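+	/* accumulate BUSY bits only for VCE instances that survived
+	 * harvesting
+	 */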
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+		else
+			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+	}
 
-	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+	return !(RREG32(mmSRBM_STATUS2) & mask);
 }
 
 static int vce_v3_0_wait_for_idle(void *handle)
 {
 	unsigned i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 mask = 0;
+	int idx;
+
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+		else
+			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+		if (!(RREG32(mmSRBM_STATUS2) & mask))
 			return 0;
 	}
 	return -ETIMEDOUT;
@@ -438,9 +469,21 @@ static int vce_v3_0_wait_for_idle(void *handle)
 static int vce_v3_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 mask = 0;
+	int idx;
 
-	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
-			~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
+		else
+			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+	}
+	WREG32_P(mmSRBM_SOFT_RESET, mask,
+		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
+		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
 	mdelay(5);
 
 	return vce_v3_0_start(adev);
@@ -601,6 +644,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
 	.test_ring = amdgpu_vce_ring_test_ring,
 	.test_ib = amdgpu_vce_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 68552da40287..552d9e75ad1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -203,6 +203,17 @@ static const u32 tonga_mgcg_cgcg_init[] =
 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
 };
 
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
+	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
+	mmPCIE_DATA, 0x000f0000, 0x00000000,
+	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
+	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
+	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
+	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+};
+
 static const u32 iceland_mgcg_cgcg_init[] =
 {
 	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
@@ -232,6 +243,11 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
 						 iceland_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
 		break;
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -261,7 +277,7 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
 	u32 reference_clock = adev->clock.spll.reference_freq;
 	u32 tmp;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return reference_clock;
 
 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
@@ -362,6 +378,26 @@ static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
 
 static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
 	{mmGRBM_STATUS, false},
+	{mmGRBM_STATUS2, false},
+	{mmGRBM_STATUS_SE0, false},
+	{mmGRBM_STATUS_SE1, false},
+	{mmGRBM_STATUS_SE2, false},
+	{mmGRBM_STATUS_SE3, false},
+	{mmSRBM_STATUS, false},
+	{mmSRBM_STATUS2, false},
+	{mmSRBM_STATUS3, false},
+	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
+	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
+	{mmCP_STAT, false},
+	{mmCP_STALLED_STAT1, false},
+	{mmCP_STALLED_STAT2, false},
+	{mmCP_STALLED_STAT3, false},
+	{mmCP_CPF_BUSY_STAT, false},
+	{mmCP_CPF_STALLED_STAT1, false},
+	{mmCP_CPF_STATUS, false},
+	{mmCP_CPC_BUSY_STAT, false},
+	{mmCP_CPC_STALLED_STAT1, false},
+	{mmCP_CPC_STATUS, false},
 	{mmGB_ADDR_CONFIG, false},
 	{mmMC_ARB_RAMCFG, false},
 	{mmGB_TILE_MODE0, false},
@@ -449,6 +485,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 		asic_register_table = tonga_allowed_read_registers;
 		size = ARRAY_SIZE(tonga_allowed_read_registers);
 		break;
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 	case CHIP_CARRIZO:
 		asic_register_table = cz_allowed_read_registers;
@@ -751,7 +788,7 @@ static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
 		srbm_soft_reset =
 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		if (reset_mask & AMDGPU_RESET_MC)
 			srbm_soft_reset =
 				REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
@@ -971,7 +1008,7 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
@@ -999,7 +1036,7 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
 	u32 tmp;
 
 	/* not necessary on CZ */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
@@ -1127,6 +1164,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
 	},
 };
 
+static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 5,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &fiji_dpm_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 10,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &dce_v10_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version cz_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1202,6 +1307,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 		adev->ip_blocks = topaz_ip_blocks;
 		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
 		break;
+	case CHIP_FIJI:
+		adev->ip_blocks = fiji_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+		break;
 	case CHIP_TONGA:
 		adev->ip_blocks = tonga_ip_blocks;
 		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
@@ -1248,7 +1357,7 @@ static int vi_common_early_init(void *handle)
 	bool smc_enabled = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		adev->smc_rreg = &cz_smc_rreg;
 		adev->smc_wreg = &cz_smc_wreg;
 	} else {
@@ -1279,6 +1388,7 @@ static int vi_common_early_init(void *handle)
 		if (amdgpu_smc_load_fw && smc_enabled)
 			adev->firmware.smu_load = true;
 		break;
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 		adev->has_uvd = true;
 		adev->cg_flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
index 3b45332f5df4..fc120ba18aad 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
@@ -30,7 +30,7 @@ int cz_smu_start(struct amdgpu_device *adev);
 int cz_smu_fini(struct amdgpu_device *adev);
 
 extern const struct amd_ip_funcs tonga_dpm_ip_funcs;
-
+extern const struct amd_ip_funcs fiji_dpm_ip_funcs;
 extern const struct amd_ip_funcs iceland_dpm_ip_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 31bb89452e12..d98aa9d82fa1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -66,6 +66,11 @@
 
 #define AMDGPU_NUM_OF_VMIDS			8
 
+#define		PIPEID(x)					((x) << 0)
+#define		MEID(x)						((x) << 2)
+#define		VMID(x)						((x) << 4)
+#define		QUEUEID(x)					((x) << 8)
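+
+/*
+ * These compose a single pipe/ME/VM/queue selector value, e.g.
+ * PIPEID(2) | MEID(1) | QUEUEID(3) == 0x2 | 0x4 | 0x300 == 0x306.
+ */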
+
 #define RB_BITMAP_WIDTH_PER_SH     2
 
 #define MC_SEQ_MISC0__MT__MASK	0xf0000000
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index 8dfac37ff327..e13c67c8d2c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,6 +4,6 @@
 
 config HSA_AMD
 	tristate "HSA kernel driver for AMD GPU devices"
-	depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
+	depends on (DRM_RADEON || DRM_AMDGPU) && AMD_IOMMU_V2 && X86_64
 	help
 	  Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 28551153ec6d..7fc9b0f444cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -2,7 +2,8 @@
 # Makefile for Heterogeneous System Architecture support for AMD GPU devices
 #
 
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/  \
+		-Idrivers/gpu/drm/amd/include/asic_reg
 
 amdkfd-y	:= kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
 		kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 183be5b8414f..48769d12dd7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -65,17 +65,6 @@
 
 #define	AQL_ENABLE					1
 
-#define	SDMA_RB_VMID(x)					(x << 24)
-#define	SDMA_RB_ENABLE					(1 << 0)
-#define	SDMA_RB_SIZE(x)					((x) << 1) /* log2 */
-#define	SDMA_RPTR_WRITEBACK_ENABLE			(1 << 12)
-#define	SDMA_RPTR_WRITEBACK_TIMER(x)			((x) << 16) /* log2 */
-#define	SDMA_OFFSET(x)					(x << 0)
-#define	SDMA_DB_ENABLE					(1 << 28)
-#define	SDMA_ATC					(1 << 0)
-#define	SDMA_VA_PTR32					(1 << 4)
-#define	SDMA_VA_SHARED_BASE(x)				(x << 8)
-
 #define GRBM_GFX_INDEX					0x30800
 
 #define	ATC_VMID_PASID_MAPPING_VALID			(1U << 31)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index c991973019d0..c6a1b4cc6458 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -31,7 +31,7 @@
 #include <uapi/linux/kfd_ioctl.h>
 #include <linux/time.h>
 #include <linux/mm.h>
-#include <uapi/asm-generic/mman-common.h>
+#include <linux/mman.h>
 #include <asm/processor.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 75312c82969f..3f95f7cb4019 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -80,7 +80,12 @@ static const struct kfd_deviceid supported_devices[] = {
 	{ 0x1318, &kaveri_device_info },	/* Kaveri */
 	{ 0x131B, &kaveri_device_info },	/* Kaveri */
 	{ 0x131C, &kaveri_device_info },	/* Kaveri */
-	{ 0x131D, &kaveri_device_info }		/* Kaveri */
+	{ 0x131D, &kaveri_device_info },	/* Kaveri */
+	{ 0x9870, &carrizo_device_info },	/* Carrizo */
+	{ 0x9874, &carrizo_device_info },	/* Carrizo */
+	{ 0x9875, &carrizo_device_info },	/* Carrizo */
+	{ 0x9876, &carrizo_device_info },	/* Carrizo */
+	{ 0x9877, &carrizo_device_info }	/* Carrizo */
 };
 
 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 9ce8a20a7aff..c6f435aa803f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -23,6 +23,7 @@
 
 #include "kfd_device_queue_manager.h"
 #include "cik_regs.h"
+#include "oss/oss_2_4_sh_mask.h"
 
 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
 				   struct qcm_process_device *qpd,
@@ -135,13 +136,16 @@ static int register_process_cik(struct device_queue_manager *dqm,
 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
 				struct qcm_process_device *qpd)
 {
-	uint32_t value = SDMA_ATC;
+	uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
 
 	if (q->process->is_32bit_user_mode)
-		value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
+		value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+				get_sh_mem_bases_32(qpd_to_pdd(qpd));
 	else
-		value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
-							qpd_to_pdd(qpd)));
+		value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
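+	/*
+	 * Same bit layout as the old SDMA_* macros removed from cik_regs.h:
+	 * bit 0 = ATC, bit 4 = PTR32, shared-base nybble from bit 8.  E.g. a
+	 * 64-bit process with nybble 0x2 gives value == 0x201.
+	 */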
+
 	q->properties.sdma_vm_addr = value;
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 4c15212a3899..7e9cae9d349b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -22,6 +22,10 @@
  */
 
 #include "kfd_device_queue_manager.h"
+#include "gca/gfx_8_0_enum.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
 
 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
 				   struct qcm_process_device *qpd,
@@ -37,14 +41,40 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
 
 void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
 {
-	pr_warn("amdkfd: VI DQM is not currently supported\n");
-
 	ops->set_cache_memory_policy = set_cache_memory_policy_vi;
 	ops->register_process = register_process_vi;
 	ops->initialize = initialize_cpsch_vi;
 	ops->init_sdma_vm = init_sdma_vm;
 }
 
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+{
+	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
+	 * scratch and GPUVM apertures.
+	 * The hardware fills in the remaining 59 bits according to the
+	 * following pattern:
+	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
+	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
+	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
+	 *
+	 * (where X/Y is the configurable nybble with the low-bit 0)
+	 *
+	 * LDS and scratch will have the same top nybble programmed in the
+	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
+	 * GPUVM can have a different top nybble programmed in the
+	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
+	 * We don't bother to support different top nybbles
+	 * for LDS/Scratch and GPUVM.
+	 */
+
+	BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+		top_address_nybble == 0);
+
+	return top_address_nybble << 12 |
+			(top_address_nybble << 12) <<
+			SH_MEM_BASES__SHARED_BASE__SHIFT;
+}
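+
+/*
+ * Worked example: top_address_nybble == 0x2 returns
+ * (0x2 << 12) | ((0x2 << 12) << SH_MEM_BASES__SHARED_BASE__SHIFT), i.e.
+ * PRIVATE_BASE and SHARED_BASE both hold 0x2000, placing LDS/scratch at
+ * 0x20000000'00000000 and GPUVM at 0x20010000'00000000 per the table above.
+ */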
+
 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
 				   struct qcm_process_device *qpd,
 				   enum cache_policy default_policy,
@@ -52,18 +82,83 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
 				   void __user *alternate_aperture_base,
 				   uint64_t alternate_aperture_size)
 {
-	return false;
+	uint32_t default_mtype;
+	uint32_t ape1_mtype;
+
+	default_mtype = (default_policy == cache_policy_coherent) ?
+			MTYPE_CC :
+			MTYPE_NC;
+
+	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+			MTYPE_CC :
+			MTYPE_NC;
+
+	qpd->sh_mem_config = (qpd->sh_mem_config &
+			SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
+		SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+		default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+		ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+		SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+	return true;
 }
 
 static int register_process_vi(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd)
 {
-	return -1;
+	struct kfd_process_device *pdd;
+	unsigned int temp;
+
+	BUG_ON(!dqm || !qpd);
+
+	pdd = qpd_to_pdd(qpd);
+
+	/* check if sh_mem_config register already configured */
+	if (qpd->sh_mem_config == 0) {
+		qpd->sh_mem_config =
+			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+			MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+		qpd->sh_mem_ape1_limit = 0;
+		qpd->sh_mem_ape1_base = 0;
+	}
+
+	if (qpd->pqm->process->is_32bit_user_mode) {
+		temp = get_sh_mem_bases_32(pdd);
+		qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
+		qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
+					SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+	} else {
+		temp = get_sh_mem_bases_nybble_64(pdd);
+		qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+		qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
+			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+	}
+
+	pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+
+	return 0;
 }
 
 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
 				struct qcm_process_device *qpd)
 {
+	uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
+
+	if (q->process->is_32bit_user_mode)
+		value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+				get_sh_mem_bases_32(qpd_to_pdd(qpd));
+	else
+		value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
+	q->properties.sdma_vm_addr = value;
 }
 
 static int initialize_cpsch_vi(struct device_queue_manager *dqm)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 35b987574633..2b655103ba79 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -33,7 +33,7 @@
 #include <linux/time.h>
 #include "kfd_priv.h"
 #include <linux/mm.h>
-#include <uapi/asm-generic/mman-common.h>
+#include <linux/mman.h>
 #include <asm/processor.h>
 
 /*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 434979428fc0..d83de985e88c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -27,6 +27,7 @@
 #include "kfd_mqd_manager.h"
 #include "cik_regs.h"
 #include "cik_structs.h"
+#include "oss/oss_2_4_sh_mask.h"
 
 static inline struct cik_mqd *get_mqd(void *mqd)
 {
@@ -214,17 +215,20 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
 	BUG_ON(!mm || !mqd || !q);
 
 	m = get_sdma_mqd(mqd);
-	m->sdma_rlc_rb_cntl =
-		SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) |
-		SDMA_RB_VMID(q->vmid) |
-		SDMA_RPTR_WRITEBACK_ENABLE |
-		SDMA_RPTR_WRITEBACK_TIMER(6);
+	m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
+			SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+			q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+			1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+			6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
 
 	m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
 	m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
 	m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
 	m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
-	m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE;
+	m->sdma_rlc_doorbell = q->doorbell_off <<
+			SDMA0_RLC0_DOORBELL__OFFSET__SHIFT |
+			1 << SDMA0_RLC0_DOORBELL__ENABLE__SHIFT;
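+	/* equivalent to the removed SDMA_OFFSET(off) | SDMA_DB_ENABLE */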
+
 	m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
 
 	m->sdma_engine_id = q->sdma_engine_id;
@@ -234,7 +238,9 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
 	if (q->queue_size > 0 &&
 			q->queue_address != 0 &&
 			q->queue_percent > 0) {
-		m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE;
+		m->sdma_rlc_rb_cntl |=
+				1 << SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT;
+
 		q->is_active = true;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index b3a7e3ba1e38..fa32c32fa1c2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -22,12 +22,255 @@
  */
 
 #include <linux/printk.h>
+#include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_mqd_manager.h"
+#include "vi_structs.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+	return (struct vi_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+			struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+			struct queue_properties *q)
+{
+	int retval;
+	uint64_t addr;
+	struct vi_mqd *m;
+
+	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct vi_mqd),
+			mqd_mem_obj);
+	if (retval != 0)
+		return -ENOMEM;
+
+	m = (struct vi_mqd *) (*mqd_mem_obj)->cpu_ptr;
+	addr = (*mqd_mem_obj)->gpu_addr;
+
+	memset(m, 0, sizeof(struct vi_mqd));
+
+	m->header = 0xC0310800;
+	m->compute_pipelinestat_enable = 1;
+	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+
+	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT |
+			MTYPE_UC << CP_MQD_CONTROL__MTYPE__SHIFT;
+
+	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
+
+	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
+			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
+			10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+
+	m->cp_hqd_pipe_priority = 1;
+	m->cp_hqd_queue_priority = 15;
+
+	m->cp_hqd_eop_rptr = 1 << CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT;
+
+	if (q->format == KFD_QUEUE_FORMAT_AQL)
+		m->cp_hqd_iq_rptr = 1;
+
+	*mqd = m;
+	if (gart_addr != NULL)
+		*gart_addr = addr;
+	retval = mm->update_mqd(mm, m, q);
+
+	return retval;
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t __user *wptr)
+{
+	return mm->dev->kfd2kgd->hqd_load
+		(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+}
+
+static int __update_mqd(struct mqd_manager *mm, void *mqd,
+			struct queue_properties *q, unsigned int mtype,
+			unsigned int atc_bit)
+{
+	struct vi_mqd *m;
+
+	BUG_ON(!mm || !q || !mqd);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	m = get_mqd(mqd);
+
+	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
+			atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT |
+			mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
+	m->cp_hqd_pq_control |=
+			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
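+	/*
+	 * ffs(dwords) - 1 == log2(dwords) for the power-of-two sizes used
+	 * here; the extra -1 accounts for the field's log2-minus-one
+	 * encoding.  E.g. a 1KB queue: ffs(1024 / 4) - 1 - 1 == 7.
+	 */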
+	pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+
+	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+
+	m->cp_hqd_pq_doorbell_control =
+		1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
+		q->doorbell_off <<
+			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+	pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n",
+			m->cp_hqd_pq_doorbell_control);
+
+	m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
+			mtype << CP_HQD_EOP_CONTROL__MTYPE__SHIFT;
+
+	m->cp_hqd_ib_control = atc_bit << CP_HQD_IB_CONTROL__IB_ATC__SHIFT |
+			3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
+			mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
+
+	m->cp_hqd_eop_control |=
+		ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1;
+	m->cp_hqd_eop_base_addr_lo =
+			lower_32_bits(q->eop_ring_buffer_address >> 8);
+	m->cp_hqd_eop_base_addr_hi =
+			upper_32_bits(q->eop_ring_buffer_address >> 8);
+
+	m->cp_hqd_iq_timer = atc_bit << CP_HQD_IQ_TIMER__IQ_ATC__SHIFT |
+			mtype << CP_HQD_IQ_TIMER__MTYPE__SHIFT;
+
+	m->cp_hqd_vmid = q->vmid;
+
+	if (q->format == KFD_QUEUE_FORMAT_AQL) {
+		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
+	}
+
+	m->cp_hqd_active = 0;
+	q->is_active = false;
+	if (q->queue_size > 0 &&
+			q->queue_address != 0 &&
+			q->queue_percent > 0) {
+		m->cp_hqd_active = 1;
+		q->is_active = true;
+	}
+
+	return 0;
+}
+
+
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+			struct queue_properties *q)
+{
+	return __update_mqd(mm, mqd, q, MTYPE_CC, 1);
+}
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+			enum kfd_preempt_type type,
+			unsigned int timeout, uint32_t pipe_id,
+			uint32_t queue_id)
+{
+	return mm->dev->kfd2kgd->hqd_destroy
+		(mm->dev->kgd, type, timeout,
+		pipe_id, queue_id);
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+			struct kfd_mem_obj *mqd_mem_obj)
+{
+	BUG_ON(!mm || !mqd);
+	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+			uint64_t queue_address,	uint32_t pipe_id,
+			uint32_t queue_id)
+{
+	return mm->dev->kfd2kgd->hqd_is_occupied(
+		mm->dev->kgd, queue_address,
+		pipe_id, queue_id);
+}
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+			struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+			struct queue_properties *q)
+{
+	struct vi_mqd *m;
+	int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
+
+	if (retval != 0)
+		return retval;
+
+	m = get_mqd(*mqd);
+
+	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
+			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
+
+	return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+			struct queue_properties *q)
+{
+	struct vi_mqd *m;
+	int retval = __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+
+	if (retval != 0)
+		return retval;
+
+	m = get_mqd(mqd);
+	m->cp_hqd_vmid = q->vmid;
+	return retval;
+}
 
 struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
-					struct kfd_dev *dev)
+		struct kfd_dev *dev)
 {
-	pr_warn("amdkfd: VI MQD is not currently supported\n");
-	return NULL;
+	struct mqd_manager *mqd;
+
+	BUG_ON(!dev);
+	BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+	if (!mqd)
+		return NULL;
+
+	mqd->dev = dev;
+
+	switch (type) {
+	case KFD_MQD_TYPE_CP:
+	case KFD_MQD_TYPE_COMPUTE:
+		mqd->init_mqd = init_mqd;
+		mqd->uninit_mqd = uninit_mqd;
+		mqd->load_mqd = load_mqd;
+		mqd->update_mqd = update_mqd;
+		mqd->destroy_mqd = destroy_mqd;
+		mqd->is_occupied = is_occupied;
+		break;
+	case KFD_MQD_TYPE_HIQ:
+		mqd->init_mqd = init_mqd_hiq;
+		mqd->uninit_mqd = uninit_mqd;
+		mqd->load_mqd = load_mqd;
+		mqd->update_mqd = update_mqd_hiq;
+		mqd->destroy_mqd = destroy_mqd;
+		mqd->is_occupied = is_occupied;
+		break;
+	case KFD_MQD_TYPE_SDMA:
+		break;
+	default:
+		kfree(mqd);
+		return NULL;
+	}
+
+	return mqd;
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 99b6d28a11c3..90f391434fa3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -27,6 +27,7 @@
 #include "kfd_kernel_queue.h"
 #include "kfd_priv.h"
 #include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_vi.h"
 #include "kfd_pm4_opcodes.h"
 
 static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
@@ -55,6 +56,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 				bool *over_subscription)
 {
 	unsigned int process_count, queue_count;
+	unsigned int map_queue_size;
 
 	BUG_ON(!pm || !rlib_size || !over_subscription);
 
@@ -69,9 +71,13 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 		pr_debug("kfd: over subscribed runlist\n");
 	}
 
+	map_queue_size =
+		(pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
+		sizeof(struct pm4_mes_map_queues) :
+		sizeof(struct pm4_map_queues);
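+	/* (the MAP_QUEUES packet layout differs between CIK and VI, hence
+	 * the per-ASIC map_queue_size above) */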
 	/* calculate run list ib allocation size */
 	*rlib_size = process_count * sizeof(struct pm4_map_process) +
-		     queue_count * sizeof(struct pm4_map_queues);
+		     queue_count * map_queue_size;
 
 	/*
 	 * Increase the allocation size in case we need a chained run list
@@ -176,6 +182,71 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
 	return 0;
 }
 
+static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
+		struct queue *q, bool is_static)
+{
+	struct pm4_mes_map_queues *packet;
+	bool use_static = is_static;
+
+	BUG_ON(!pm || !buffer || !q);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	packet = (struct pm4_mes_map_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
+
+	packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
+					sizeof(struct pm4_mes_map_queues));
+	packet->bitfields2.alloc_format =
+		alloc_format__mes_map_queues__one_per_pipe_vi;
+	packet->bitfields2.num_queues = 1;
+	packet->bitfields2.queue_sel =
+		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
+
+	packet->bitfields2.engine_sel =
+		engine_sel__mes_map_queues__compute_vi;
+	packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_compute_vi;
+
+	switch (q->properties.type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+		if (use_static)
+			packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_latency_static_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.queue_type =
+			queue_type__mes_map_queues__debug_interface_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+		packet->bitfields2.engine_sel =
+				engine_sel__mes_map_queues__sdma0_vi;
+		use_static = false; /* no static queues under SDMA */
+		break;
+	default:
+		pr_err("kfd: in %s queue type %d\n", __func__,
+				q->properties.type);
+		BUG();
+		break;
+	}
+	packet->bitfields3.doorbell_offset =
+			q->properties.doorbell_off;
+
+	packet->mqd_addr_lo =
+			lower_32_bits(q->gart_mqd_addr);
+
+	packet->mqd_addr_hi =
+			upper_32_bits(q->gart_mqd_addr);
+
+	packet->wptr_addr_lo =
+			lower_32_bits((uint64_t)q->properties.write_ptr);
+
+	packet->wptr_addr_hi =
+			upper_32_bits((uint64_t)q->properties.write_ptr);
+
+	return 0;
+}
+
 static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
 				struct queue *q, bool is_static)
 {
@@ -292,8 +363,17 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
 				kq->queue->queue, qpd->is_debug);
 
-			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
-						kq->queue, qpd->is_debug);
+			if (pm->dqm->dev->device_info->asic_family ==
+					CHIP_CARRIZO)
+				retval = pm_create_map_queue_vi(pm,
+						&rl_buffer[rl_wptr],
+						kq->queue,
+						qpd->is_debug);
+			else
+				retval = pm_create_map_queue(pm,
+						&rl_buffer[rl_wptr],
+						kq->queue,
+						qpd->is_debug);
 			if (retval != 0)
 				return retval;
 
@@ -309,8 +389,17 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
 				q->queue, qpd->is_debug);
 
-			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
-						q,  qpd->is_debug);
+			if (pm->dqm->dev->device_info->asic_family ==
+					CHIP_CARRIZO)
+				retval = pm_create_map_queue_vi(pm,
+						&rl_buffer[rl_wptr],
+						q,
+						qpd->is_debug);
+			else
+				retval = pm_create_map_queue(pm,
+						&rl_buffer[rl_wptr],
+						q,
+						qpd->is_debug);
 
 			if (retval != 0)
 				return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
new file mode 100644
index 000000000000..08c721922812
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef F32_MES_PM4_PACKETS_H
+#define F32_MES_PM4_PACKETS_H
+
+#ifndef PM4_MES_HEADER_DEFINED
+#define PM4_MES_HEADER_DEFINED
+union PM4_MES_TYPE_3_HEADER {
+	struct {
+		uint32_t reserved1 : 8; /* < reserved */
+		uint32_t opcode    : 8; /* < IT opcode */
+		uint32_t count     : 14;/* < number of DWORDs - 1 in the
+					   information body. */
+		uint32_t type      : 2; /* < packet identifier.
+					It should be 3 for type 3 packets */
+	};
+	uint32_t u32All;
+};
+#endif /* PM4_MES_HEADER_DEFINED */
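+
+/*
+ * Note: callers such as pm_create_map_queue_vi() pass the total packet
+ * size in bytes to build_pm4_header(), so for an N-dword packet the count
+ * field ends up as N - 2 (the information body is everything after this
+ * one-dword header, and count is its length minus one).
+ */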
+
+/*--------------------MES_SET_RESOURCES--------------------*/
+
+#ifndef PM4_MES_SET_RESOURCES_DEFINED
+#define PM4_MES_SET_RESOURCES_DEFINED
+enum mes_set_resources_queue_type_enum {
+	queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
+	queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
+	queue_type__mes_set_resources__hsa_debug_interface_queue = 4
+};
+
+
+struct pm4_mes_set_resources {
+	union {
+		union PM4_MES_TYPE_3_HEADER	header;		/* header */
+		uint32_t			ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t vmid_mask:16;
+			uint32_t unmap_latency:8;
+			uint32_t reserved1:5;
+			enum mes_set_resources_queue_type_enum queue_type:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	uint32_t queue_mask_lo;
+	uint32_t queue_mask_hi;
+	uint32_t gws_mask_lo;
+	uint32_t gws_mask_hi;
+
+	union {
+		struct {
+			uint32_t oac_mask:16;
+			uint32_t reserved2:16;
+		} bitfields7;
+		uint32_t ordinal7;
+	};
+
+	union {
+		struct {
+			uint32_t gds_heap_base:6;
+			uint32_t reserved3:5;
+			uint32_t gds_heap_size:6;
+			uint32_t reserved4:15;
+		} bitfields8;
+		uint32_t ordinal8;
+	};
+
+};
+#endif
+
+/*--------------------MES_RUN_LIST--------------------*/
+
+#ifndef PM4_MES_RUN_LIST_DEFINED
+#define PM4_MES_RUN_LIST_DEFINED
+
+struct pm4_mes_runlist {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t reserved1:2;
+			uint32_t ib_base_lo:30;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t ib_base_hi:16;
+			uint32_t reserved2:16;
+		} bitfields3;
+		uint32_t ordinal3;
+	};
+
+	union {
+		struct {
+			uint32_t ib_size:20;
+			uint32_t chain:1;
+			uint32_t offload_polling:1;
+			uint32_t reserved3:1;
+			uint32_t valid:1;
+			uint32_t reserved4:8;
+		} bitfields4;
+		uint32_t ordinal4;
+	};
+
+};
+#endif
+
+/*--------------------MES_MAP_PROCESS--------------------*/
+
+#ifndef PM4_MES_MAP_PROCESS_DEFINED
+#define PM4_MES_MAP_PROCESS_DEFINED
+
+struct pm4_mes_map_process {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved1:8;
+			uint32_t diq_enable:1;
+			uint32_t process_quantum:7;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t page_table_base:28;
+			uint32_t reserved2:4;
+		} bitfields3;
+		uint32_t ordinal3;
+	};
+
+	uint32_t sh_mem_bases;
+	uint32_t sh_mem_ape1_base;
+	uint32_t sh_mem_ape1_limit;
+	uint32_t sh_mem_config;
+	uint32_t gds_addr_lo;
+	uint32_t gds_addr_hi;
+
+	union {
+		struct {
+			uint32_t num_gws:6;
+			uint32_t reserved3:2;
+			uint32_t num_oac:4;
+			uint32_t reserved4:4;
+			uint32_t gds_size:6;
+			uint32_t num_queues:10;
+		} bitfields10;
+		uint32_t ordinal10;
+	};
+
+};
+#endif
+
+/*--------------------MES_MAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_MAP_QUEUES_VI_DEFINED
+#define PM4_MES_MAP_QUEUES_VI_DEFINED
+enum mes_map_queues_queue_sel_vi_enum {
+	queue_sel__mes_map_queues__map_to_specified_queue_slots_vi = 0,
+	queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi = 1
+};
+
+enum mes_map_queues_queue_type_vi_enum {
+	queue_type__mes_map_queues__normal_compute_vi = 0,
+	queue_type__mes_map_queues__debug_interface_queue_vi = 1,
+	queue_type__mes_map_queues__normal_latency_static_queue_vi = 2,
+	queue_type__mes_map_queues__low_latency_static_queue_vi = 3
+};
+
+enum mes_map_queues_alloc_format_vi_enum {
+	alloc_format__mes_map_queues__one_per_pipe_vi = 0,
+	alloc_format__mes_map_queues__all_on_one_pipe_vi = 1
+};
+
+enum mes_map_queues_engine_sel_vi_enum {
+	engine_sel__mes_map_queues__compute_vi = 0,
+	engine_sel__mes_map_queues__sdma0_vi = 2,
+	engine_sel__mes_map_queues__sdma1_vi = 3
+};
+
+
+struct pm4_mes_map_queues {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t reserved1:4;
+			enum mes_map_queues_queue_sel_vi_enum queue_sel:2;
+			uint32_t reserved2:15;
+			enum mes_map_queues_queue_type_vi_enum queue_type:3;
+			enum mes_map_queues_alloc_format_vi_enum alloc_format:2;
+			enum mes_map_queues_engine_sel_vi_enum engine_sel:3;
+			uint32_t num_queues:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t reserved3:1;
+			uint32_t check_disable:1;
+			uint32_t doorbell_offset:21;
+			uint32_t reserved4:3;
+			uint32_t queue:6;
+		} bitfields3;
+		uint32_t ordinal3;
+	};
+
+	uint32_t mqd_addr_lo;
+	uint32_t mqd_addr_hi;
+	uint32_t wptr_addr_lo;
+	uint32_t wptr_addr_hi;
+};
+#endif
+
+/*--------------------MES_QUERY_STATUS--------------------*/
+
+#ifndef PM4_MES_QUERY_STATUS_DEFINED
+#define PM4_MES_QUERY_STATUS_DEFINED
+enum mes_query_status_interrupt_sel_enum {
+	interrupt_sel__mes_query_status__completion_status = 0,
+	interrupt_sel__mes_query_status__process_status = 1,
+	interrupt_sel__mes_query_status__queue_status = 2
+};
+
+enum mes_query_status_command_enum {
+	command__mes_query_status__interrupt_only = 0,
+	command__mes_query_status__fence_only_immediate = 1,
+	command__mes_query_status__fence_only_after_write_ack = 2,
+	command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
+};
+
+enum mes_query_status_engine_sel_enum {
+	engine_sel__mes_query_status__compute = 0,
+	engine_sel__mes_query_status__sdma0_queue = 2,
+	engine_sel__mes_query_status__sdma1_queue = 3
+};
+
+struct pm4_mes_query_status {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t context_id:28;
+			enum mes_query_status_interrupt_sel_enum
+				interrupt_sel:2;
+			enum mes_query_status_command_enum command:2;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved1:16;
+		} bitfields3a;
+		struct {
+			uint32_t reserved2:2;
+			uint32_t doorbell_offset:21;
+			uint32_t reserved3:2;
+			enum mes_query_status_engine_sel_enum engine_sel:3;
+			uint32_t reserved4:4;
+		} bitfields3b;
+		uint32_t ordinal3;
+	};
+
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+	uint32_t data_lo;
+	uint32_t data_hi;
+};
+#endif
+
+/*--------------------MES_UNMAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
+#define PM4_MES_UNMAP_QUEUES_DEFINED
+enum mes_unmap_queues_action_enum {
+	action__mes_unmap_queues__preempt_queues = 0,
+	action__mes_unmap_queues__reset_queues = 1,
+	action__mes_unmap_queues__disable_process_queues = 2,
+	action__mes_unmap_queues__reserved = 3
+};
+
+enum mes_unmap_queues_queue_sel_enum {
+	queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
+	queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
+	queue_sel__mes_unmap_queues__unmap_all_queues = 2,
+	queue_sel__mes_unmap_queues__unmap_all_non_static_queues = 3
+};
+
+enum mes_unmap_queues_engine_sel_enum {
+	engine_sel__mes_unmap_queues__compute = 0,
+	engine_sel__mes_unmap_queues__sdma0 = 2,
+	engine_sel__mes_unmap_queues__sdma1 = 3
+};
+
+struct PM4_MES_UNMAP_QUEUES {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			enum mes_unmap_queues_action_enum action:2;
+			uint32_t reserved1:2;
+			enum mes_unmap_queues_queue_sel_enum queue_sel:2;
+			uint32_t reserved2:20;
+			enum mes_unmap_queues_engine_sel_enum engine_sel:3;
+			uint32_t num_queues:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved3:16;
+		} bitfields3a;
+		struct {
+			uint32_t reserved4:2;
+			uint32_t doorbell_offset0:21;
+			uint32_t reserved5:9;
+		} bitfields3b;
+		uint32_t ordinal3;
+	};
+
+	union {
+		struct {
+			uint32_t reserved6:2;
+			uint32_t doorbell_offset1:21;
+			uint32_t reserved7:9;
+		} bitfields4;
+		uint32_t ordinal4;
+	};
+
+	union {
+		struct {
+			uint32_t reserved8:2;
+			uint32_t doorbell_offset2:21;
+			uint32_t reserved9:9;
+		} bitfields5;
+		uint32_t ordinal5;
+	};
+
+	union {
+		struct {
+			uint32_t reserved10:2;
+			uint32_t doorbell_offset3:21;
+			uint32_t reserved11:9;
+		} bitfields6;
+		uint32_t ordinal6;
+	};
+};
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c25728bc388a..74909e72a009 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1186,6 +1186,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	 * TODO: Retrieve max engine clock values from KGD
 	 */
 
+	if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+		dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
+		pr_info("amdkfd: adding doorbell packet type capability\n");
+	}
+
 	res = 0;
 
 err:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 989624b3cd14..c3ddb9b95ff8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -40,6 +40,7 @@
 #define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK	0x00000f00
 #define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT	8
 #define HSA_CAP_RESERVED			0xfffff000
+#define HSA_CAP_DOORBELL_PACKET_TYPE		0x00001000
 
 struct kfd_node_properties {
 	uint32_t cpu_cores_count;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 5bdf1b4397a0..68a8eaa1b7d0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -23,6 +23,45 @@
 #ifndef __AMD_SHARED_H__
 #define __AMD_SHARED_H__
 
+#define AMD_MAX_USEC_TIMEOUT		100000  /* 100 ms */
+
+/*
+ * Supported GPU families (aligned with amdgpu_drm.h)
+ */
+#define AMD_FAMILY_UNKNOWN              0
+#define AMD_FAMILY_CI                   120 /* Bonaire, Hawaii */
+#define AMD_FAMILY_KV                   125 /* Kaveri, Kabini, Mullins */
+#define AMD_FAMILY_VI                   130 /* Iceland, Tonga */
+#define AMD_FAMILY_CZ                   135 /* Carrizo */
+
+/*
+ * Supported ASIC types
+ */
+enum amd_asic_type {
+	CHIP_BONAIRE = 0,
+	CHIP_KAVERI,
+	CHIP_KABINI,
+	CHIP_HAWAII,
+	CHIP_MULLINS,
+	CHIP_TOPAZ,
+	CHIP_TONGA,
+	CHIP_FIJI,
+	CHIP_CARRIZO,
+	CHIP_LAST,
+};
+
+/*
+ * Chip flags
+ */
+enum amd_chip_flags {
+	AMD_ASIC_MASK = 0x0000ffffUL,
+	AMD_FLAGS_MASK  = 0xffff0000UL,
+	AMD_IS_MOBILITY = 0x00010000UL,
+	AMD_IS_APU      = 0x00020000UL,
+	AMD_IS_PX       = 0x00040000UL,
+	AMD_EXP_HW_SUPPORT = 0x00080000UL,
+};
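+
+/*
+ * adev->flags packs an amd_asic_type in the low 16 bits (AMD_ASIC_MASK)
+ * with these flags above it, hence tests like vi.c's
+ * "if (adev->flags & AMD_IS_APU)".
+ */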
+
 enum amd_ip_block_type {
 	AMD_IP_BLOCK_TYPE_COMMON,
 	AMD_IP_BLOCK_TYPE_GMC,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
new file mode 100644
index 000000000000..44b1855cb8df
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -0,0 +1,1246 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_D_H
+#define SMU_7_1_3_D_H
+
+#define mmGCK_SMC_IND_INDEX                                                     0x80
+#define mmGCK0_GCK_SMC_IND_INDEX                                                0x80
+#define mmGCK1_GCK_SMC_IND_INDEX                                                0x82
+#define mmGCK2_GCK_SMC_IND_INDEX                                                0x84
+#define mmGCK3_GCK_SMC_IND_INDEX                                                0x86
+#define mmGCK_SMC_IND_DATA                                                      0x81
+#define mmGCK0_GCK_SMC_IND_DATA                                                 0x81
+#define mmGCK1_GCK_SMC_IND_DATA                                                 0x83
+#define mmGCK2_GCK_SMC_IND_DATA                                                 0x85
+#define mmGCK3_GCK_SMC_IND_DATA                                                 0x87
+#define ixGCK_MCLK_FUSES                                                        0xc0500008
+#define ixCG_DCLK_CNTL                                                          0xc050009c
+#define ixCG_DCLK_STATUS                                                        0xc05000a0
+#define ixCG_VCLK_CNTL                                                          0xc05000a4
+#define ixCG_VCLK_STATUS                                                        0xc05000a8
+#define ixCG_ECLK_CNTL                                                          0xc05000ac
+#define ixCG_ECLK_STATUS                                                        0xc05000b0
+#define ixCG_ACLK_CNTL                                                          0xc05000dc
+#define ixCG_MCLK_CNTL                                                          0xc0500120
+#define ixCG_MCLK_STATUS                                                        0xc0500124
+#define ixGCK_DFS_BYPASS_CNTL                                                   0xc0500118
+#define ixCG_SPLL_FUNC_CNTL                                                     0xc0500140
+#define ixCG_SPLL_FUNC_CNTL_2                                                   0xc0500144
+#define ixCG_SPLL_FUNC_CNTL_3                                                   0xc0500148
+#define ixCG_SPLL_FUNC_CNTL_4                                                   0xc050014c
+#define ixCG_SPLL_FUNC_CNTL_5                                                   0xc0500150
+#define ixCG_SPLL_FUNC_CNTL_6                                                   0xc0500154
+#define ixCG_SPLL_FUNC_CNTL_7                                                   0xc0500158
+#define ixSPLL_CNTL_MODE                                                        0xc0500160
+#define ixCG_SPLL_SPREAD_SPECTRUM                                               0xc0500164
+#define ixCG_SPLL_SPREAD_SPECTRUM_2                                             0xc0500168
+#define ixMPLL_BYPASSCLK_SEL                                                    0xc050019c
+#define ixCG_CLKPIN_CNTL                                                        0xc05001a0
+#define ixCG_CLKPIN_CNTL_2                                                      0xc05001a4
+#define ixCG_CLKPIN_CNTL_DC                                                     0xc0500204
+#define ixTHM_CLK_CNTL                                                          0xc05001a8
+#define ixMISC_CLK_CTRL                                                         0xc05001ac
+#define ixGCK_PLL_TEST_CNTL                                                     0xc05001c0
+#define ixGCK_PLL_TEST_CNTL_2                                                   0xc05001c4
+#define ixGCK_ADFS_CLK_BYPASS_CNTL1                                             0xc05001c8
+#define mmSMC_IND_INDEX                                                         0x80
+#define mmSMC0_SMC_IND_INDEX                                                    0x80
+#define mmSMC1_SMC_IND_INDEX                                                    0x82
+#define mmSMC2_SMC_IND_INDEX                                                    0x84
+#define mmSMC3_SMC_IND_INDEX                                                    0x86
+#define mmSMC_IND_DATA                                                          0x81
+#define mmSMC0_SMC_IND_DATA                                                     0x81
+#define mmSMC1_SMC_IND_DATA                                                     0x83
+#define mmSMC2_SMC_IND_DATA                                                     0x85
+#define mmSMC3_SMC_IND_DATA                                                     0x87
+#define mmSMC_IND_INDEX_0                                                       0x80
+#define mmSMC_IND_DATA_0                                                        0x81
+#define mmSMC_IND_INDEX_1                                                       0x82
+#define mmSMC_IND_DATA_1                                                        0x83
+#define mmSMC_IND_INDEX_2                                                       0x84
+#define mmSMC_IND_DATA_2                                                        0x85
+#define mmSMC_IND_INDEX_3                                                       0x86
+#define mmSMC_IND_DATA_3                                                        0x87
+#define mmSMC_IND_INDEX_4                                                       0x88
+#define mmSMC_IND_DATA_4                                                        0x89
+#define mmSMC_IND_INDEX_5                                                       0x8a
+#define mmSMC_IND_DATA_5                                                        0x8b
+#define mmSMC_IND_INDEX_6                                                       0x8c
+#define mmSMC_IND_DATA_6                                                        0x8d
+#define mmSMC_IND_INDEX_7                                                       0x8e
+#define mmSMC_IND_DATA_7                                                        0x8f
+#define mmSMC_IND_ACCESS_CNTL                                                   0x92
+#define mmSMC_MESSAGE_0                                                         0x94
+#define mmSMC_RESP_0                                                            0x95
+#define mmSMC_MESSAGE_1                                                         0x96
+#define mmSMC_RESP_1                                                            0x97
+#define mmSMC_MESSAGE_2                                                         0x98
+#define mmSMC_RESP_2                                                            0x99
+#define mmSMC_MESSAGE_3                                                         0x9a
+#define mmSMC_RESP_3                                                            0x9b
+#define mmSMC_MESSAGE_4                                                         0x9c
+#define mmSMC_RESP_4                                                            0x9d
+#define mmSMC_MESSAGE_5                                                         0x9e
+#define mmSMC_RESP_5                                                            0x9f
+#define mmSMC_MESSAGE_6                                                         0xa0
+#define mmSMC_RESP_6                                                            0xa1
+#define mmSMC_MESSAGE_7                                                         0xa2
+#define mmSMC_RESP_7                                                            0xa3
+#define mmSMC_MSG_ARG_0                                                         0xa4
+#define mmSMC_MSG_ARG_1                                                         0xa5
+#define mmSMC_MSG_ARG_2                                                         0xa6
+#define mmSMC_MSG_ARG_3                                                         0xa7
+#define mmSMC_MSG_ARG_4                                                         0xa8
+#define mmSMC_MSG_ARG_5                                                         0xa9
+#define mmSMC_MSG_ARG_6                                                         0xaa
+#define mmSMC_MSG_ARG_7                                                         0xab
+#define mmSMC_MESSAGE_8                                                         0xb5
+#define mmSMC_RESP_8                                                            0xb6
+#define mmSMC_MESSAGE_9                                                         0xb7
+#define mmSMC_RESP_9                                                            0xb8
+#define mmSMC_MESSAGE_10                                                        0xb9
+#define mmSMC_RESP_10                                                           0xba
+#define mmSMC_MESSAGE_11                                                        0xbb
+#define mmSMC_RESP_11                                                           0xbc
+#define mmSMC_MSG_ARG_8                                                         0xbd
+#define mmSMC_MSG_ARG_9                                                         0xbe
+#define mmSMC_MSG_ARG_10                                                        0xbf
+#define mmSMC_MSG_ARG_11                                                        0x93
+#define ixSMC_SYSCON_RESET_CNTL                                            0x80000000
+#define ixSMC_SYSCON_CLOCK_CNTL_0                                          0x80000004
+#define ixSMC_SYSCON_CLOCK_CNTL_1                                          0x80000008
+#define ixSMC_SYSCON_CLOCK_CNTL_2                                          0x8000000c
+#define ixSMC_SYSCON_MISC_CNTL                                                  0x80000010
+#define ixSMC_SYSCON_MSG_ARG_0                                                  0x80000068
+#define ixSMC_PC_C                                                              0x80000370
+#define ixSMC_SCRATCH9                                                          0x80000424
+#define mmGPIOPAD_SW_INT_STAT                                                   0x180
+#define mmGPIOPAD_STRENGTH                                                      0x181
+#define mmGPIOPAD_MASK                                                          0x182
+#define mmGPIOPAD_A                                                             0x183
+#define mmGPIOPAD_EN                                                            0x184
+#define mmGPIOPAD_Y                                                             0x185
+#define mmGPIOPAD_PINSTRAPS                                                     0x186
+#define mmGPIOPAD_INT_STAT_EN                                                   0x187
+#define mmGPIOPAD_INT_STAT                                                      0x188
+#define mmGPIOPAD_INT_STAT_AK                                                   0x189
+#define mmGPIOPAD_INT_EN                                                        0x18a
+#define mmGPIOPAD_INT_TYPE                                                      0x18b
+#define mmGPIOPAD_INT_POLARITY                                                  0x18c
+#define mmGPIOPAD_EXTERN_TRIG_CNTL                                              0x18d
+#define mmGPIOPAD_RCVR_SEL                                                      0x191
+#define mmGPIOPAD_PU_EN                                                         0x192
+#define mmGPIOPAD_PD_EN                                                         0x193
+#define mmCG_FPS_CNT                                                            0x1b6
+#define mmSMU_IND_INDEX_0                                                       0x1a6
+#define mmSMU_IND_DATA_0                                                        0x1a7
+#define mmSMU_IND_INDEX_1                                                       0x1a8
+#define mmSMU_IND_DATA_1                                                        0x1a9
+#define mmSMU_IND_INDEX_2                                                       0x1aa
+#define mmSMU_IND_DATA_2                                                        0x1ab
+#define mmSMU_IND_INDEX_3                                                       0x1ac
+#define mmSMU_IND_DATA_3                                                        0x1ad
+#define mmSMU_IND_INDEX_4                                                       0x1ae
+#define mmSMU_IND_DATA_4                                                        0x1af
+#define mmSMU_IND_INDEX_5                                                       0x1b0
+#define mmSMU_IND_DATA_5                                                        0x1b1
+#define mmSMU_IND_INDEX_6                                                       0x1b2
+#define mmSMU_IND_DATA_6                                                        0x1b3
+#define mmSMU_IND_INDEX_7                                                       0x1b4
+#define mmSMU_IND_DATA_7                                                        0x1b5
+#define mmSMU_SMC_IND_INDEX                                                     0x80
+#define mmSMU0_SMU_SMC_IND_INDEX                                                0x80
+#define mmSMU1_SMU_SMC_IND_INDEX                                                0x82
+#define mmSMU2_SMU_SMC_IND_INDEX                                                0x84
+#define mmSMU3_SMU_SMC_IND_INDEX                                                0x86
+#define mmSMU_SMC_IND_DATA                                                      0x81
+#define mmSMU0_SMU_SMC_IND_DATA                                                 0x81
+#define mmSMU1_SMU_SMC_IND_DATA                                                 0x83
+#define mmSMU2_SMU_SMC_IND_DATA                                                 0x85
+#define mmSMU3_SMU_SMC_IND_DATA                                                 0x87
+#define ixRCU_UC_EVENTS                                                         0xc0000004
+#define ixRCU_MISC_CTRL                                                         0xc0000010
+#define ixRCU_VIRT_RESET_REQ                                                    0xc0000024
+#define ixCC_RCU_FUSES                                                          0xc00c0000
+#define ixCC_SMU_MISC_FUSES                                                     0xc00c0004
+#define ixCC_SCLK_VID_FUSES                                                     0xc00c0008
+#define ixCC_GIO_IOCCFG_FUSES                                                   0xc00c000c
+#define ixCC_GIO_IOC_FUSES                                                      0xc00c0010
+#define ixCC_SMU_TST_EFUSE1_MISC                                                0xc00c001c
+#define ixCC_TST_ID_STRAPS                                                      0xc00c0020
+#define ixCC_FCTRL_FUSES                                                        0xc00c0024
+#define ixCC_HARVEST_FUSES                                                      0xc00c0028
+#define ixSMU_MAIN_PLL_OP_FREQ                                                  0xe0003020
+#define ixSMU_STATUS                                                       0xe0003088
+#define ixSMU_FIRMWARE                                                     0xe00030a4
+#define ixSMU_INPUT_DATA                                                   0xe00030b8
+#define ixSMU_EFUSE_0                                                           0xc0100000
+#define ixFIRMWARE_FLAGS                                                        0x3f000
+#define ixTDC_STATUS                                                            0x3f004
+#define ixTDC_MV_AVERAGE                                                        0x3f008
+#define ixTDC_VRM_LIMIT                                                         0x3f00c
+#define ixFEATURE_STATUS                                                        0x3f010
+#define ixENTITY_TEMPERATURES_1                                                 0x3f014
+#define ixMCARB_DRAM_TIMING_TABLE_1                                             0x3f018
+#define ixMCARB_DRAM_TIMING_TABLE_2                                             0x3f01c
+#define ixMCARB_DRAM_TIMING_TABLE_3                                             0x3f020
+#define ixMCARB_DRAM_TIMING_TABLE_4                                             0x3f024
+#define ixMCARB_DRAM_TIMING_TABLE_5                                             0x3f028
+#define ixMCARB_DRAM_TIMING_TABLE_6                                             0x3f02c
+#define ixMCARB_DRAM_TIMING_TABLE_7                                             0x3f030
+#define ixMCARB_DRAM_TIMING_TABLE_8                                             0x3f034
+#define ixMCARB_DRAM_TIMING_TABLE_9                                             0x3f038
+#define ixMCARB_DRAM_TIMING_TABLE_10                                            0x3f03c
+#define ixMCARB_DRAM_TIMING_TABLE_11                                            0x3f040
+#define ixMCARB_DRAM_TIMING_TABLE_12                                            0x3f044
+#define ixMCARB_DRAM_TIMING_TABLE_13                                            0x3f048
+#define ixMCARB_DRAM_TIMING_TABLE_14                                            0x3f04c
+#define ixMCARB_DRAM_TIMING_TABLE_15                                            0x3f050
+#define ixMCARB_DRAM_TIMING_TABLE_16                                            0x3f054
+#define ixMCARB_DRAM_TIMING_TABLE_17                                            0x3f058
+#define ixMCARB_DRAM_TIMING_TABLE_18                                            0x3f05c
+#define ixMCARB_DRAM_TIMING_TABLE_19                                            0x3f060
+#define ixMCARB_DRAM_TIMING_TABLE_20                                            0x3f064
+#define ixMCARB_DRAM_TIMING_TABLE_21                                            0x3f068
+#define ixMCARB_DRAM_TIMING_TABLE_22                                            0x3f06c
+#define ixMCARB_DRAM_TIMING_TABLE_23                                            0x3f070
+#define ixMCARB_DRAM_TIMING_TABLE_24                                            0x3f074
+#define ixMCARB_DRAM_TIMING_TABLE_25                                            0x3f078
+#define ixMCARB_DRAM_TIMING_TABLE_26                                            0x3f07c
+#define ixMCARB_DRAM_TIMING_TABLE_27                                            0x3f080
+#define ixMCARB_DRAM_TIMING_TABLE_28                                            0x3f084
+#define ixMCARB_DRAM_TIMING_TABLE_29                                            0x3f088
+#define ixMCARB_DRAM_TIMING_TABLE_30                                            0x3f08c
+#define ixMCARB_DRAM_TIMING_TABLE_31                                            0x3f090
+#define ixMCARB_DRAM_TIMING_TABLE_32                                            0x3f094
+#define ixMCARB_DRAM_TIMING_TABLE_33                                            0x3f098
+#define ixMCARB_DRAM_TIMING_TABLE_34                                            0x3f09c
+#define ixMCARB_DRAM_TIMING_TABLE_35                                            0x3f0a0
+#define ixMCARB_DRAM_TIMING_TABLE_36                                            0x3f0a4
+#define ixMCARB_DRAM_TIMING_TABLE_37                                            0x3f0a8
+#define ixMCARB_DRAM_TIMING_TABLE_38                                            0x3f0ac
+#define ixMCARB_DRAM_TIMING_TABLE_39                                            0x3f0b0
+#define ixMCARB_DRAM_TIMING_TABLE_40                                            0x3f0b4
+#define ixMCARB_DRAM_TIMING_TABLE_41                                            0x3f0b8
+#define ixMCARB_DRAM_TIMING_TABLE_42                                            0x3f0bc
+#define ixMCARB_DRAM_TIMING_TABLE_43                                            0x3f0c0
+#define ixMCARB_DRAM_TIMING_TABLE_44                                            0x3f0c4
+#define ixMCARB_DRAM_TIMING_TABLE_45                                            0x3f0c8
+#define ixMCARB_DRAM_TIMING_TABLE_46                                            0x3f0cc
+#define ixMCARB_DRAM_TIMING_TABLE_47                                            0x3f0d0
+#define ixMCARB_DRAM_TIMING_TABLE_48                                            0x3f0d4
+#define ixMCARB_DRAM_TIMING_TABLE_49                                            0x3f0d8
+#define ixMCARB_DRAM_TIMING_TABLE_50                                            0x3f0dc
+#define ixMCARB_DRAM_TIMING_TABLE_51                                            0x3f0e0
+#define ixMCARB_DRAM_TIMING_TABLE_52                                            0x3f0e4
+#define ixMCARB_DRAM_TIMING_TABLE_53                                            0x3f0e8
+#define ixMCARB_DRAM_TIMING_TABLE_54                                            0x3f0ec
+#define ixMCARB_DRAM_TIMING_TABLE_55                                            0x3f0f0
+#define ixMCARB_DRAM_TIMING_TABLE_56                                            0x3f0f4
+#define ixMCARB_DRAM_TIMING_TABLE_57                                            0x3f0f8
+#define ixMCARB_DRAM_TIMING_TABLE_58                                            0x3f0fc
+#define ixMCARB_DRAM_TIMING_TABLE_59                                            0x3f100
+#define ixMCARB_DRAM_TIMING_TABLE_60                                            0x3f104
+#define ixMCARB_DRAM_TIMING_TABLE_61                                            0x3f108
+#define ixMCARB_DRAM_TIMING_TABLE_62                                            0x3f10c
+#define ixMCARB_DRAM_TIMING_TABLE_63                                            0x3f110
+#define ixMCARB_DRAM_TIMING_TABLE_64                                            0x3f114
+#define ixMCARB_DRAM_TIMING_TABLE_65                                            0x3f118
+#define ixMCARB_DRAM_TIMING_TABLE_66                                            0x3f11c
+#define ixMCARB_DRAM_TIMING_TABLE_67                                            0x3f120
+#define ixMCARB_DRAM_TIMING_TABLE_68                                            0x3f124
+#define ixMCARB_DRAM_TIMING_TABLE_69                                            0x3f128
+#define ixMCARB_DRAM_TIMING_TABLE_70                                            0x3f12c
+#define ixMCARB_DRAM_TIMING_TABLE_71                                            0x3f130
+#define ixMCARB_DRAM_TIMING_TABLE_72                                            0x3f134
+#define ixMCARB_DRAM_TIMING_TABLE_73                                            0x3f138
+#define ixMCARB_DRAM_TIMING_TABLE_74                                            0x3f13c
+#define ixMCARB_DRAM_TIMING_TABLE_75                                            0x3f140
+#define ixMCARB_DRAM_TIMING_TABLE_76                                            0x3f144
+#define ixMCARB_DRAM_TIMING_TABLE_77                                            0x3f148
+#define ixMCARB_DRAM_TIMING_TABLE_78                                            0x3f14c
+#define ixMCARB_DRAM_TIMING_TABLE_79                                            0x3f150
+#define ixMCARB_DRAM_TIMING_TABLE_80                                            0x3f154
+#define ixMCARB_DRAM_TIMING_TABLE_81                                            0x3f158
+#define ixMCARB_DRAM_TIMING_TABLE_82                                            0x3f15c
+#define ixMCARB_DRAM_TIMING_TABLE_83                                            0x3f160
+#define ixMCARB_DRAM_TIMING_TABLE_84                                            0x3f164
+#define ixMCARB_DRAM_TIMING_TABLE_85                                            0x3f168
+#define ixMCARB_DRAM_TIMING_TABLE_86                                            0x3f16c
+#define ixMCARB_DRAM_TIMING_TABLE_87                                            0x3f170
+#define ixMCARB_DRAM_TIMING_TABLE_88                                            0x3f174
+#define ixMCARB_DRAM_TIMING_TABLE_89                                            0x3f178
+#define ixMCARB_DRAM_TIMING_TABLE_90                                            0x3f17c
+#define ixMCARB_DRAM_TIMING_TABLE_91                                            0x3f180
+#define ixMCARB_DRAM_TIMING_TABLE_92                                            0x3f184
+#define ixMCARB_DRAM_TIMING_TABLE_93                                            0x3f188
+#define ixMCARB_DRAM_TIMING_TABLE_94                                            0x3f18c
+#define ixMCARB_DRAM_TIMING_TABLE_95                                            0x3f190
+#define ixMCARB_DRAM_TIMING_TABLE_96                                            0x3f194
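+/*
+ * Editorial note: the blocks above and below are flat firmware scratch
+ * tables of 32-bit dwords with a uniform 4-byte stride:
+ * MCARB_DRAM_TIMING_TABLE_n sits at 0x3f018 + 4 * (n - 1) for n = 1..96,
+ * and DPM_TABLE_n follows immediately at 0x3f198 + 4 * (n - 1) for
+ * n = 1..440.
+ */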
+#define ixDPM_TABLE_1                                                           0x3f198
+#define ixDPM_TABLE_2                                                           0x3f19c
+#define ixDPM_TABLE_3                                                           0x3f1a0
+#define ixDPM_TABLE_4                                                           0x3f1a4
+#define ixDPM_TABLE_5                                                           0x3f1a8
+#define ixDPM_TABLE_6                                                           0x3f1ac
+#define ixDPM_TABLE_7                                                           0x3f1b0
+#define ixDPM_TABLE_8                                                           0x3f1b4
+#define ixDPM_TABLE_9                                                           0x3f1b8
+#define ixDPM_TABLE_10                                                          0x3f1bc
+#define ixDPM_TABLE_11                                                          0x3f1c0
+#define ixDPM_TABLE_12                                                          0x3f1c4
+#define ixDPM_TABLE_13                                                          0x3f1c8
+#define ixDPM_TABLE_14                                                          0x3f1cc
+#define ixDPM_TABLE_15                                                          0x3f1d0
+#define ixDPM_TABLE_16                                                          0x3f1d4
+#define ixDPM_TABLE_17                                                          0x3f1d8
+#define ixDPM_TABLE_18                                                          0x3f1dc
+#define ixDPM_TABLE_19                                                          0x3f1e0
+#define ixDPM_TABLE_20                                                          0x3f1e4
+#define ixDPM_TABLE_21                                                          0x3f1e8
+#define ixDPM_TABLE_22                                                          0x3f1ec
+#define ixDPM_TABLE_23                                                          0x3f1f0
+#define ixDPM_TABLE_24                                                          0x3f1f4
+#define ixDPM_TABLE_25                                                          0x3f1f8
+#define ixDPM_TABLE_26                                                          0x3f1fc
+#define ixDPM_TABLE_27                                                          0x3f200
+#define ixDPM_TABLE_28                                                          0x3f204
+#define ixDPM_TABLE_29                                                          0x3f208
+#define ixDPM_TABLE_30                                                          0x3f20c
+#define ixDPM_TABLE_31                                                          0x3f210
+#define ixDPM_TABLE_32                                                          0x3f214
+#define ixDPM_TABLE_33                                                          0x3f218
+#define ixDPM_TABLE_34                                                          0x3f21c
+#define ixDPM_TABLE_35                                                          0x3f220
+#define ixDPM_TABLE_36                                                          0x3f224
+#define ixDPM_TABLE_37                                                          0x3f228
+#define ixDPM_TABLE_38                                                          0x3f22c
+#define ixDPM_TABLE_39                                                          0x3f230
+#define ixDPM_TABLE_40                                                          0x3f234
+#define ixDPM_TABLE_41                                                          0x3f238
+#define ixDPM_TABLE_42                                                          0x3f23c
+#define ixDPM_TABLE_43                                                          0x3f240
+#define ixDPM_TABLE_44                                                          0x3f244
+#define ixDPM_TABLE_45                                                          0x3f248
+#define ixDPM_TABLE_46                                                          0x3f24c
+#define ixDPM_TABLE_47                                                          0x3f250
+#define ixDPM_TABLE_48                                                          0x3f254
+#define ixDPM_TABLE_49                                                          0x3f258
+#define ixDPM_TABLE_50                                                          0x3f25c
+#define ixDPM_TABLE_51                                                          0x3f260
+#define ixDPM_TABLE_52                                                          0x3f264
+#define ixDPM_TABLE_53                                                          0x3f268
+#define ixDPM_TABLE_54                                                          0x3f26c
+#define ixDPM_TABLE_55                                                          0x3f270
+#define ixDPM_TABLE_56                                                          0x3f274
+#define ixDPM_TABLE_57                                                          0x3f278
+#define ixDPM_TABLE_58                                                          0x3f27c
+#define ixDPM_TABLE_59                                                          0x3f280
+#define ixDPM_TABLE_60                                                          0x3f284
+#define ixDPM_TABLE_61                                                          0x3f288
+#define ixDPM_TABLE_62                                                          0x3f28c
+#define ixDPM_TABLE_63                                                          0x3f290
+#define ixDPM_TABLE_64                                                          0x3f294
+#define ixDPM_TABLE_65                                                          0x3f298
+#define ixDPM_TABLE_66                                                          0x3f29c
+#define ixDPM_TABLE_67                                                          0x3f2a0
+#define ixDPM_TABLE_68                                                          0x3f2a4
+#define ixDPM_TABLE_69                                                          0x3f2a8
+#define ixDPM_TABLE_70                                                          0x3f2ac
+#define ixDPM_TABLE_71                                                          0x3f2b0
+#define ixDPM_TABLE_72                                                          0x3f2b4
+#define ixDPM_TABLE_73                                                          0x3f2b8
+#define ixDPM_TABLE_74                                                          0x3f2bc
+#define ixDPM_TABLE_75                                                          0x3f2c0
+#define ixDPM_TABLE_76                                                          0x3f2c4
+#define ixDPM_TABLE_77                                                          0x3f2c8
+#define ixDPM_TABLE_78                                                          0x3f2cc
+#define ixDPM_TABLE_79                                                          0x3f2d0
+#define ixDPM_TABLE_80                                                          0x3f2d4
+#define ixDPM_TABLE_81                                                          0x3f2d8
+#define ixDPM_TABLE_82                                                          0x3f2dc
+#define ixDPM_TABLE_83                                                          0x3f2e0
+#define ixDPM_TABLE_84                                                          0x3f2e4
+#define ixDPM_TABLE_85                                                          0x3f2e8
+#define ixDPM_TABLE_86                                                          0x3f2ec
+#define ixDPM_TABLE_87                                                          0x3f2f0
+#define ixDPM_TABLE_88                                                          0x3f2f4
+#define ixDPM_TABLE_89                                                          0x3f2f8
+#define ixDPM_TABLE_90                                                          0x3f2fc
+#define ixDPM_TABLE_91                                                          0x3f300
+#define ixDPM_TABLE_92                                                          0x3f304
+#define ixDPM_TABLE_93                                                          0x3f308
+#define ixDPM_TABLE_94                                                          0x3f30c
+#define ixDPM_TABLE_95                                                          0x3f310
+#define ixDPM_TABLE_96                                                          0x3f314
+#define ixDPM_TABLE_97                                                          0x3f318
+#define ixDPM_TABLE_98                                                          0x3f31c
+#define ixDPM_TABLE_99                                                          0x3f320
+#define ixDPM_TABLE_100                                                         0x3f324
+#define ixDPM_TABLE_101                                                         0x3f328
+#define ixDPM_TABLE_102                                                         0x3f32c
+#define ixDPM_TABLE_103                                                         0x3f330
+#define ixDPM_TABLE_104                                                         0x3f334
+#define ixDPM_TABLE_105                                                         0x3f338
+#define ixDPM_TABLE_106                                                         0x3f33c
+#define ixDPM_TABLE_107                                                         0x3f340
+#define ixDPM_TABLE_108                                                         0x3f344
+#define ixDPM_TABLE_109                                                         0x3f348
+#define ixDPM_TABLE_110                                                         0x3f34c
+#define ixDPM_TABLE_111                                                         0x3f350
+#define ixDPM_TABLE_112                                                         0x3f354
+#define ixDPM_TABLE_113                                                         0x3f358
+#define ixDPM_TABLE_114                                                         0x3f35c
+#define ixDPM_TABLE_115                                                         0x3f360
+#define ixDPM_TABLE_116                                                         0x3f364
+#define ixDPM_TABLE_117                                                         0x3f368
+#define ixDPM_TABLE_118                                                         0x3f36c
+#define ixDPM_TABLE_119                                                         0x3f370
+#define ixDPM_TABLE_120                                                         0x3f374
+#define ixDPM_TABLE_121                                                         0x3f378
+#define ixDPM_TABLE_122                                                         0x3f37c
+#define ixDPM_TABLE_123                                                         0x3f380
+#define ixDPM_TABLE_124                                                         0x3f384
+#define ixDPM_TABLE_125                                                         0x3f388
+#define ixDPM_TABLE_126                                                         0x3f38c
+#define ixDPM_TABLE_127                                                         0x3f390
+#define ixDPM_TABLE_128                                                         0x3f394
+#define ixDPM_TABLE_129                                                         0x3f398
+#define ixDPM_TABLE_130                                                         0x3f39c
+#define ixDPM_TABLE_131                                                         0x3f3a0
+#define ixDPM_TABLE_132                                                         0x3f3a4
+#define ixDPM_TABLE_133                                                         0x3f3a8
+#define ixDPM_TABLE_134                                                         0x3f3ac
+#define ixDPM_TABLE_135                                                         0x3f3b0
+#define ixDPM_TABLE_136                                                         0x3f3b4
+#define ixDPM_TABLE_137                                                         0x3f3b8
+#define ixDPM_TABLE_138                                                         0x3f3bc
+#define ixDPM_TABLE_139                                                         0x3f3c0
+#define ixDPM_TABLE_140                                                         0x3f3c4
+#define ixDPM_TABLE_141                                                         0x3f3c8
+#define ixDPM_TABLE_142                                                         0x3f3cc
+#define ixDPM_TABLE_143                                                         0x3f3d0
+#define ixDPM_TABLE_144                                                         0x3f3d4
+#define ixDPM_TABLE_145                                                         0x3f3d8
+#define ixDPM_TABLE_146                                                         0x3f3dc
+#define ixDPM_TABLE_147                                                         0x3f3e0
+#define ixDPM_TABLE_148                                                         0x3f3e4
+#define ixDPM_TABLE_149                                                         0x3f3e8
+#define ixDPM_TABLE_150                                                         0x3f3ec
+#define ixDPM_TABLE_151                                                         0x3f3f0
+#define ixDPM_TABLE_152                                                         0x3f3f4
+#define ixDPM_TABLE_153                                                         0x3f3f8
+#define ixDPM_TABLE_154                                                         0x3f3fc
+#define ixDPM_TABLE_155                                                         0x3f400
+#define ixDPM_TABLE_156                                                         0x3f404
+#define ixDPM_TABLE_157                                                         0x3f408
+#define ixDPM_TABLE_158                                                         0x3f40c
+#define ixDPM_TABLE_159                                                         0x3f410
+#define ixDPM_TABLE_160                                                         0x3f414
+#define ixDPM_TABLE_161                                                         0x3f418
+#define ixDPM_TABLE_162                                                         0x3f41c
+#define ixDPM_TABLE_163                                                         0x3f420
+#define ixDPM_TABLE_164                                                         0x3f424
+#define ixDPM_TABLE_165                                                         0x3f428
+#define ixDPM_TABLE_166                                                         0x3f42c
+#define ixDPM_TABLE_167                                                         0x3f430
+#define ixDPM_TABLE_168                                                         0x3f434
+#define ixDPM_TABLE_169                                                         0x3f438
+#define ixDPM_TABLE_170                                                         0x3f43c
+#define ixDPM_TABLE_171                                                         0x3f440
+#define ixDPM_TABLE_172                                                         0x3f444
+#define ixDPM_TABLE_173                                                         0x3f448
+#define ixDPM_TABLE_174                                                         0x3f44c
+#define ixDPM_TABLE_175                                                         0x3f450
+#define ixDPM_TABLE_176                                                         0x3f454
+#define ixDPM_TABLE_177                                                         0x3f458
+#define ixDPM_TABLE_178                                                         0x3f45c
+#define ixDPM_TABLE_179                                                         0x3f460
+#define ixDPM_TABLE_180                                                         0x3f464
+#define ixDPM_TABLE_181                                                         0x3f468
+#define ixDPM_TABLE_182                                                         0x3f46c
+#define ixDPM_TABLE_183                                                         0x3f470
+#define ixDPM_TABLE_184                                                         0x3f474
+#define ixDPM_TABLE_185                                                         0x3f478
+#define ixDPM_TABLE_186                                                         0x3f47c
+#define ixDPM_TABLE_187                                                         0x3f480
+#define ixDPM_TABLE_188                                                         0x3f484
+#define ixDPM_TABLE_189                                                         0x3f488
+#define ixDPM_TABLE_190                                                         0x3f48c
+#define ixDPM_TABLE_191                                                         0x3f490
+#define ixDPM_TABLE_192                                                         0x3f494
+#define ixDPM_TABLE_193                                                         0x3f498
+#define ixDPM_TABLE_194                                                         0x3f49c
+#define ixDPM_TABLE_195                                                         0x3f4a0
+#define ixDPM_TABLE_196                                                         0x3f4a4
+#define ixDPM_TABLE_197                                                         0x3f4a8
+#define ixDPM_TABLE_198                                                         0x3f4ac
+#define ixDPM_TABLE_199                                                         0x3f4b0
+#define ixDPM_TABLE_200                                                         0x3f4b4
+#define ixDPM_TABLE_201                                                         0x3f4b8
+#define ixDPM_TABLE_202                                                         0x3f4bc
+#define ixDPM_TABLE_203                                                         0x3f4c0
+#define ixDPM_TABLE_204                                                         0x3f4c4
+#define ixDPM_TABLE_205                                                         0x3f4c8
+#define ixDPM_TABLE_206                                                         0x3f4cc
+#define ixDPM_TABLE_207                                                         0x3f4d0
+#define ixDPM_TABLE_208                                                         0x3f4d4
+#define ixDPM_TABLE_209                                                         0x3f4d8
+#define ixDPM_TABLE_210                                                         0x3f4dc
+#define ixDPM_TABLE_211                                                         0x3f4e0
+#define ixDPM_TABLE_212                                                         0x3f4e4
+#define ixDPM_TABLE_213                                                         0x3f4e8
+#define ixDPM_TABLE_214                                                         0x3f4ec
+#define ixDPM_TABLE_215                                                         0x3f4f0
+#define ixDPM_TABLE_216                                                         0x3f4f4
+#define ixDPM_TABLE_217                                                         0x3f4f8
+#define ixDPM_TABLE_218                                                         0x3f4fc
+#define ixDPM_TABLE_219                                                         0x3f500
+#define ixDPM_TABLE_220                                                         0x3f504
+#define ixDPM_TABLE_221                                                         0x3f508
+#define ixDPM_TABLE_222                                                         0x3f50c
+#define ixDPM_TABLE_223                                                         0x3f510
+#define ixDPM_TABLE_224                                                         0x3f514
+#define ixDPM_TABLE_225                                                         0x3f518
+#define ixDPM_TABLE_226                                                         0x3f51c
+#define ixDPM_TABLE_227                                                         0x3f520
+#define ixDPM_TABLE_228                                                         0x3f524
+#define ixDPM_TABLE_229                                                         0x3f528
+#define ixDPM_TABLE_230                                                         0x3f52c
+#define ixDPM_TABLE_231                                                         0x3f530
+#define ixDPM_TABLE_232                                                         0x3f534
+#define ixDPM_TABLE_233                                                         0x3f538
+#define ixDPM_TABLE_234                                                         0x3f53c
+#define ixDPM_TABLE_235                                                         0x3f540
+#define ixDPM_TABLE_236                                                         0x3f544
+#define ixDPM_TABLE_237                                                         0x3f548
+#define ixDPM_TABLE_238                                                         0x3f54c
+#define ixDPM_TABLE_239                                                         0x3f550
+#define ixDPM_TABLE_240                                                         0x3f554
+#define ixDPM_TABLE_241                                                         0x3f558
+#define ixDPM_TABLE_242                                                         0x3f55c
+#define ixDPM_TABLE_243                                                         0x3f560
+#define ixDPM_TABLE_244                                                         0x3f564
+#define ixDPM_TABLE_245                                                         0x3f568
+#define ixDPM_TABLE_246                                                         0x3f56c
+#define ixDPM_TABLE_247                                                         0x3f570
+#define ixDPM_TABLE_248                                                         0x3f574
+#define ixDPM_TABLE_249                                                         0x3f578
+#define ixDPM_TABLE_250                                                         0x3f57c
+#define ixDPM_TABLE_251                                                         0x3f580
+#define ixDPM_TABLE_252                                                         0x3f584
+#define ixDPM_TABLE_253                                                         0x3f588
+#define ixDPM_TABLE_254                                                         0x3f58c
+#define ixDPM_TABLE_255                                                         0x3f590
+#define ixDPM_TABLE_256                                                         0x3f594
+#define ixDPM_TABLE_257                                                         0x3f598
+#define ixDPM_TABLE_258                                                         0x3f59c
+#define ixDPM_TABLE_259                                                         0x3f5a0
+#define ixDPM_TABLE_260                                                         0x3f5a4
+#define ixDPM_TABLE_261                                                         0x3f5a8
+#define ixDPM_TABLE_262                                                         0x3f5ac
+#define ixDPM_TABLE_263                                                         0x3f5b0
+#define ixDPM_TABLE_264                                                         0x3f5b4
+#define ixDPM_TABLE_265                                                         0x3f5b8
+#define ixDPM_TABLE_266                                                         0x3f5bc
+#define ixDPM_TABLE_267                                                         0x3f5c0
+#define ixDPM_TABLE_268                                                         0x3f5c4
+#define ixDPM_TABLE_269                                                         0x3f5c8
+#define ixDPM_TABLE_270                                                         0x3f5cc
+#define ixDPM_TABLE_271                                                         0x3f5d0
+#define ixDPM_TABLE_272                                                         0x3f5d4
+#define ixDPM_TABLE_273                                                         0x3f5d8
+#define ixDPM_TABLE_274                                                         0x3f5dc
+#define ixDPM_TABLE_275                                                         0x3f5e0
+#define ixDPM_TABLE_276                                                         0x3f5e4
+#define ixDPM_TABLE_277                                                         0x3f5e8
+#define ixDPM_TABLE_278                                                         0x3f5ec
+#define ixDPM_TABLE_279                                                         0x3f5f0
+#define ixDPM_TABLE_280                                                         0x3f5f4
+#define ixDPM_TABLE_281                                                         0x3f5f8
+#define ixDPM_TABLE_282                                                         0x3f5fc
+#define ixDPM_TABLE_283                                                         0x3f600
+#define ixDPM_TABLE_284                                                         0x3f604
+#define ixDPM_TABLE_285                                                         0x3f608
+#define ixDPM_TABLE_286                                                         0x3f60c
+#define ixDPM_TABLE_287                                                         0x3f610
+#define ixDPM_TABLE_288                                                         0x3f614
+#define ixDPM_TABLE_289                                                         0x3f618
+#define ixDPM_TABLE_290                                                         0x3f61c
+#define ixDPM_TABLE_291                                                         0x3f620
+#define ixDPM_TABLE_292                                                         0x3f624
+#define ixDPM_TABLE_293                                                         0x3f628
+#define ixDPM_TABLE_294                                                         0x3f62c
+#define ixDPM_TABLE_295                                                         0x3f630
+#define ixDPM_TABLE_296                                                         0x3f634
+#define ixDPM_TABLE_297                                                         0x3f638
+#define ixDPM_TABLE_298                                                         0x3f63c
+#define ixDPM_TABLE_299                                                         0x3f640
+#define ixDPM_TABLE_300                                                         0x3f644
+#define ixDPM_TABLE_301                                                         0x3f648
+#define ixDPM_TABLE_302                                                         0x3f64c
+#define ixDPM_TABLE_303                                                         0x3f650
+#define ixDPM_TABLE_304                                                         0x3f654
+#define ixDPM_TABLE_305                                                         0x3f658
+#define ixDPM_TABLE_306                                                         0x3f65c
+#define ixDPM_TABLE_307                                                         0x3f660
+#define ixDPM_TABLE_308                                                         0x3f664
+#define ixDPM_TABLE_309                                                         0x3f668
+#define ixDPM_TABLE_310                                                         0x3f66c
+#define ixDPM_TABLE_311                                                         0x3f670
+#define ixDPM_TABLE_312                                                         0x3f674
+#define ixDPM_TABLE_313                                                         0x3f678
+#define ixDPM_TABLE_314                                                         0x3f67c
+#define ixDPM_TABLE_315                                                         0x3f680
+#define ixDPM_TABLE_316                                                         0x3f684
+#define ixDPM_TABLE_317                                                         0x3f688
+#define ixDPM_TABLE_318                                                         0x3f68c
+#define ixDPM_TABLE_319                                                         0x3f690
+#define ixDPM_TABLE_320                                                         0x3f694
+#define ixDPM_TABLE_321                                                         0x3f698
+#define ixDPM_TABLE_322                                                         0x3f69c
+#define ixDPM_TABLE_323                                                         0x3f6a0
+#define ixDPM_TABLE_324                                                         0x3f6a4
+#define ixDPM_TABLE_325                                                         0x3f6a8
+#define ixDPM_TABLE_326                                                         0x3f6ac
+#define ixDPM_TABLE_327                                                         0x3f6b0
+#define ixDPM_TABLE_328                                                         0x3f6b4
+#define ixDPM_TABLE_329                                                         0x3f6b8
+#define ixDPM_TABLE_330                                                         0x3f6bc
+#define ixDPM_TABLE_331                                                         0x3f6c0
+#define ixDPM_TABLE_332                                                         0x3f6c4
+#define ixDPM_TABLE_333                                                         0x3f6c8
+#define ixDPM_TABLE_334                                                         0x3f6cc
+#define ixDPM_TABLE_335                                                         0x3f6d0
+#define ixDPM_TABLE_336                                                         0x3f6d4
+#define ixDPM_TABLE_337                                                         0x3f6d8
+#define ixDPM_TABLE_338                                                         0x3f6dc
+#define ixDPM_TABLE_339                                                         0x3f6e0
+#define ixDPM_TABLE_340                                                         0x3f6e4
+#define ixDPM_TABLE_341                                                         0x3f6e8
+#define ixDPM_TABLE_342                                                         0x3f6ec
+#define ixDPM_TABLE_343                                                         0x3f6f0
+#define ixDPM_TABLE_344                                                         0x3f6f4
+#define ixDPM_TABLE_345                                                         0x3f6f8
+#define ixDPM_TABLE_346                                                         0x3f6fc
+#define ixDPM_TABLE_347                                                         0x3f700
+#define ixDPM_TABLE_348                                                         0x3f704
+#define ixDPM_TABLE_349                                                         0x3f708
+#define ixDPM_TABLE_350                                                         0x3f70c
+#define ixDPM_TABLE_351                                                         0x3f710
+#define ixDPM_TABLE_352                                                         0x3f714
+#define ixDPM_TABLE_353                                                         0x3f718
+#define ixDPM_TABLE_354                                                         0x3f71c
+#define ixDPM_TABLE_355                                                         0x3f720
+#define ixDPM_TABLE_356                                                         0x3f724
+#define ixDPM_TABLE_357                                                         0x3f728
+#define ixDPM_TABLE_358                                                         0x3f72c
+#define ixDPM_TABLE_359                                                         0x3f730
+#define ixDPM_TABLE_360                                                         0x3f734
+#define ixDPM_TABLE_361                                                         0x3f738
+#define ixDPM_TABLE_362                                                         0x3f73c
+#define ixDPM_TABLE_363                                                         0x3f740
+#define ixDPM_TABLE_364                                                         0x3f744
+#define ixDPM_TABLE_365                                                         0x3f748
+#define ixDPM_TABLE_366                                                         0x3f74c
+#define ixDPM_TABLE_367                                                         0x3f750
+#define ixDPM_TABLE_368                                                         0x3f754
+#define ixDPM_TABLE_369                                                         0x3f758
+#define ixDPM_TABLE_370                                                         0x3f75c
+#define ixDPM_TABLE_371                                                         0x3f760
+#define ixDPM_TABLE_372                                                         0x3f764
+#define ixDPM_TABLE_373                                                         0x3f768
+#define ixDPM_TABLE_374                                                         0x3f76c
+#define ixDPM_TABLE_375                                                         0x3f770
+#define ixDPM_TABLE_376                                                         0x3f774
+#define ixDPM_TABLE_377                                                         0x3f778
+#define ixDPM_TABLE_378                                                         0x3f77c
+#define ixDPM_TABLE_379                                                         0x3f780
+#define ixDPM_TABLE_380                                                         0x3f784
+#define ixDPM_TABLE_381                                                         0x3f788
+#define ixDPM_TABLE_382                                                         0x3f78c
+#define ixDPM_TABLE_383                                                         0x3f790
+#define ixDPM_TABLE_384                                                         0x3f794
+#define ixDPM_TABLE_385                                                         0x3f798
+#define ixDPM_TABLE_386                                                         0x3f79c
+#define ixDPM_TABLE_387                                                         0x3f7a0
+#define ixDPM_TABLE_388                                                         0x3f7a4
+#define ixDPM_TABLE_389                                                         0x3f7a8
+#define ixDPM_TABLE_390                                                         0x3f7ac
+#define ixDPM_TABLE_391                                                         0x3f7b0
+#define ixDPM_TABLE_392                                                         0x3f7b4
+#define ixDPM_TABLE_393                                                         0x3f7b8
+#define ixDPM_TABLE_394                                                         0x3f7bc
+#define ixDPM_TABLE_395                                                         0x3f7c0
+#define ixDPM_TABLE_396                                                         0x3f7c4
+#define ixDPM_TABLE_397                                                         0x3f7c8
+#define ixDPM_TABLE_398                                                         0x3f7cc
+#define ixDPM_TABLE_399                                                         0x3f7d0
+#define ixDPM_TABLE_400                                                         0x3f7d4
+#define ixDPM_TABLE_401                                                         0x3f7d8
+#define ixDPM_TABLE_402                                                         0x3f7dc
+#define ixDPM_TABLE_403                                                         0x3f7e0
+#define ixDPM_TABLE_404                                                         0x3f7e4
+#define ixDPM_TABLE_405                                                         0x3f7e8
+#define ixDPM_TABLE_406                                                         0x3f7ec
+#define ixDPM_TABLE_407                                                         0x3f7f0
+#define ixDPM_TABLE_408                                                         0x3f7f4
+#define ixDPM_TABLE_409                                                         0x3f7f8
+#define ixDPM_TABLE_410                                                         0x3f7fc
+#define ixDPM_TABLE_411                                                         0x3f800
+#define ixDPM_TABLE_412                                                         0x3f804
+#define ixDPM_TABLE_413                                                         0x3f808
+#define ixDPM_TABLE_414                                                         0x3f80c
+#define ixDPM_TABLE_415                                                         0x3f810
+#define ixDPM_TABLE_416                                                         0x3f814
+#define ixDPM_TABLE_417                                                         0x3f818
+#define ixDPM_TABLE_418                                                         0x3f81c
+#define ixDPM_TABLE_419                                                         0x3f820
+#define ixDPM_TABLE_420                                                         0x3f824
+#define ixDPM_TABLE_421                                                         0x3f828
+#define ixDPM_TABLE_422                                                         0x3f82c
+#define ixDPM_TABLE_423                                                         0x3f830
+#define ixDPM_TABLE_424                                                         0x3f834
+#define ixDPM_TABLE_425                                                         0x3f838
+#define ixDPM_TABLE_426                                                         0x3f83c
+#define ixDPM_TABLE_427                                                         0x3f840
+#define ixDPM_TABLE_428                                                         0x3f844
+#define ixDPM_TABLE_429                                                         0x3f848
+#define ixDPM_TABLE_430                                                         0x3f84c
+#define ixDPM_TABLE_431                                                         0x3f850
+#define ixDPM_TABLE_432                                                         0x3f854
+#define ixDPM_TABLE_433                                                         0x3f858
+#define ixDPM_TABLE_434                                                         0x3f85c
+#define ixDPM_TABLE_435                                                         0x3f860
+#define ixDPM_TABLE_436                                                         0x3f864
+#define ixDPM_TABLE_437                                                         0x3f868
+#define ixDPM_TABLE_438                                                         0x3f86c
+#define ixDPM_TABLE_439                                                         0x3f870
+#define ixDPM_TABLE_440                                                         0x3f874
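+/*
+ * Editorial note: SOFT_REGISTERS_TABLE_n below steps by 4 from 0x3f89c,
+ * with one exception: TABLE_28 is placed at 0x3f888, inside the otherwise
+ * unused gap between DPM_TABLE_440 (0x3f874) and SOFT_REGISTERS_TABLE_1.
+ * The out-of-sequence value matches the vendor-generated header and is
+ * kept as-is.
+ */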
+#define ixSOFT_REGISTERS_TABLE_1                                                0x3f89c
+#define ixSOFT_REGISTERS_TABLE_2                                                0x3f8a0
+#define ixSOFT_REGISTERS_TABLE_3                                                0x3f8a4
+#define ixSOFT_REGISTERS_TABLE_4                                                0x3f8a8
+#define ixSOFT_REGISTERS_TABLE_5                                                0x3f8ac
+#define ixSOFT_REGISTERS_TABLE_6                                                0x3f8b0
+#define ixSOFT_REGISTERS_TABLE_7                                                0x3f8b4
+#define ixSOFT_REGISTERS_TABLE_8                                                0x3f8b8
+#define ixSOFT_REGISTERS_TABLE_9                                                0x3f8bc
+#define ixSOFT_REGISTERS_TABLE_10                                               0x3f8c0
+#define ixSOFT_REGISTERS_TABLE_11                                               0x3f8c4
+#define ixSOFT_REGISTERS_TABLE_12                                               0x3f8c8
+#define ixSOFT_REGISTERS_TABLE_13                                               0x3f8cc
+#define ixSOFT_REGISTERS_TABLE_14                                               0x3f8d0
+#define ixSOFT_REGISTERS_TABLE_15                                               0x3f8d4
+#define ixSOFT_REGISTERS_TABLE_16                                               0x3f8d8
+#define ixSOFT_REGISTERS_TABLE_17                                               0x3f8dc
+#define ixSOFT_REGISTERS_TABLE_18                                               0x3f8e0
+#define ixSOFT_REGISTERS_TABLE_19                                               0x3f8e4
+#define ixSOFT_REGISTERS_TABLE_20                                               0x3f8e8
+#define ixSOFT_REGISTERS_TABLE_21                                               0x3f8ec
+#define ixSOFT_REGISTERS_TABLE_22                                               0x3f8f0
+#define ixSOFT_REGISTERS_TABLE_23                                               0x3f8f4
+#define ixSOFT_REGISTERS_TABLE_24                                               0x3f8f8
+#define ixSOFT_REGISTERS_TABLE_25                                               0x3f8fc
+#define ixSOFT_REGISTERS_TABLE_26                                               0x3f900
+#define ixSOFT_REGISTERS_TABLE_27                                               0x3f904
+#define ixSOFT_REGISTERS_TABLE_28                                               0x3f888
+#define ixSOFT_REGISTERS_TABLE_29                                               0x3f90c
+#define ixSOFT_REGISTERS_TABLE_30                                               0x3f910
+#define ixPM_FUSES_1                                                            0x3f914
+#define ixPM_FUSES_2                                                            0x3f918
+#define ixPM_FUSES_3                                                            0x3f91c
+#define ixPM_FUSES_4                                                            0x3f920
+#define ixPM_FUSES_5                                                            0x3f924
+#define ixPM_FUSES_6                                                            0x3f928
+#define ixPM_FUSES_7                                                            0x3f92c
+#define ixPM_FUSES_8                                                            0x3f930
+#define ixPM_FUSES_9                                                            0x3f934
+#define ixPM_FUSES_10                                                           0x3f938
+#define ixPM_FUSES_11                                                           0x3f93c
+#define ixPM_FUSES_12                                                           0x3f940
+#define ixPM_FUSES_13                                                           0x3f944
+#define ixPM_FUSES_14                                                           0x3f948
+#define ixPM_FUSES_15                                                           0x3f94c
+#define ixSMU_PM_STATUS_0                                                       0x3fe00
+#define ixSMU_PM_STATUS_1                                                       0x3fe04
+#define ixSMU_PM_STATUS_2                                                       0x3fe08
+#define ixSMU_PM_STATUS_3                                                       0x3fe0c
+#define ixSMU_PM_STATUS_4                                                       0x3fe10
+#define ixSMU_PM_STATUS_5                                                       0x3fe14
+#define ixSMU_PM_STATUS_6                                                       0x3fe18
+#define ixSMU_PM_STATUS_7                                                       0x3fe1c
+#define ixSMU_PM_STATUS_8                                                       0x3fe20
+#define ixSMU_PM_STATUS_9                                                       0x3fe24
+#define ixSMU_PM_STATUS_10                                                      0x3fe28
+#define ixSMU_PM_STATUS_11                                                      0x3fe2c
+#define ixSMU_PM_STATUS_12                                                      0x3fe30
+#define ixSMU_PM_STATUS_13                                                      0x3fe34
+#define ixSMU_PM_STATUS_14                                                      0x3fe38
+#define ixSMU_PM_STATUS_15                                                      0x3fe3c
+#define ixSMU_PM_STATUS_16                                                      0x3fe40
+#define ixSMU_PM_STATUS_17                                                      0x3fe44
+#define ixSMU_PM_STATUS_18                                                      0x3fe48
+#define ixSMU_PM_STATUS_19                                                      0x3fe4c
+#define ixSMU_PM_STATUS_20                                                      0x3fe50
+#define ixSMU_PM_STATUS_21                                                      0x3fe54
+#define ixSMU_PM_STATUS_22                                                      0x3fe58
+#define ixSMU_PM_STATUS_23                                                      0x3fe5c
+#define ixSMU_PM_STATUS_24                                                      0x3fe60
+#define ixSMU_PM_STATUS_25                                                      0x3fe64
+#define ixSMU_PM_STATUS_26                                                      0x3fe68
+#define ixSMU_PM_STATUS_27                                                      0x3fe6c
+#define ixSMU_PM_STATUS_28                                                      0x3fe70
+#define ixSMU_PM_STATUS_29                                                      0x3fe74
+#define ixSMU_PM_STATUS_30                                                      0x3fe78
+#define ixSMU_PM_STATUS_31                                                      0x3fe7c
+#define ixSMU_PM_STATUS_32                                                      0x3fe80
+#define ixSMU_PM_STATUS_33                                                      0x3fe84
+#define ixSMU_PM_STATUS_34                                                      0x3fe88
+#define ixSMU_PM_STATUS_35                                                      0x3fe8c
+#define ixSMU_PM_STATUS_36                                                      0x3fe90
+#define ixSMU_PM_STATUS_37                                                      0x3fe94
+#define ixSMU_PM_STATUS_38                                                      0x3fe98
+#define ixSMU_PM_STATUS_39                                                      0x3fe9c
+#define ixSMU_PM_STATUS_40                                                      0x3fea0
+#define ixSMU_PM_STATUS_41                                                      0x3fea4
+#define ixSMU_PM_STATUS_42                                                      0x3fea8
+#define ixSMU_PM_STATUS_43                                                      0x3feac
+#define ixSMU_PM_STATUS_44                                                      0x3feb0
+#define ixSMU_PM_STATUS_45                                                      0x3feb4
+#define ixSMU_PM_STATUS_46                                                      0x3feb8
+#define ixSMU_PM_STATUS_47                                                      0x3febc
+#define ixSMU_PM_STATUS_48                                                      0x3fec0
+#define ixSMU_PM_STATUS_49                                                      0x3fec4
+#define ixSMU_PM_STATUS_50                                                      0x3fec8
+#define ixSMU_PM_STATUS_51                                                      0x3fecc
+#define ixSMU_PM_STATUS_52                                                      0x3fed0
+#define ixSMU_PM_STATUS_53                                                      0x3fed4
+#define ixSMU_PM_STATUS_54                                                      0x3fed8
+#define ixSMU_PM_STATUS_55                                                      0x3fedc
+#define ixSMU_PM_STATUS_56                                                      0x3fee0
+#define ixSMU_PM_STATUS_57                                                      0x3fee4
+#define ixSMU_PM_STATUS_58                                                      0x3fee8
+#define ixSMU_PM_STATUS_59                                                      0x3feec
+#define ixSMU_PM_STATUS_60                                                      0x3fef0
+#define ixSMU_PM_STATUS_61                                                      0x3fef4
+#define ixSMU_PM_STATUS_62                                                      0x3fef8
+#define ixSMU_PM_STATUS_63                                                      0x3fefc
+#define ixSMU_PM_STATUS_64                                                      0x3ff00
+#define ixSMU_PM_STATUS_65                                                      0x3ff04
+#define ixSMU_PM_STATUS_66                                                      0x3ff08
+#define ixSMU_PM_STATUS_67                                                      0x3ff0c
+#define ixSMU_PM_STATUS_68                                                      0x3ff10
+#define ixSMU_PM_STATUS_69                                                      0x3ff14
+#define ixSMU_PM_STATUS_70                                                      0x3ff18
+#define ixSMU_PM_STATUS_71                                                      0x3ff1c
+#define ixSMU_PM_STATUS_72                                                      0x3ff20
+#define ixSMU_PM_STATUS_73                                                      0x3ff24
+#define ixSMU_PM_STATUS_74                                                      0x3ff28
+#define ixSMU_PM_STATUS_75                                                      0x3ff2c
+#define ixSMU_PM_STATUS_76                                                      0x3ff30
+#define ixSMU_PM_STATUS_77                                                      0x3ff34
+#define ixSMU_PM_STATUS_78                                                      0x3ff38
+#define ixSMU_PM_STATUS_79                                                      0x3ff3c
+#define ixSMU_PM_STATUS_80                                                      0x3ff40
+#define ixSMU_PM_STATUS_81                                                      0x3ff44
+#define ixSMU_PM_STATUS_82                                                      0x3ff48
+#define ixSMU_PM_STATUS_83                                                      0x3ff4c
+#define ixSMU_PM_STATUS_84                                                      0x3ff50
+#define ixSMU_PM_STATUS_85                                                      0x3ff54
+#define ixSMU_PM_STATUS_86                                                      0x3ff58
+#define ixSMU_PM_STATUS_87                                                      0x3ff5c
+#define ixSMU_PM_STATUS_88                                                      0x3ff60
+#define ixSMU_PM_STATUS_89                                                      0x3ff64
+#define ixSMU_PM_STATUS_90                                                      0x3ff68
+#define ixSMU_PM_STATUS_91                                                      0x3ff6c
+#define ixSMU_PM_STATUS_92                                                      0x3ff70
+#define ixSMU_PM_STATUS_93                                                      0x3ff74
+#define ixSMU_PM_STATUS_94                                                      0x3ff78
+#define ixSMU_PM_STATUS_95                                                      0x3ff7c
+#define ixSMU_PM_STATUS_96                                                      0x3ff80
+#define ixSMU_PM_STATUS_97                                                      0x3ff84
+#define ixSMU_PM_STATUS_98                                                      0x3ff88
+#define ixSMU_PM_STATUS_99                                                      0x3ff8c
+#define ixSMU_PM_STATUS_100                                                     0x3ff90
+#define ixSMU_PM_STATUS_101                                                     0x3ff94
+#define ixSMU_PM_STATUS_102                                                     0x3ff98
+#define ixSMU_PM_STATUS_103                                                     0x3ff9c
+#define ixSMU_PM_STATUS_104                                                     0x3ffa0
+#define ixSMU_PM_STATUS_105                                                     0x3ffa4
+#define ixSMU_PM_STATUS_106                                                     0x3ffa8
+#define ixSMU_PM_STATUS_107                                                     0x3ffac
+#define ixSMU_PM_STATUS_108                                                     0x3ffb0
+#define ixSMU_PM_STATUS_109                                                     0x3ffb4
+#define ixSMU_PM_STATUS_110                                                     0x3ffb8
+#define ixSMU_PM_STATUS_111                                                     0x3ffbc
+#define ixSMU_PM_STATUS_112                                                     0x3ffc0
+#define ixSMU_PM_STATUS_113                                                     0x3ffc4
+#define ixSMU_PM_STATUS_114                                                     0x3ffc8
+#define ixSMU_PM_STATUS_115                                                     0x3ffcc
+#define ixSMU_PM_STATUS_116                                                     0x3ffd0
+#define ixSMU_PM_STATUS_117                                                     0x3ffd4
+#define ixSMU_PM_STATUS_118                                                     0x3ffd8
+#define ixSMU_PM_STATUS_119                                                     0x3ffdc
+#define ixSMU_PM_STATUS_120                                                     0x3ffe0
+#define ixSMU_PM_STATUS_121                                                     0x3ffe4
+#define ixSMU_PM_STATUS_122                                                     0x3ffe8
+#define ixSMU_PM_STATUS_123                                                     0x3ffec
+#define ixSMU_PM_STATUS_124                                                     0x3fff0
+#define ixSMU_PM_STATUS_125                                                     0x3fff4
+#define ixSMU_PM_STATUS_126                                                     0x3fff8
+#define ixSMU_PM_STATUS_127                                                     0x3fffc
+#define ixCG_THERMAL_INT_ENA                                                    0xc2100024
+#define ixCG_THERMAL_INT_CTRL                                                   0xc2100028
+#define ixCG_THERMAL_INT_STATUS                                                 0xc210002c
+#define ixCG_THERMAL_CTRL                                                       0xc0300004
+#define ixCG_THERMAL_STATUS                                                     0xc0300008
+#define ixCG_THERMAL_INT                                                        0xc030000c
+#define ixCG_MULT_THERMAL_CTRL                                                  0xc0300010
+#define ixCG_MULT_THERMAL_STATUS                                                0xc0300014
+#define ixTHM_TMON2_CTRL                                                        0xc0300034
+#define ixTHM_TMON2_CTRL2                                                       0xc0300038
+#define ixTHM_TMON2_CSR_WR                                                      0xc0300054
+#define ixTHM_TMON2_CSR_RD                                                      0xc0300058
+#define ixCG_FDO_CTRL0                                                          0xc0300064
+#define ixCG_FDO_CTRL1                                                          0xc0300068
+#define ixCG_FDO_CTRL2                                                          0xc030006c
+#define ixCG_TACH_CTRL                                                          0xc0300070
+#define ixCG_TACH_STATUS                                                        0xc0300074
+#define ixCC_THM_STRAPS0                                                        0xc0300080
+#define ixTHM_TMON0_RDIL0_DATA                                                  0xc0300100
+#define ixTHM_TMON0_RDIL1_DATA                                                  0xc0300104
+#define ixTHM_TMON0_RDIL2_DATA                                                  0xc0300108
+#define ixTHM_TMON0_RDIL3_DATA                                                  0xc030010c
+#define ixTHM_TMON0_RDIL4_DATA                                                  0xc0300110
+#define ixTHM_TMON0_RDIL5_DATA                                                  0xc0300114
+#define ixTHM_TMON0_RDIL6_DATA                                                  0xc0300118
+#define ixTHM_TMON0_RDIL7_DATA                                                  0xc030011c
+#define ixTHM_TMON0_RDIL8_DATA                                                  0xc0300120
+#define ixTHM_TMON0_RDIL9_DATA                                                  0xc0300124
+#define ixTHM_TMON0_RDIL10_DATA                                                 0xc0300128
+#define ixTHM_TMON0_RDIL11_DATA                                                 0xc030012c
+#define ixTHM_TMON0_RDIL12_DATA                                                 0xc0300130
+#define ixTHM_TMON0_RDIL13_DATA                                                 0xc0300134
+#define ixTHM_TMON0_RDIL14_DATA                                                 0xc0300138
+#define ixTHM_TMON0_RDIL15_DATA                                                 0xc030013c
+#define ixTHM_TMON0_RDIR0_DATA                                                  0xc0300140
+#define ixTHM_TMON0_RDIR1_DATA                                                  0xc0300144
+#define ixTHM_TMON0_RDIR2_DATA                                                  0xc0300148
+#define ixTHM_TMON0_RDIR3_DATA                                                  0xc030014c
+#define ixTHM_TMON0_RDIR4_DATA                                                  0xc0300150
+#define ixTHM_TMON0_RDIR5_DATA                                                  0xc0300154
+#define ixTHM_TMON0_RDIR6_DATA                                                  0xc0300158
+#define ixTHM_TMON0_RDIR7_DATA                                                  0xc030015c
+#define ixTHM_TMON0_RDIR8_DATA                                                  0xc0300160
+#define ixTHM_TMON0_RDIR9_DATA                                                  0xc0300164
+#define ixTHM_TMON0_RDIR10_DATA                                                 0xc0300168
+#define ixTHM_TMON0_RDIR11_DATA                                                 0xc030016c
+#define ixTHM_TMON0_RDIR12_DATA                                                 0xc0300170
+#define ixTHM_TMON0_RDIR13_DATA                                                 0xc0300174
+#define ixTHM_TMON0_RDIR14_DATA                                                 0xc0300178
+#define ixTHM_TMON0_RDIR15_DATA                                                 0xc030017c
+#define ixTHM_TMON1_RDIL0_DATA                                                  0xc0300180
+#define ixTHM_TMON1_RDIL1_DATA                                                  0xc0300184
+#define ixTHM_TMON1_RDIL2_DATA                                                  0xc0300188
+#define ixTHM_TMON1_RDIL3_DATA                                                  0xc030018c
+#define ixTHM_TMON1_RDIL4_DATA                                                  0xc0300190
+#define ixTHM_TMON1_RDIL5_DATA                                                  0xc0300194
+#define ixTHM_TMON1_RDIL6_DATA                                                  0xc0300198
+#define ixTHM_TMON1_RDIL7_DATA                                                  0xc030019c
+#define ixTHM_TMON1_RDIL8_DATA                                                  0xc03001a0
+#define ixTHM_TMON1_RDIL9_DATA                                                  0xc03001a4
+#define ixTHM_TMON1_RDIL10_DATA                                                 0xc03001a8
+#define ixTHM_TMON1_RDIL11_DATA                                                 0xc03001ac
+#define ixTHM_TMON1_RDIL12_DATA                                                 0xc03001b0
+#define ixTHM_TMON1_RDIL13_DATA                                                 0xc03001b4
+#define ixTHM_TMON1_RDIL14_DATA                                                 0xc03001b8
+#define ixTHM_TMON1_RDIL15_DATA                                                 0xc03001bc
+#define ixTHM_TMON1_RDIR0_DATA                                                  0xc03001c0
+#define ixTHM_TMON1_RDIR1_DATA                                                  0xc03001c4
+#define ixTHM_TMON1_RDIR2_DATA                                                  0xc03001c8
+#define ixTHM_TMON1_RDIR3_DATA                                                  0xc03001cc
+#define ixTHM_TMON1_RDIR4_DATA                                                  0xc03001d0
+#define ixTHM_TMON1_RDIR5_DATA                                                  0xc03001d4
+#define ixTHM_TMON1_RDIR6_DATA                                                  0xc03001d8
+#define ixTHM_TMON1_RDIR7_DATA                                                  0xc03001dc
+#define ixTHM_TMON1_RDIR8_DATA                                                  0xc03001e0
+#define ixTHM_TMON1_RDIR9_DATA                                                  0xc03001e4
+#define ixTHM_TMON1_RDIR10_DATA                                                 0xc03001e8
+#define ixTHM_TMON1_RDIR11_DATA                                                 0xc03001ec
+#define ixTHM_TMON1_RDIR12_DATA                                                 0xc03001f0
+#define ixTHM_TMON1_RDIR13_DATA                                                 0xc03001f4
+#define ixTHM_TMON1_RDIR14_DATA                                                 0xc03001f8
+#define ixTHM_TMON1_RDIR15_DATA                                                 0xc03001fc
+#define ixTHM_TMON2_RDIL0_DATA                                                  0xc0300200
+#define ixTHM_TMON2_RDIL1_DATA                                                  0xc0300204
+#define ixTHM_TMON2_RDIL2_DATA                                                  0xc0300208
+#define ixTHM_TMON2_RDIL3_DATA                                                  0xc030020c
+#define ixTHM_TMON2_RDIL4_DATA                                                  0xc0300210
+#define ixTHM_TMON2_RDIL5_DATA                                                  0xc0300214
+#define ixTHM_TMON2_RDIL6_DATA                                                  0xc0300218
+#define ixTHM_TMON2_RDIL7_DATA                                                  0xc030021c
+#define ixTHM_TMON2_RDIL8_DATA                                                  0xc0300220
+#define ixTHM_TMON2_RDIL9_DATA                                                  0xc0300224
+#define ixTHM_TMON2_RDIL10_DATA                                                 0xc0300228
+#define ixTHM_TMON2_RDIL11_DATA                                                 0xc030022c
+#define ixTHM_TMON2_RDIL12_DATA                                                 0xc0300230
+#define ixTHM_TMON2_RDIL13_DATA                                                 0xc0300234
+#define ixTHM_TMON2_RDIL14_DATA                                                 0xc0300238
+#define ixTHM_TMON2_RDIL15_DATA                                                 0xc030023c
+#define ixTHM_TMON2_RDIR0_DATA                                                  0xc0300240
+#define ixTHM_TMON2_RDIR1_DATA                                                  0xc0300244
+#define ixTHM_TMON2_RDIR2_DATA                                                  0xc0300248
+#define ixTHM_TMON2_RDIR3_DATA                                                  0xc030024c
+#define ixTHM_TMON2_RDIR4_DATA                                                  0xc0300250
+#define ixTHM_TMON2_RDIR5_DATA                                                  0xc0300254
+#define ixTHM_TMON2_RDIR6_DATA                                                  0xc0300258
+#define ixTHM_TMON2_RDIR7_DATA                                                  0xc030025c
+#define ixTHM_TMON2_RDIR8_DATA                                                  0xc0300260
+#define ixTHM_TMON2_RDIR9_DATA                                                  0xc0300264
+#define ixTHM_TMON2_RDIR10_DATA                                                 0xc0300268
+#define ixTHM_TMON2_RDIR11_DATA                                                 0xc030026c
+#define ixTHM_TMON2_RDIR12_DATA                                                 0xc0300270
+#define ixTHM_TMON2_RDIR13_DATA                                                 0xc0300274
+#define ixTHM_TMON2_RDIR14_DATA                                                 0xc0300278
+#define ixTHM_TMON2_RDIR15_DATA                                                 0xc030027c
+#define ixTHM_TMON0_INT_DATA                                                    0xc0300300
+#define ixTHM_TMON1_INT_DATA                                                    0xc0300304
+#define ixTHM_TMON2_INT_DATA                                                    0xc0300308
+#define ixTHM_TMON0_DEBUG                                                       0xc0300310
+#define ixTHM_TMON1_DEBUG                                                       0xc0300314
+#define ixTHM_TMON2_DEBUG                                                       0xc0300318
+#define ixTHM_TMON0_STATUS                                                      0xc0300320
+#define ixTHM_TMON1_STATUS                                                      0xc0300324
+#define ixTHM_TMON2_STATUS                                                      0xc0300328
+#define ixGENERAL_PWRMGT                                                        0xc0200000
+#define ixCNB_PWRMGT_CNTL                                                       0xc0200004
+#define ixSCLK_PWRMGT_CNTL                                                      0xc0200008
+#define ixTARGET_AND_CURRENT_PROFILE_INDEX                                      0xc0200014
+#define ixPWR_PCC_CONTROL                                                       0xc0200018
+#define ixPWR_PCC_GPIO_SELECT                                                   0xc020001c
+#define ixCG_FREQ_TRAN_VOTING_0                                                 0xc02001a8
+#define ixCG_FREQ_TRAN_VOTING_1                                                 0xc02001ac
+#define ixCG_FREQ_TRAN_VOTING_2                                                 0xc02001b0
+#define ixCG_FREQ_TRAN_VOTING_3                                                 0xc02001b4
+#define ixCG_FREQ_TRAN_VOTING_4                                                 0xc02001b8
+#define ixCG_FREQ_TRAN_VOTING_5                                                 0xc02001bc
+#define ixCG_FREQ_TRAN_VOTING_6                                                 0xc02001c0
+#define ixCG_FREQ_TRAN_VOTING_7                                                 0xc02001c4
+#define ixPLL_TEST_CNTL                                                         0xc020003c
+#define ixCG_STATIC_SCREEN_PARAMETER                                            0xc0200044
+#define ixCG_DISPLAY_GAP_CNTL                                                   0xc0200060
+#define ixCG_DISPLAY_GAP_CNTL2                                                  0xc0200230
+#define ixCG_ACPI_CNTL                                                          0xc0200064
+#define ixSCLK_DEEP_SLEEP_CNTL                                                  0xc0200080
+#define ixSCLK_DEEP_SLEEP_CNTL2                                                 0xc0200084
+#define ixSCLK_DEEP_SLEEP_CNTL3                                                 0xc020009c
+#define ixSCLK_DEEP_SLEEP_MISC_CNTL                                             0xc0200088
+#define ixLCLK_DEEP_SLEEP_CNTL                                                  0xc020008c
+#define ixLCLK_DEEP_SLEEP_CNTL2                                                 0xc0200310
+#define ixTARGET_AND_CURRENT_PROFILE_INDEX_1                                    0xc02000f0
+#define ixCG_ULV_PARAMETER                                                      0xc020015c
+#define ixSCLK_MIN_DIV                                                          0xc02003ac
+#define ixPWR_AVFS_SEL                                                          0xc0200384
+#define ixPWR_AVFS_CNTL                                                         0xc0200388
+#define ixPWR_AVFS0_CNTL_STATUS                                                 0xc0200400
+#define ixPWR_AVFS1_CNTL_STATUS                                                 0xc0200404
+#define ixPWR_AVFS2_CNTL_STATUS                                                 0xc0200408
+#define ixPWR_AVFS3_CNTL_STATUS                                                 0xc020040c
+#define ixPWR_AVFS4_CNTL_STATUS                                                 0xc0200410
+#define ixPWR_AVFS5_CNTL_STATUS                                                 0xc0200414
+#define ixPWR_AVFS6_CNTL_STATUS                                                 0xc0200418
+#define ixPWR_AVFS7_CNTL_STATUS                                                 0xc020041c
+#define ixPWR_AVFS8_CNTL_STATUS                                                 0xc0200420
+#define ixPWR_AVFS9_CNTL_STATUS                                                 0xc0200424
+#define ixPWR_AVFS10_CNTL_STATUS                                                0xc0200428
+#define ixPWR_AVFS11_CNTL_STATUS                                                0xc020042c
+#define ixPWR_AVFS12_CNTL_STATUS                                                0xc0200430
+#define ixPWR_AVFS13_CNTL_STATUS                                                0xc0200434
+#define ixPWR_AVFS14_CNTL_STATUS                                                0xc0200438
+#define ixPWR_AVFS15_CNTL_STATUS                                                0xc020043c
+#define ixPWR_AVFS16_CNTL_STATUS                                                0xc0200440
+#define ixPWR_AVFS17_CNTL_STATUS                                                0xc0200444
+#define ixPWR_AVFS18_CNTL_STATUS                                                0xc0200448
+#define ixPWR_AVFS19_CNTL_STATUS                                                0xc020044c
+#define ixPWR_AVFS20_CNTL_STATUS                                                0xc0200450
+#define ixPWR_AVFS21_CNTL_STATUS                                                0xc0200454
+#define ixPWR_AVFS22_CNTL_STATUS                                                0xc0200458
+#define ixPWR_AVFS23_CNTL_STATUS                                                0xc020045c
+#define ixPWR_AVFS24_CNTL_STATUS                                                0xc0200460
+#define ixPWR_AVFS25_CNTL_STATUS                                                0xc0200464
+#define ixPWR_AVFS26_CNTL_STATUS                                                0xc0200468
+#define ixPWR_AVFS27_CNTL_STATUS                                                0xc020046c
+#define ixPWR_CKS_ENABLE                                                        0xc020034c
+#define ixPWR_CKS_CNTL                                                          0xc0200350
+#define ixPWR_DISP_TIMER_CONTROL                                                0xc02003c0
+#define ixPWR_DISP_TIMER_DEBUG                                                  0xc02003c4
+#define ixPWR_DISP_TIMER2_CONTROL                                               0xc02003c8
+#define ixPWR_DISP_TIMER2_DEBUG                                                 0xc02003cc
+#define ixPWR_DISP_TIMER_CONTROL2                                               0xc0200378
+#define ixVDDGFX_IDLE_PARAMETER                                                 0xc020036c
+#define ixVDDGFX_IDLE_CONTROL                                                   0xc0200370
+#define ixVDDGFX_IDLE_EXIT                                                      0xc0200374
+#define ixLCAC_MC0_CNTL                                                         0xc0400130
+#define ixLCAC_MC0_OVR_SEL                                                      0xc0400134
+#define ixLCAC_MC0_OVR_VAL                                                      0xc0400138
+#define ixLCAC_MC1_CNTL                                                         0xc040013c
+#define ixLCAC_MC1_OVR_SEL                                                      0xc0400140
+#define ixLCAC_MC1_OVR_VAL                                                      0xc0400144
+#define ixLCAC_MC2_CNTL                                                         0xc0400148
+#define ixLCAC_MC2_OVR_SEL                                                      0xc040014c
+#define ixLCAC_MC2_OVR_VAL                                                      0xc0400150
+#define ixLCAC_MC3_CNTL                                                         0xc0400154
+#define ixLCAC_MC3_OVR_SEL                                                      0xc0400158
+#define ixLCAC_MC3_OVR_VAL                                                      0xc040015c
+#define ixLCAC_MC4_CNTL                                                         0xc0400d60
+#define ixLCAC_MC4_OVR_SEL                                                      0xc0400d64
+#define ixLCAC_MC4_OVR_VAL                                                      0xc0400d68
+#define ixLCAC_MC5_CNTL                                                         0xc0400d6c
+#define ixLCAC_MC5_OVR_SEL                                                      0xc0400d70
+#define ixLCAC_MC5_OVR_VAL                                                      0xc0400d74
+#define ixLCAC_MC6_CNTL                                                         0xc0400d78
+#define ixLCAC_MC6_OVR_SEL                                                      0xc0400d7c
+#define ixLCAC_MC6_OVR_VAL                                                      0xc0400d80
+#define ixLCAC_MC7_CNTL                                                         0xc0400d84
+#define ixLCAC_MC7_OVR_SEL                                                      0xc0400d88
+#define ixLCAC_MC7_OVR_VAL                                                      0xc0400d8c
+#define ixLCAC_CPL_CNTL                                                         0xc0400160
+#define ixLCAC_CPL_OVR_SEL                                                      0xc0400164
+#define ixLCAC_CPL_OVR_VAL                                                      0xc0400168
+#define mmROM_SMC_IND_INDEX                                                     0x80
+#define mmROM0_ROM_SMC_IND_INDEX                                                0x80
+#define mmROM1_ROM_SMC_IND_INDEX                                                0x82
+#define mmROM2_ROM_SMC_IND_INDEX                                                0x84
+#define mmROM3_ROM_SMC_IND_INDEX                                                0x86
+#define mmROM_SMC_IND_DATA                                                      0x81
+#define mmROM0_ROM_SMC_IND_DATA                                                 0x81
+#define mmROM1_ROM_SMC_IND_DATA                                                 0x83
+#define mmROM2_ROM_SMC_IND_DATA                                                 0x85
+#define mmROM3_ROM_SMC_IND_DATA                                                 0x87
+#define ixROM_CNTL                                                              0xc0600000
+#define ixPAGE_MIRROR_CNTL                                                      0xc0600004
+#define ixROM_STATUS                                                            0xc0600008
+#define ixCGTT_ROM_CLK_CTRL0                                                    0xc060000c
+#define ixROM_INDEX                                                             0xc0600010
+#define ixROM_DATA                                                              0xc0600014
+#define ixROM_START                                                             0xc0600018
+#define ixROM_SW_CNTL                                                           0xc060001c
+#define ixROM_SW_STATUS                                                         0xc0600020
+#define ixROM_SW_COMMAND                                                        0xc0600024
+#define ixROM_SW_DATA_1                                                         0xc0600028
+#define ixROM_SW_DATA_2                                                         0xc060002c
+#define ixROM_SW_DATA_3                                                         0xc0600030
+#define ixROM_SW_DATA_4                                                         0xc0600034
+#define ixROM_SW_DATA_5                                                         0xc0600038
+#define ixROM_SW_DATA_6                                                         0xc060003c
+#define ixROM_SW_DATA_7                                                         0xc0600040
+#define ixROM_SW_DATA_8                                                         0xc0600044
+#define ixROM_SW_DATA_9                                                         0xc0600048
+#define ixROM_SW_DATA_10                                                        0xc060004c
+#define ixROM_SW_DATA_11                                                        0xc0600050
+#define ixROM_SW_DATA_12                                                        0xc0600054
+#define ixROM_SW_DATA_13                                                        0xc0600058
+#define ixROM_SW_DATA_14                                                        0xc060005c
+#define ixROM_SW_DATA_15                                                        0xc0600060
+#define ixROM_SW_DATA_16                                                        0xc0600064
+#define ixROM_SW_DATA_17                                                        0xc0600068
+#define ixROM_SW_DATA_18                                                        0xc060006c
+#define ixROM_SW_DATA_19                                                        0xc0600070
+#define ixROM_SW_DATA_20                                                        0xc0600074
+#define ixROM_SW_DATA_21                                                        0xc0600078
+#define ixROM_SW_DATA_22                                                        0xc060007c
+#define ixROM_SW_DATA_23                                                        0xc0600080
+#define ixROM_SW_DATA_24                                                        0xc0600084
+#define ixROM_SW_DATA_25                                                        0xc0600088
+#define ixROM_SW_DATA_26                                                        0xc060008c
+#define ixROM_SW_DATA_27                                                        0xc0600090
+#define ixROM_SW_DATA_28                                                        0xc0600094
+#define ixROM_SW_DATA_29                                                        0xc0600098
+#define ixROM_SW_DATA_30                                                        0xc060009c
+#define ixROM_SW_DATA_31                                                        0xc06000a0
+#define ixROM_SW_DATA_32                                                        0xc06000a4
+#define ixROM_SW_DATA_33                                                        0xc06000a8
+#define ixROM_SW_DATA_34                                                        0xc06000ac
+#define ixROM_SW_DATA_35                                                        0xc06000b0
+#define ixROM_SW_DATA_36                                                        0xc06000b4
+#define ixROM_SW_DATA_37                                                        0xc06000b8
+#define ixROM_SW_DATA_38                                                        0xc06000bc
+#define ixROM_SW_DATA_39                                                        0xc06000c0
+#define ixROM_SW_DATA_40                                                        0xc06000c4
+#define ixROM_SW_DATA_41                                                        0xc06000c8
+#define ixROM_SW_DATA_42                                                        0xc06000cc
+#define ixROM_SW_DATA_43                                                        0xc06000d0
+#define ixROM_SW_DATA_44                                                        0xc06000d4
+#define ixROM_SW_DATA_45                                                        0xc06000d8
+#define ixROM_SW_DATA_46                                                        0xc06000dc
+#define ixROM_SW_DATA_47                                                        0xc06000e0
+#define ixROM_SW_DATA_48                                                        0xc06000e4
+#define ixROM_SW_DATA_49                                                        0xc06000e8
+#define ixROM_SW_DATA_50                                                        0xc06000ec
+#define ixROM_SW_DATA_51                                                        0xc06000f0
+#define ixROM_SW_DATA_52                                                        0xc06000f4
+#define ixROM_SW_DATA_53                                                        0xc06000f8
+#define ixROM_SW_DATA_54                                                        0xc06000fc
+#define ixROM_SW_DATA_55                                                        0xc0600100
+#define ixROM_SW_DATA_56                                                        0xc0600104
+#define ixROM_SW_DATA_57                                                        0xc0600108
+#define ixROM_SW_DATA_58                                                        0xc060010c
+#define ixROM_SW_DATA_59                                                        0xc0600110
+#define ixROM_SW_DATA_60                                                        0xc0600114
+#define ixROM_SW_DATA_61                                                        0xc0600118
+#define ixROM_SW_DATA_62                                                        0xc060011c
+#define ixROM_SW_DATA_63                                                        0xc0600120
+#define ixROM_SW_DATA_64                                                        0xc0600124
+#define mmGC_CAC_CGTT_CLK_CTRL                                                  0x3292
+#define mmSE_CAC_CGTT_CLK_CTRL                                                  0x3293
+#define mmGC_CAC_LKG_AGGR_LOWER                                                 0x3296
+#define mmGC_CAC_LKG_AGGR_UPPER                                                 0x3297
+#define ixGC_CAC_WEIGHT_CU_0                                                    0x32
+#define ixGC_CAC_WEIGHT_CU_1                                                    0x33
+#define ixGC_CAC_WEIGHT_CU_2                                                    0x34
+#define ixGC_CAC_WEIGHT_CU_3                                                    0x35
+#define ixGC_CAC_WEIGHT_CU_4                                                    0x36
+#define ixGC_CAC_WEIGHT_CU_5                                                    0x37
+#define ixGC_CAC_WEIGHT_CU_6                                                    0x38
+#define ixGC_CAC_WEIGHT_CU_7                                                    0x39
+#define ixGC_CAC_ACC_CU0                                                        0xba
+#define ixGC_CAC_ACC_CU1                                                        0xbb
+#define ixGC_CAC_ACC_CU2                                                        0xbc
+#define ixGC_CAC_ACC_CU3                                                        0xbd
+#define ixGC_CAC_ACC_CU4                                                        0xbe
+#define ixGC_CAC_ACC_CU5                                                        0xbf
+#define ixGC_CAC_ACC_CU6                                                        0xc0
+#define ixGC_CAC_ACC_CU7                                                        0xc1
+#define ixGC_CAC_ACC_CU8                                                        0xc2
+#define ixGC_CAC_ACC_CU9                                                        0xc3
+#define ixGC_CAC_ACC_CU10                                                       0xc4
+#define ixGC_CAC_ACC_CU11                                                       0xc5
+#define ixGC_CAC_ACC_CU12                                                       0xc6
+#define ixGC_CAC_ACC_CU13                                                       0xc7
+#define ixGC_CAC_ACC_CU14                                                       0xc8
+#define ixGC_CAC_ACC_CU15                                                       0xc9
+#define ixGC_CAC_OVRD_CU                                                        0xe7
+
+#endif /* SMU_7_1_3_D_H */
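
The "ix"-prefixed offsets defined above are indirect SMC register addresses rather than direct MMIO offsets: they are reached by programming an index/data register pair, while "mm"-prefixed offsets (for example the mmROM_SMC_IND_INDEX/mmROM_SMC_IND_DATA pairs) are ordinary dword-indexed MMIO registers. A minimal sketch of how a driver might read one of these indirect registers, assuming amdgpu's RREG32_SMC() helper (which dispatches to the device's smc_rreg callback) and using ixROM_DATA purely for illustration:

	#include "amdgpu.h"

	/* Read one dword from an indirect (SMC-indexed) register.  RREG32_SMC()
	 * resolves to adev->smc_rreg(), which writes the "ix" address into the
	 * SMC index register and reads the value back through the paired data
	 * register while holding adev->smc_idx_lock. */
	static u32 smu_7_1_3_read_ix(struct amdgpu_device *adev, u32 ix_reg)
	{
		return RREG32_SMC(ix_reg);
	}

	/* Hypothetical usage: fetch the current ROM dword exposed through the
	 * software ROM interface defined above. */
	static u32 smu_7_1_3_read_rom_data(struct amdgpu_device *adev)
	{
		return smu_7_1_3_read_ix(adev, ixROM_DATA);
	}

By contrast, an "mm" register such as mmROM_SMC_IND_INDEX would be accessed directly with RREG32()/WREG32(), with no index/data indirection.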
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
new file mode 100644
index 000000000000..f19c4208d963
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
@@ -0,0 +1,1282 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_ENUM_H
+#define SMU_7_1_3_ENUM_H
+
+#define CG_SRBM_START_ADDR                        0x600
+#define CG_SRBM_END_ADDR                          0x8ff
+#define RCU_CCF_DWORDS0                           0xa0
+#define RCU_CCF_BITS0                             0x1400
+#define RCU_SAM_BYTES                             0x2c
+#define RCU_SAM_RTL_BYTES                         0x2c
+#define RCU_SMU_BYTES                             0x14
+#define RCU_SMU_RTL_BYTES                         0x14
+#define SFP_CHAIN_ADDR                            0x1
+#define SFP_SADR                                  0x0
+#define SFP_EADR                                  0x37f
+#define SAMU_KEY_CHAIN_ADR                        0x0
+#define SAMU_KEY_SADR                             0x280
+#define SAMU_KEY_EADR                             0x2ab
+#define SMU_KEY_CHAIN_ADR                         0x0
+#define SMU_KEY_SADR                              0x2ac
+#define SMU_KEY_EADR                              0x2bf
+#define SMC_MSG_TEST                              0x1
+#define SMC_MSG_PHY_LN_OFF                        0x2
+#define SMC_MSG_PHY_LN_ON                         0x3
+#define SMC_MSG_DDI_PHY_OFF                       0x4
+#define SMC_MSG_DDI_PHY_ON                        0x5
+#define SMC_MSG_CASCADE_PLL_OFF                   0x6
+#define SMC_MSG_CASCADE_PLL_ON                    0x7
+#define SMC_MSG_PWR_OFF_x16                       0x8
+#define SMC_MSG_CONFIG_LCLK_DPM                   0x9
+#define SMC_MSG_FLUSH_DATA_CACHE                  0xa
+#define SMC_MSG_FLUSH_INSTRUCTION_CACHE           0xb
+#define SMC_MSG_CONFIG_VPC_ACCUMULATOR            0xc
+#define SMC_MSG_CONFIG_BAPM                       0xd
+#define SMC_MSG_CONFIG_TDC_LIMIT                  0xe
+#define SMC_MSG_CONFIG_LPMx                       0xf
+#define SMC_MSG_CONFIG_HTC_LIMIT                  0x10
+#define SMC_MSG_CONFIG_THERMAL_CNTL               0x11
+#define SMC_MSG_CONFIG_VOLTAGE_CNTL               0x12
+#define SMC_MSG_CONFIG_TDP_CNTL                   0x13
+#define SMC_MSG_EN_PM_CNTL                        0x14
+#define SMC_MSG_DIS_PM_CNTL                       0x15
+#define SMC_MSG_CONFIG_NBDPM                      0x16
+#define SMC_MSG_CONFIG_LOADLINE                   0x17
+#define SMC_MSG_ADJUST_LOADLINE                   0x18
+#define SMC_MSG_RESET                             0x20
+#define SMC_MSG_VOLTAGE                           0x25
+#define SMC_VERSION_MAJOR                         0x7
+#define SMC_VERSION_MINOR                         0x0
+#define SMC_HEADER_SIZE                           0x40
+#define ROM_SIGNATURE                             0xaa55
+typedef enum SurfaceEndian {
+	ENDIAN_NONE                                      = 0x0,
+	ENDIAN_8IN16                                     = 0x1,
+	ENDIAN_8IN32                                     = 0x2,
+	ENDIAN_8IN64                                     = 0x3,
+} SurfaceEndian;
+typedef enum ArrayMode {
+	ARRAY_LINEAR_GENERAL                             = 0x0,
+	ARRAY_LINEAR_ALIGNED                             = 0x1,
+	ARRAY_1D_TILED_THIN1                             = 0x2,
+	ARRAY_1D_TILED_THICK                             = 0x3,
+	ARRAY_2D_TILED_THIN1                             = 0x4,
+	ARRAY_PRT_TILED_THIN1                            = 0x5,
+	ARRAY_PRT_2D_TILED_THIN1                         = 0x6,
+	ARRAY_2D_TILED_THICK                             = 0x7,
+	ARRAY_2D_TILED_XTHICK                            = 0x8,
+	ARRAY_PRT_TILED_THICK                            = 0x9,
+	ARRAY_PRT_2D_TILED_THICK                         = 0xa,
+	ARRAY_PRT_3D_TILED_THIN1                         = 0xb,
+	ARRAY_3D_TILED_THIN1                             = 0xc,
+	ARRAY_3D_TILED_THICK                             = 0xd,
+	ARRAY_3D_TILED_XTHICK                            = 0xe,
+	ARRAY_PRT_3D_TILED_THICK                         = 0xf,
+} ArrayMode;
+typedef enum PipeTiling {
+	CONFIG_1_PIPE                                    = 0x0,
+	CONFIG_2_PIPE                                    = 0x1,
+	CONFIG_4_PIPE                                    = 0x2,
+	CONFIG_8_PIPE                                    = 0x3,
+} PipeTiling;
+typedef enum BankTiling {
+	CONFIG_4_BANK                                    = 0x0,
+	CONFIG_8_BANK                                    = 0x1,
+} BankTiling;
+typedef enum GroupInterleave {
+	CONFIG_256B_GROUP                                = 0x0,
+	CONFIG_512B_GROUP                                = 0x1,
+} GroupInterleave;
+typedef enum RowTiling {
+	CONFIG_1KB_ROW                                   = 0x0,
+	CONFIG_2KB_ROW                                   = 0x1,
+	CONFIG_4KB_ROW                                   = 0x2,
+	CONFIG_8KB_ROW                                   = 0x3,
+	CONFIG_1KB_ROW_OPT                               = 0x4,
+	CONFIG_2KB_ROW_OPT                               = 0x5,
+	CONFIG_4KB_ROW_OPT                               = 0x6,
+	CONFIG_8KB_ROW_OPT                               = 0x7,
+} RowTiling;
+typedef enum BankSwapBytes {
+	CONFIG_128B_SWAPS                                = 0x0,
+	CONFIG_256B_SWAPS                                = 0x1,
+	CONFIG_512B_SWAPS                                = 0x2,
+	CONFIG_1KB_SWAPS                                 = 0x3,
+} BankSwapBytes;
+typedef enum SampleSplitBytes {
+	CONFIG_1KB_SPLIT                                 = 0x0,
+	CONFIG_2KB_SPLIT                                 = 0x1,
+	CONFIG_4KB_SPLIT                                 = 0x2,
+	CONFIG_8KB_SPLIT                                 = 0x3,
+} SampleSplitBytes;
+typedef enum NumPipes {
+	ADDR_CONFIG_1_PIPE                               = 0x0,
+	ADDR_CONFIG_2_PIPE                               = 0x1,
+	ADDR_CONFIG_4_PIPE                               = 0x2,
+	ADDR_CONFIG_8_PIPE                               = 0x3,
+} NumPipes;
+typedef enum PipeInterleaveSize {
+	ADDR_CONFIG_PIPE_INTERLEAVE_256B                 = 0x0,
+	ADDR_CONFIG_PIPE_INTERLEAVE_512B                 = 0x1,
+} PipeInterleaveSize;
+typedef enum BankInterleaveSize {
+	ADDR_CONFIG_BANK_INTERLEAVE_1                    = 0x0,
+	ADDR_CONFIG_BANK_INTERLEAVE_2                    = 0x1,
+	ADDR_CONFIG_BANK_INTERLEAVE_4                    = 0x2,
+	ADDR_CONFIG_BANK_INTERLEAVE_8                    = 0x3,
+} BankInterleaveSize;
+typedef enum NumShaderEngines {
+	ADDR_CONFIG_1_SHADER_ENGINE                      = 0x0,
+	ADDR_CONFIG_2_SHADER_ENGINE                      = 0x1,
+} NumShaderEngines;
+typedef enum ShaderEngineTileSize {
+	ADDR_CONFIG_SE_TILE_16                           = 0x0,
+	ADDR_CONFIG_SE_TILE_32                           = 0x1,
+} ShaderEngineTileSize;
+typedef enum NumGPUs {
+	ADDR_CONFIG_1_GPU                                = 0x0,
+	ADDR_CONFIG_2_GPU                                = 0x1,
+	ADDR_CONFIG_4_GPU                                = 0x2,
+} NumGPUs;
+typedef enum MultiGPUTileSize {
+	ADDR_CONFIG_GPU_TILE_16                          = 0x0,
+	ADDR_CONFIG_GPU_TILE_32                          = 0x1,
+	ADDR_CONFIG_GPU_TILE_64                          = 0x2,
+	ADDR_CONFIG_GPU_TILE_128                         = 0x3,
+} MultiGPUTileSize;
+typedef enum RowSize {
+	ADDR_CONFIG_1KB_ROW                              = 0x0,
+	ADDR_CONFIG_2KB_ROW                              = 0x1,
+	ADDR_CONFIG_4KB_ROW                              = 0x2,
+} RowSize;
+typedef enum NumLowerPipes {
+	ADDR_CONFIG_1_LOWER_PIPES                        = 0x0,
+	ADDR_CONFIG_2_LOWER_PIPES                        = 0x1,
+} NumLowerPipes;
+typedef enum DebugBlockId {
+	DBG_CLIENT_BLKID_RESERVED                        = 0x0,
+	DBG_CLIENT_BLKID_dbg                             = 0x1,
+	DBG_CLIENT_BLKID_scf2                            = 0x2,
+	DBG_CLIENT_BLKID_mcd5_0                          = 0x3,
+	DBG_CLIENT_BLKID_mcd5_1                          = 0x4,
+	DBG_CLIENT_BLKID_mcd6_0                          = 0x5,
+	DBG_CLIENT_BLKID_mcd6_1                          = 0x6,
+	DBG_CLIENT_BLKID_mcd7_0                          = 0x7,
+	DBG_CLIENT_BLKID_mcd7_1                          = 0x8,
+	DBG_CLIENT_BLKID_vmc                             = 0x9,
+	DBG_CLIENT_BLKID_sx30                            = 0xa,
+	DBG_CLIENT_BLKID_mcd2_0                          = 0xb,
+	DBG_CLIENT_BLKID_mcd2_1                          = 0xc,
+	DBG_CLIENT_BLKID_bci1                            = 0xd,
+	DBG_CLIENT_BLKID_xdma_dbg_client_wrapper         = 0xe,
+	DBG_CLIENT_BLKID_mcc0                            = 0xf,
+	DBG_CLIENT_BLKID_uvdf_0                          = 0x10,
+	DBG_CLIENT_BLKID_uvdf_1                          = 0x11,
+	DBG_CLIENT_BLKID_uvdf_2                          = 0x12,
+	DBG_CLIENT_BLKID_bci0                            = 0x13,
+	DBG_CLIENT_BLKID_vcec0_0                         = 0x14,
+	DBG_CLIENT_BLKID_cb100                           = 0x15,
+	DBG_CLIENT_BLKID_cb001                           = 0x16,
+	DBG_CLIENT_BLKID_cb002                           = 0x17,
+	DBG_CLIENT_BLKID_cb003                           = 0x18,
+	DBG_CLIENT_BLKID_mcd4_0                          = 0x19,
+	DBG_CLIENT_BLKID_mcd4_1                          = 0x1a,
+	DBG_CLIENT_BLKID_tmonw00                         = 0x1b,
+	DBG_CLIENT_BLKID_cb101                           = 0x1c,
+	DBG_CLIENT_BLKID_cb102                           = 0x1d,
+	DBG_CLIENT_BLKID_cb103                           = 0x1e,
+	DBG_CLIENT_BLKID_sx10                            = 0x1f,
+	DBG_CLIENT_BLKID_cb301                           = 0x20,
+	DBG_CLIENT_BLKID_cb302                           = 0x21,
+	DBG_CLIENT_BLKID_cb303                           = 0x22,
+	DBG_CLIENT_BLKID_tmonw01                         = 0x23,
+	DBG_CLIENT_BLKID_tmonw02                         = 0x24,
+	DBG_CLIENT_BLKID_vcea0_0                         = 0x25,
+	DBG_CLIENT_BLKID_vcea0_1                         = 0x26,
+	DBG_CLIENT_BLKID_vcea0_2                         = 0x27,
+	DBG_CLIENT_BLKID_vcea0_3                         = 0x28,
+	DBG_CLIENT_BLKID_scf1                            = 0x29,
+	DBG_CLIENT_BLKID_sx20                            = 0x2a,
+	DBG_CLIENT_BLKID_spim1                           = 0x2b,
+	DBG_CLIENT_BLKID_scb1                            = 0x2c,
+	DBG_CLIENT_BLKID_pa10                            = 0x2d,
+	DBG_CLIENT_BLKID_pa00                            = 0x2e,
+	DBG_CLIENT_BLKID_gmcon                           = 0x2f,
+	DBG_CLIENT_BLKID_mcb                             = 0x30,
+	DBG_CLIENT_BLKID_vgt0                            = 0x31,
+	DBG_CLIENT_BLKID_pc0                             = 0x32,
+	DBG_CLIENT_BLKID_bci2                            = 0x33,
+	DBG_CLIENT_BLKID_uvdb_0                          = 0x34,
+	DBG_CLIENT_BLKID_spim3                           = 0x35,
+	DBG_CLIENT_BLKID_scb3                            = 0x36,
+	DBG_CLIENT_BLKID_cpc_0                           = 0x37,
+	DBG_CLIENT_BLKID_cpc_1                           = 0x38,
+	DBG_CLIENT_BLKID_uvdm_0                          = 0x39,
+	DBG_CLIENT_BLKID_uvdm_1                          = 0x3a,
+	DBG_CLIENT_BLKID_uvdm_2                          = 0x3b,
+	DBG_CLIENT_BLKID_uvdm_3                          = 0x3c,
+	DBG_CLIENT_BLKID_cb000                           = 0x3d,
+	DBG_CLIENT_BLKID_spim0                           = 0x3e,
+	DBG_CLIENT_BLKID_scb0                            = 0x3f,
+	DBG_CLIENT_BLKID_mcc2                            = 0x40,
+	DBG_CLIENT_BLKID_ds0                             = 0x41,
+	DBG_CLIENT_BLKID_srbm                            = 0x42,
+	DBG_CLIENT_BLKID_ih                              = 0x43,
+	DBG_CLIENT_BLKID_sem                             = 0x44,
+	DBG_CLIENT_BLKID_sdma_0                          = 0x45,
+	DBG_CLIENT_BLKID_sdma_1                          = 0x46,
+	DBG_CLIENT_BLKID_hdp                             = 0x47,
+	DBG_CLIENT_BLKID_acp_0                           = 0x48,
+	DBG_CLIENT_BLKID_acp_1                           = 0x49,
+	DBG_CLIENT_BLKID_cb200                           = 0x4a,
+	DBG_CLIENT_BLKID_scf3                            = 0x4b,
+	DBG_CLIENT_BLKID_bci3                            = 0x4c,
+	DBG_CLIENT_BLKID_mcd0_0                          = 0x4d,
+	DBG_CLIENT_BLKID_mcd0_1                          = 0x4e,
+	DBG_CLIENT_BLKID_pa11                            = 0x4f,
+	DBG_CLIENT_BLKID_pa01                            = 0x50,
+	DBG_CLIENT_BLKID_cb201                           = 0x51,
+	DBG_CLIENT_BLKID_cb202                           = 0x52,
+	DBG_CLIENT_BLKID_cb203                           = 0x53,
+	DBG_CLIENT_BLKID_spim2                           = 0x54,
+	DBG_CLIENT_BLKID_scb2                            = 0x55,
+	DBG_CLIENT_BLKID_vgt2                            = 0x56,
+	DBG_CLIENT_BLKID_pc2                             = 0x57,
+	DBG_CLIENT_BLKID_smu_0                           = 0x58,
+	DBG_CLIENT_BLKID_smu_1                           = 0x59,
+	DBG_CLIENT_BLKID_smu_2                           = 0x5a,
+	DBG_CLIENT_BLKID_cb1                             = 0x5b,
+	DBG_CLIENT_BLKID_ia0                             = 0x5c,
+	DBG_CLIENT_BLKID_wd                              = 0x5d,
+	DBG_CLIENT_BLKID_ia1                             = 0x5e,
+	DBG_CLIENT_BLKID_scf0                            = 0x5f,
+	DBG_CLIENT_BLKID_vgt1                            = 0x60,
+	DBG_CLIENT_BLKID_pc1                             = 0x61,
+	DBG_CLIENT_BLKID_cb0                             = 0x62,
+	DBG_CLIENT_BLKID_gdc_one_0                       = 0x63,
+	DBG_CLIENT_BLKID_gdc_one_1                       = 0x64,
+	DBG_CLIENT_BLKID_gdc_one_2                       = 0x65,
+	DBG_CLIENT_BLKID_gdc_one_3                       = 0x66,
+	DBG_CLIENT_BLKID_gdc_one_4                       = 0x67,
+	DBG_CLIENT_BLKID_gdc_one_5                       = 0x68,
+	DBG_CLIENT_BLKID_gdc_one_6                       = 0x69,
+	DBG_CLIENT_BLKID_gdc_one_7                       = 0x6a,
+	DBG_CLIENT_BLKID_gdc_one_8                       = 0x6b,
+	DBG_CLIENT_BLKID_gdc_one_9                       = 0x6c,
+	DBG_CLIENT_BLKID_gdc_one_10                      = 0x6d,
+	DBG_CLIENT_BLKID_gdc_one_11                      = 0x6e,
+	DBG_CLIENT_BLKID_gdc_one_12                      = 0x6f,
+	DBG_CLIENT_BLKID_gdc_one_13                      = 0x70,
+	DBG_CLIENT_BLKID_gdc_one_14                      = 0x71,
+	DBG_CLIENT_BLKID_gdc_one_15                      = 0x72,
+	DBG_CLIENT_BLKID_gdc_one_16                      = 0x73,
+	DBG_CLIENT_BLKID_gdc_one_17                      = 0x74,
+	DBG_CLIENT_BLKID_gdc_one_18                      = 0x75,
+	DBG_CLIENT_BLKID_gdc_one_19                      = 0x76,
+	DBG_CLIENT_BLKID_gdc_one_20                      = 0x77,
+	DBG_CLIENT_BLKID_gdc_one_21                      = 0x78,
+	DBG_CLIENT_BLKID_gdc_one_22                      = 0x79,
+	DBG_CLIENT_BLKID_gdc_one_23                      = 0x7a,
+	DBG_CLIENT_BLKID_gdc_one_24                      = 0x7b,
+	DBG_CLIENT_BLKID_gdc_one_25                      = 0x7c,
+	DBG_CLIENT_BLKID_gdc_one_26                      = 0x7d,
+	DBG_CLIENT_BLKID_gdc_one_27                      = 0x7e,
+	DBG_CLIENT_BLKID_gdc_one_28                      = 0x7f,
+	DBG_CLIENT_BLKID_gdc_one_29                      = 0x80,
+	DBG_CLIENT_BLKID_gdc_one_30                      = 0x81,
+	DBG_CLIENT_BLKID_gdc_one_31                      = 0x82,
+	DBG_CLIENT_BLKID_gdc_one_32                      = 0x83,
+	DBG_CLIENT_BLKID_gdc_one_33                      = 0x84,
+	DBG_CLIENT_BLKID_gdc_one_34                      = 0x85,
+	DBG_CLIENT_BLKID_gdc_one_35                      = 0x86,
+	DBG_CLIENT_BLKID_vceb0_0                         = 0x87,
+	DBG_CLIENT_BLKID_vgt3                            = 0x88,
+	DBG_CLIENT_BLKID_pc3                             = 0x89,
+	DBG_CLIENT_BLKID_mcd3_0                          = 0x8a,
+	DBG_CLIENT_BLKID_mcd3_1                          = 0x8b,
+	DBG_CLIENT_BLKID_uvdu_0                          = 0x8c,
+	DBG_CLIENT_BLKID_uvdu_1                          = 0x8d,
+	DBG_CLIENT_BLKID_uvdu_2                          = 0x8e,
+	DBG_CLIENT_BLKID_uvdu_3                          = 0x8f,
+	DBG_CLIENT_BLKID_uvdu_4                          = 0x90,
+	DBG_CLIENT_BLKID_uvdu_5                          = 0x91,
+	DBG_CLIENT_BLKID_uvdu_6                          = 0x92,
+	DBG_CLIENT_BLKID_cb300                           = 0x93,
+	DBG_CLIENT_BLKID_mcd1_0                          = 0x94,
+	DBG_CLIENT_BLKID_mcd1_1                          = 0x95,
+	DBG_CLIENT_BLKID_sx00                            = 0x96,
+	DBG_CLIENT_BLKID_uvdc_0                          = 0x97,
+	DBG_CLIENT_BLKID_uvdc_1                          = 0x98,
+	DBG_CLIENT_BLKID_mcc3                            = 0x99,
+	DBG_CLIENT_BLKID_mcc4                            = 0x9a,
+	DBG_CLIENT_BLKID_mcc5                            = 0x9b,
+	DBG_CLIENT_BLKID_mcc6                            = 0x9c,
+	DBG_CLIENT_BLKID_mcc7                            = 0x9d,
+	DBG_CLIENT_BLKID_cpg_0                           = 0x9e,
+	DBG_CLIENT_BLKID_cpg_1                           = 0x9f,
+	DBG_CLIENT_BLKID_gck                             = 0xa0,
+	DBG_CLIENT_BLKID_mcc1                            = 0xa1,
+	DBG_CLIENT_BLKID_cpf_0                           = 0xa2,
+	DBG_CLIENT_BLKID_cpf_1                           = 0xa3,
+	DBG_CLIENT_BLKID_rlc                             = 0xa4,
+	DBG_CLIENT_BLKID_grbm                            = 0xa5,
+	DBG_CLIENT_BLKID_sammsp                          = 0xa6,
+	DBG_CLIENT_BLKID_dci_pg                          = 0xa7,
+	DBG_CLIENT_BLKID_dci_0                           = 0xa8,
+	DBG_CLIENT_BLKID_dccg0_0                         = 0xa9,
+	DBG_CLIENT_BLKID_dccg0_1                         = 0xaa,
+	DBG_CLIENT_BLKID_dcfe01_0                        = 0xab,
+	DBG_CLIENT_BLKID_dcfe02_0                        = 0xac,
+	DBG_CLIENT_BLKID_dcfe03_0                        = 0xad,
+	DBG_CLIENT_BLKID_dcfe04_0                        = 0xae,
+	DBG_CLIENT_BLKID_dcfe05_0                        = 0xaf,
+	DBG_CLIENT_BLKID_dcfe06_0                        = 0xb0,
+	DBG_CLIENT_BLKID_mcq0_0                          = 0xb1,
+	DBG_CLIENT_BLKID_mcq0_1                          = 0xb2,
+	DBG_CLIENT_BLKID_mcq1_0                          = 0xb3,
+	DBG_CLIENT_BLKID_mcq1_1                          = 0xb4,
+	DBG_CLIENT_BLKID_mcq2_0                          = 0xb5,
+	DBG_CLIENT_BLKID_mcq2_1                          = 0xb6,
+	DBG_CLIENT_BLKID_mcq3_0                          = 0xb7,
+	DBG_CLIENT_BLKID_mcq3_1                          = 0xb8,
+	DBG_CLIENT_BLKID_mcq4_0                          = 0xb9,
+	DBG_CLIENT_BLKID_mcq4_1                          = 0xba,
+	DBG_CLIENT_BLKID_mcq5_0                          = 0xbb,
+	DBG_CLIENT_BLKID_mcq5_1                          = 0xbc,
+	DBG_CLIENT_BLKID_mcq6_0                          = 0xbd,
+	DBG_CLIENT_BLKID_mcq6_1                          = 0xbe,
+	DBG_CLIENT_BLKID_mcq7_0                          = 0xbf,
+	DBG_CLIENT_BLKID_mcq7_1                          = 0xc0,
+	DBG_CLIENT_BLKID_uvdi_0                          = 0xc1,
+	DBG_CLIENT_BLKID_RESERVED_LAST                   = 0xc2,
+} DebugBlockId;
+typedef enum DebugBlockId_OLD {
+	DBG_BLOCK_ID_RESERVED                            = 0x0,
+	DBG_BLOCK_ID_DBG                                 = 0x1,
+	DBG_BLOCK_ID_VMC                                 = 0x2,
+	DBG_BLOCK_ID_PDMA                                = 0x3,
+	DBG_BLOCK_ID_CG                                  = 0x4,
+	DBG_BLOCK_ID_SRBM                                = 0x5,
+	DBG_BLOCK_ID_GRBM                                = 0x6,
+	DBG_BLOCK_ID_RLC                                 = 0x7,
+	DBG_BLOCK_ID_CSC                                 = 0x8,
+	DBG_BLOCK_ID_SEM                                 = 0x9,
+	DBG_BLOCK_ID_IH                                  = 0xa,
+	DBG_BLOCK_ID_SC                                  = 0xb,
+	DBG_BLOCK_ID_SQ                                  = 0xc,
+	DBG_BLOCK_ID_AVP                                 = 0xd,
+	DBG_BLOCK_ID_GMCON                               = 0xe,
+	DBG_BLOCK_ID_SMU                                 = 0xf,
+	DBG_BLOCK_ID_DMA0                                = 0x10,
+	DBG_BLOCK_ID_DMA1                                = 0x11,
+	DBG_BLOCK_ID_SPIM                                = 0x12,
+	DBG_BLOCK_ID_GDS                                 = 0x13,
+	DBG_BLOCK_ID_SPIS                                = 0x14,
+	DBG_BLOCK_ID_UNUSED0                             = 0x15,
+	DBG_BLOCK_ID_PA0                                 = 0x16,
+	DBG_BLOCK_ID_PA1                                 = 0x17,
+	DBG_BLOCK_ID_CP0                                 = 0x18,
+	DBG_BLOCK_ID_CP1                                 = 0x19,
+	DBG_BLOCK_ID_CP2                                 = 0x1a,
+	DBG_BLOCK_ID_UNUSED1                             = 0x1b,
+	DBG_BLOCK_ID_UVDU                                = 0x1c,
+	DBG_BLOCK_ID_UVDM                                = 0x1d,
+	DBG_BLOCK_ID_VCE                                 = 0x1e,
+	DBG_BLOCK_ID_UNUSED2                             = 0x1f,
+	DBG_BLOCK_ID_VGT0                                = 0x20,
+	DBG_BLOCK_ID_VGT1                                = 0x21,
+	DBG_BLOCK_ID_IA                                  = 0x22,
+	DBG_BLOCK_ID_UNUSED3                             = 0x23,
+	DBG_BLOCK_ID_SCT0                                = 0x24,
+	DBG_BLOCK_ID_SCT1                                = 0x25,
+	DBG_BLOCK_ID_SPM0                                = 0x26,
+	DBG_BLOCK_ID_SPM1                                = 0x27,
+	DBG_BLOCK_ID_TCAA                                = 0x28,
+	DBG_BLOCK_ID_TCAB                                = 0x29,
+	DBG_BLOCK_ID_TCCA                                = 0x2a,
+	DBG_BLOCK_ID_TCCB                                = 0x2b,
+	DBG_BLOCK_ID_MCC0                                = 0x2c,
+	DBG_BLOCK_ID_MCC1                                = 0x2d,
+	DBG_BLOCK_ID_MCC2                                = 0x2e,
+	DBG_BLOCK_ID_MCC3                                = 0x2f,
+	DBG_BLOCK_ID_SX0                                 = 0x30,
+	DBG_BLOCK_ID_SX1                                 = 0x31,
+	DBG_BLOCK_ID_SX2                                 = 0x32,
+	DBG_BLOCK_ID_SX3                                 = 0x33,
+	DBG_BLOCK_ID_UNUSED4                             = 0x34,
+	DBG_BLOCK_ID_UNUSED5                             = 0x35,
+	DBG_BLOCK_ID_UNUSED6                             = 0x36,
+	DBG_BLOCK_ID_UNUSED7                             = 0x37,
+	DBG_BLOCK_ID_PC0                                 = 0x38,
+	DBG_BLOCK_ID_PC1                                 = 0x39,
+	DBG_BLOCK_ID_UNUSED8                             = 0x3a,
+	DBG_BLOCK_ID_UNUSED9                             = 0x3b,
+	DBG_BLOCK_ID_UNUSED10                            = 0x3c,
+	DBG_BLOCK_ID_UNUSED11                            = 0x3d,
+	DBG_BLOCK_ID_MCB                                 = 0x3e,
+	DBG_BLOCK_ID_UNUSED12                            = 0x3f,
+	DBG_BLOCK_ID_SCB0                                = 0x40,
+	DBG_BLOCK_ID_SCB1                                = 0x41,
+	DBG_BLOCK_ID_UNUSED13                            = 0x42,
+	DBG_BLOCK_ID_UNUSED14                            = 0x43,
+	DBG_BLOCK_ID_SCF0                                = 0x44,
+	DBG_BLOCK_ID_SCF1                                = 0x45,
+	DBG_BLOCK_ID_UNUSED15                            = 0x46,
+	DBG_BLOCK_ID_UNUSED16                            = 0x47,
+	DBG_BLOCK_ID_BCI0                                = 0x48,
+	DBG_BLOCK_ID_BCI1                                = 0x49,
+	DBG_BLOCK_ID_BCI2                                = 0x4a,
+	DBG_BLOCK_ID_BCI3                                = 0x4b,
+	DBG_BLOCK_ID_UNUSED17                            = 0x4c,
+	DBG_BLOCK_ID_UNUSED18                            = 0x4d,
+	DBG_BLOCK_ID_UNUSED19                            = 0x4e,
+	DBG_BLOCK_ID_UNUSED20                            = 0x4f,
+	DBG_BLOCK_ID_CB00                                = 0x50,
+	DBG_BLOCK_ID_CB01                                = 0x51,
+	DBG_BLOCK_ID_CB02                                = 0x52,
+	DBG_BLOCK_ID_CB03                                = 0x53,
+	DBG_BLOCK_ID_CB04                                = 0x54,
+	DBG_BLOCK_ID_UNUSED21                            = 0x55,
+	DBG_BLOCK_ID_UNUSED22                            = 0x56,
+	DBG_BLOCK_ID_UNUSED23                            = 0x57,
+	DBG_BLOCK_ID_CB10                                = 0x58,
+	DBG_BLOCK_ID_CB11                                = 0x59,
+	DBG_BLOCK_ID_CB12                                = 0x5a,
+	DBG_BLOCK_ID_CB13                                = 0x5b,
+	DBG_BLOCK_ID_CB14                                = 0x5c,
+	DBG_BLOCK_ID_UNUSED24                            = 0x5d,
+	DBG_BLOCK_ID_UNUSED25                            = 0x5e,
+	DBG_BLOCK_ID_UNUSED26                            = 0x5f,
+	DBG_BLOCK_ID_TCP0                                = 0x60,
+	DBG_BLOCK_ID_TCP1                                = 0x61,
+	DBG_BLOCK_ID_TCP2                                = 0x62,
+	DBG_BLOCK_ID_TCP3                                = 0x63,
+	DBG_BLOCK_ID_TCP4                                = 0x64,
+	DBG_BLOCK_ID_TCP5                                = 0x65,
+	DBG_BLOCK_ID_TCP6                                = 0x66,
+	DBG_BLOCK_ID_TCP7                                = 0x67,
+	DBG_BLOCK_ID_TCP8                                = 0x68,
+	DBG_BLOCK_ID_TCP9                                = 0x69,
+	DBG_BLOCK_ID_TCP10                               = 0x6a,
+	DBG_BLOCK_ID_TCP11                               = 0x6b,
+	DBG_BLOCK_ID_TCP12                               = 0x6c,
+	DBG_BLOCK_ID_TCP13                               = 0x6d,
+	DBG_BLOCK_ID_TCP14                               = 0x6e,
+	DBG_BLOCK_ID_TCP15                               = 0x6f,
+	DBG_BLOCK_ID_TCP16                               = 0x70,
+	DBG_BLOCK_ID_TCP17                               = 0x71,
+	DBG_BLOCK_ID_TCP18                               = 0x72,
+	DBG_BLOCK_ID_TCP19                               = 0x73,
+	DBG_BLOCK_ID_TCP20                               = 0x74,
+	DBG_BLOCK_ID_TCP21                               = 0x75,
+	DBG_BLOCK_ID_TCP22                               = 0x76,
+	DBG_BLOCK_ID_TCP23                               = 0x77,
+	DBG_BLOCK_ID_TCP_RESERVED0                       = 0x78,
+	DBG_BLOCK_ID_TCP_RESERVED1                       = 0x79,
+	DBG_BLOCK_ID_TCP_RESERVED2                       = 0x7a,
+	DBG_BLOCK_ID_TCP_RESERVED3                       = 0x7b,
+	DBG_BLOCK_ID_TCP_RESERVED4                       = 0x7c,
+	DBG_BLOCK_ID_TCP_RESERVED5                       = 0x7d,
+	DBG_BLOCK_ID_TCP_RESERVED6                       = 0x7e,
+	DBG_BLOCK_ID_TCP_RESERVED7                       = 0x7f,
+	DBG_BLOCK_ID_DB00                                = 0x80,
+	DBG_BLOCK_ID_DB01                                = 0x81,
+	DBG_BLOCK_ID_DB02                                = 0x82,
+	DBG_BLOCK_ID_DB03                                = 0x83,
+	DBG_BLOCK_ID_DB04                                = 0x84,
+	DBG_BLOCK_ID_UNUSED27                            = 0x85,
+	DBG_BLOCK_ID_UNUSED28                            = 0x86,
+	DBG_BLOCK_ID_UNUSED29                            = 0x87,
+	DBG_BLOCK_ID_DB10                                = 0x88,
+	DBG_BLOCK_ID_DB11                                = 0x89,
+	DBG_BLOCK_ID_DB12                                = 0x8a,
+	DBG_BLOCK_ID_DB13                                = 0x8b,
+	DBG_BLOCK_ID_DB14                                = 0x8c,
+	DBG_BLOCK_ID_UNUSED30                            = 0x8d,
+	DBG_BLOCK_ID_UNUSED31                            = 0x8e,
+	DBG_BLOCK_ID_UNUSED32                            = 0x8f,
+	DBG_BLOCK_ID_TCC0                                = 0x90,
+	DBG_BLOCK_ID_TCC1                                = 0x91,
+	DBG_BLOCK_ID_TCC2                                = 0x92,
+	DBG_BLOCK_ID_TCC3                                = 0x93,
+	DBG_BLOCK_ID_TCC4                                = 0x94,
+	DBG_BLOCK_ID_TCC5                                = 0x95,
+	DBG_BLOCK_ID_TCC6                                = 0x96,
+	DBG_BLOCK_ID_TCC7                                = 0x97,
+	DBG_BLOCK_ID_SPS00                               = 0x98,
+	DBG_BLOCK_ID_SPS01                               = 0x99,
+	DBG_BLOCK_ID_SPS02                               = 0x9a,
+	DBG_BLOCK_ID_SPS10                               = 0x9b,
+	DBG_BLOCK_ID_SPS11                               = 0x9c,
+	DBG_BLOCK_ID_SPS12                               = 0x9d,
+	DBG_BLOCK_ID_UNUSED33                            = 0x9e,
+	DBG_BLOCK_ID_UNUSED34                            = 0x9f,
+	DBG_BLOCK_ID_TA00                                = 0xa0,
+	DBG_BLOCK_ID_TA01                                = 0xa1,
+	DBG_BLOCK_ID_TA02                                = 0xa2,
+	DBG_BLOCK_ID_TA03                                = 0xa3,
+	DBG_BLOCK_ID_TA04                                = 0xa4,
+	DBG_BLOCK_ID_TA05                                = 0xa5,
+	DBG_BLOCK_ID_TA06                                = 0xa6,
+	DBG_BLOCK_ID_TA07                                = 0xa7,
+	DBG_BLOCK_ID_TA08                                = 0xa8,
+	DBG_BLOCK_ID_TA09                                = 0xa9,
+	DBG_BLOCK_ID_TA0A                                = 0xaa,
+	DBG_BLOCK_ID_TA0B                                = 0xab,
+	DBG_BLOCK_ID_UNUSED35                            = 0xac,
+	DBG_BLOCK_ID_UNUSED36                            = 0xad,
+	DBG_BLOCK_ID_UNUSED37                            = 0xae,
+	DBG_BLOCK_ID_UNUSED38                            = 0xaf,
+	DBG_BLOCK_ID_TA10                                = 0xb0,
+	DBG_BLOCK_ID_TA11                                = 0xb1,
+	DBG_BLOCK_ID_TA12                                = 0xb2,
+	DBG_BLOCK_ID_TA13                                = 0xb3,
+	DBG_BLOCK_ID_TA14                                = 0xb4,
+	DBG_BLOCK_ID_TA15                                = 0xb5,
+	DBG_BLOCK_ID_TA16                                = 0xb6,
+	DBG_BLOCK_ID_TA17                                = 0xb7,
+	DBG_BLOCK_ID_TA18                                = 0xb8,
+	DBG_BLOCK_ID_TA19                                = 0xb9,
+	DBG_BLOCK_ID_TA1A                                = 0xba,
+	DBG_BLOCK_ID_TA1B                                = 0xbb,
+	DBG_BLOCK_ID_UNUSED39                            = 0xbc,
+	DBG_BLOCK_ID_UNUSED40                            = 0xbd,
+	DBG_BLOCK_ID_UNUSED41                            = 0xbe,
+	DBG_BLOCK_ID_UNUSED42                            = 0xbf,
+	DBG_BLOCK_ID_TD00                                = 0xc0,
+	DBG_BLOCK_ID_TD01                                = 0xc1,
+	DBG_BLOCK_ID_TD02                                = 0xc2,
+	DBG_BLOCK_ID_TD03                                = 0xc3,
+	DBG_BLOCK_ID_TD04                                = 0xc4,
+	DBG_BLOCK_ID_TD05                                = 0xc5,
+	DBG_BLOCK_ID_TD06                                = 0xc6,
+	DBG_BLOCK_ID_TD07                                = 0xc7,
+	DBG_BLOCK_ID_TD08                                = 0xc8,
+	DBG_BLOCK_ID_TD09                                = 0xc9,
+	DBG_BLOCK_ID_TD0A                                = 0xca,
+	DBG_BLOCK_ID_TD0B                                = 0xcb,
+	DBG_BLOCK_ID_UNUSED43                            = 0xcc,
+	DBG_BLOCK_ID_UNUSED44                            = 0xcd,
+	DBG_BLOCK_ID_UNUSED45                            = 0xce,
+	DBG_BLOCK_ID_UNUSED46                            = 0xcf,
+	DBG_BLOCK_ID_TD10                                = 0xd0,
+	DBG_BLOCK_ID_TD11                                = 0xd1,
+	DBG_BLOCK_ID_TD12                                = 0xd2,
+	DBG_BLOCK_ID_TD13                                = 0xd3,
+	DBG_BLOCK_ID_TD14                                = 0xd4,
+	DBG_BLOCK_ID_TD15                                = 0xd5,
+	DBG_BLOCK_ID_TD16                                = 0xd6,
+	DBG_BLOCK_ID_TD17                                = 0xd7,
+	DBG_BLOCK_ID_TD18                                = 0xd8,
+	DBG_BLOCK_ID_TD19                                = 0xd9,
+	DBG_BLOCK_ID_TD1A                                = 0xda,
+	DBG_BLOCK_ID_TD1B                                = 0xdb,
+	DBG_BLOCK_ID_UNUSED47                            = 0xdc,
+	DBG_BLOCK_ID_UNUSED48                            = 0xdd,
+	DBG_BLOCK_ID_UNUSED49                            = 0xde,
+	DBG_BLOCK_ID_UNUSED50                            = 0xdf,
+	DBG_BLOCK_ID_MCD0                                = 0xe0,
+	DBG_BLOCK_ID_MCD1                                = 0xe1,
+	DBG_BLOCK_ID_MCD2                                = 0xe2,
+	DBG_BLOCK_ID_MCD3                                = 0xe3,
+	DBG_BLOCK_ID_MCD4                                = 0xe4,
+	DBG_BLOCK_ID_MCD5                                = 0xe5,
+	DBG_BLOCK_ID_UNUSED51                            = 0xe6,
+	DBG_BLOCK_ID_UNUSED52                            = 0xe7,
+} DebugBlockId_OLD;
+typedef enum DebugBlockId_BY2 {
+	DBG_BLOCK_ID_RESERVED_BY2                        = 0x0,
+	DBG_BLOCK_ID_VMC_BY2                             = 0x1,
+	DBG_BLOCK_ID_CG_BY2                              = 0x2,
+	DBG_BLOCK_ID_GRBM_BY2                            = 0x3,
+	DBG_BLOCK_ID_CSC_BY2                             = 0x4,
+	DBG_BLOCK_ID_IH_BY2                              = 0x5,
+	DBG_BLOCK_ID_SQ_BY2                              = 0x6,
+	DBG_BLOCK_ID_GMCON_BY2                           = 0x7,
+	DBG_BLOCK_ID_DMA0_BY2                            = 0x8,
+	DBG_BLOCK_ID_SPIM_BY2                            = 0x9,
+	DBG_BLOCK_ID_SPIS_BY2                            = 0xa,
+	DBG_BLOCK_ID_PA0_BY2                             = 0xb,
+	DBG_BLOCK_ID_CP0_BY2                             = 0xc,
+	DBG_BLOCK_ID_CP2_BY2                             = 0xd,
+	DBG_BLOCK_ID_UVDU_BY2                            = 0xe,
+	DBG_BLOCK_ID_VCE_BY2                             = 0xf,
+	DBG_BLOCK_ID_VGT0_BY2                            = 0x10,
+	DBG_BLOCK_ID_IA_BY2                              = 0x11,
+	DBG_BLOCK_ID_SCT0_BY2                            = 0x12,
+	DBG_BLOCK_ID_SPM0_BY2                            = 0x13,
+	DBG_BLOCK_ID_TCAA_BY2                            = 0x14,
+	DBG_BLOCK_ID_TCCA_BY2                            = 0x15,
+	DBG_BLOCK_ID_MCC0_BY2                            = 0x16,
+	DBG_BLOCK_ID_MCC2_BY2                            = 0x17,
+	DBG_BLOCK_ID_SX0_BY2                             = 0x18,
+	DBG_BLOCK_ID_SX2_BY2                             = 0x19,
+	DBG_BLOCK_ID_UNUSED4_BY2                         = 0x1a,
+	DBG_BLOCK_ID_UNUSED6_BY2                         = 0x1b,
+	DBG_BLOCK_ID_PC0_BY2                             = 0x1c,
+	DBG_BLOCK_ID_UNUSED8_BY2                         = 0x1d,
+	DBG_BLOCK_ID_UNUSED10_BY2                        = 0x1e,
+	DBG_BLOCK_ID_MCB_BY2                             = 0x1f,
+	DBG_BLOCK_ID_SCB0_BY2                            = 0x20,
+	DBG_BLOCK_ID_UNUSED13_BY2                        = 0x21,
+	DBG_BLOCK_ID_SCF0_BY2                            = 0x22,
+	DBG_BLOCK_ID_UNUSED15_BY2                        = 0x23,
+	DBG_BLOCK_ID_BCI0_BY2                            = 0x24,
+	DBG_BLOCK_ID_BCI2_BY2                            = 0x25,
+	DBG_BLOCK_ID_UNUSED17_BY2                        = 0x26,
+	DBG_BLOCK_ID_UNUSED19_BY2                        = 0x27,
+	DBG_BLOCK_ID_CB00_BY2                            = 0x28,
+	DBG_BLOCK_ID_CB02_BY2                            = 0x29,
+	DBG_BLOCK_ID_CB04_BY2                            = 0x2a,
+	DBG_BLOCK_ID_UNUSED22_BY2                        = 0x2b,
+	DBG_BLOCK_ID_CB10_BY2                            = 0x2c,
+	DBG_BLOCK_ID_CB12_BY2                            = 0x2d,
+	DBG_BLOCK_ID_CB14_BY2                            = 0x2e,
+	DBG_BLOCK_ID_UNUSED25_BY2                        = 0x2f,
+	DBG_BLOCK_ID_TCP0_BY2                            = 0x30,
+	DBG_BLOCK_ID_TCP2_BY2                            = 0x31,
+	DBG_BLOCK_ID_TCP4_BY2                            = 0x32,
+	DBG_BLOCK_ID_TCP6_BY2                            = 0x33,
+	DBG_BLOCK_ID_TCP8_BY2                            = 0x34,
+	DBG_BLOCK_ID_TCP10_BY2                           = 0x35,
+	DBG_BLOCK_ID_TCP12_BY2                           = 0x36,
+	DBG_BLOCK_ID_TCP14_BY2                           = 0x37,
+	DBG_BLOCK_ID_TCP16_BY2                           = 0x38,
+	DBG_BLOCK_ID_TCP18_BY2                           = 0x39,
+	DBG_BLOCK_ID_TCP20_BY2                           = 0x3a,
+	DBG_BLOCK_ID_TCP22_BY2                           = 0x3b,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY2                   = 0x3c,
+	DBG_BLOCK_ID_TCP_RESERVED2_BY2                   = 0x3d,
+	DBG_BLOCK_ID_TCP_RESERVED4_BY2                   = 0x3e,
+	DBG_BLOCK_ID_TCP_RESERVED6_BY2                   = 0x3f,
+	DBG_BLOCK_ID_DB00_BY2                            = 0x40,
+	DBG_BLOCK_ID_DB02_BY2                            = 0x41,
+	DBG_BLOCK_ID_DB04_BY2                            = 0x42,
+	DBG_BLOCK_ID_UNUSED28_BY2                        = 0x43,
+	DBG_BLOCK_ID_DB10_BY2                            = 0x44,
+	DBG_BLOCK_ID_DB12_BY2                            = 0x45,
+	DBG_BLOCK_ID_DB14_BY2                            = 0x46,
+	DBG_BLOCK_ID_UNUSED31_BY2                        = 0x47,
+	DBG_BLOCK_ID_TCC0_BY2                            = 0x48,
+	DBG_BLOCK_ID_TCC2_BY2                            = 0x49,
+	DBG_BLOCK_ID_TCC4_BY2                            = 0x4a,
+	DBG_BLOCK_ID_TCC6_BY2                            = 0x4b,
+	DBG_BLOCK_ID_SPS00_BY2                           = 0x4c,
+	DBG_BLOCK_ID_SPS02_BY2                           = 0x4d,
+	DBG_BLOCK_ID_SPS11_BY2                           = 0x4e,
+	DBG_BLOCK_ID_UNUSED33_BY2                        = 0x4f,
+	DBG_BLOCK_ID_TA00_BY2                            = 0x50,
+	DBG_BLOCK_ID_TA02_BY2                            = 0x51,
+	DBG_BLOCK_ID_TA04_BY2                            = 0x52,
+	DBG_BLOCK_ID_TA06_BY2                            = 0x53,
+	DBG_BLOCK_ID_TA08_BY2                            = 0x54,
+	DBG_BLOCK_ID_TA0A_BY2                            = 0x55,
+	DBG_BLOCK_ID_UNUSED35_BY2                        = 0x56,
+	DBG_BLOCK_ID_UNUSED37_BY2                        = 0x57,
+	DBG_BLOCK_ID_TA10_BY2                            = 0x58,
+	DBG_BLOCK_ID_TA12_BY2                            = 0x59,
+	DBG_BLOCK_ID_TA14_BY2                            = 0x5a,
+	DBG_BLOCK_ID_TA16_BY2                            = 0x5b,
+	DBG_BLOCK_ID_TA18_BY2                            = 0x5c,
+	DBG_BLOCK_ID_TA1A_BY2                            = 0x5d,
+	DBG_BLOCK_ID_UNUSED39_BY2                        = 0x5e,
+	DBG_BLOCK_ID_UNUSED41_BY2                        = 0x5f,
+	DBG_BLOCK_ID_TD00_BY2                            = 0x60,
+	DBG_BLOCK_ID_TD02_BY2                            = 0x61,
+	DBG_BLOCK_ID_TD04_BY2                            = 0x62,
+	DBG_BLOCK_ID_TD06_BY2                            = 0x63,
+	DBG_BLOCK_ID_TD08_BY2                            = 0x64,
+	DBG_BLOCK_ID_TD0A_BY2                            = 0x65,
+	DBG_BLOCK_ID_UNUSED43_BY2                        = 0x66,
+	DBG_BLOCK_ID_UNUSED45_BY2                        = 0x67,
+	DBG_BLOCK_ID_TD10_BY2                            = 0x68,
+	DBG_BLOCK_ID_TD12_BY2                            = 0x69,
+	DBG_BLOCK_ID_TD14_BY2                            = 0x6a,
+	DBG_BLOCK_ID_TD16_BY2                            = 0x6b,
+	DBG_BLOCK_ID_TD18_BY2                            = 0x6c,
+	DBG_BLOCK_ID_TD1A_BY2                            = 0x6d,
+	DBG_BLOCK_ID_UNUSED47_BY2                        = 0x6e,
+	DBG_BLOCK_ID_UNUSED49_BY2                        = 0x6f,
+	DBG_BLOCK_ID_MCD0_BY2                            = 0x70,
+	DBG_BLOCK_ID_MCD2_BY2                            = 0x71,
+	DBG_BLOCK_ID_MCD4_BY2                            = 0x72,
+	DBG_BLOCK_ID_UNUSED51_BY2                        = 0x73,
+} DebugBlockId_BY2;
+typedef enum DebugBlockId_BY4 {
+	DBG_BLOCK_ID_RESERVED_BY4                        = 0x0,
+	DBG_BLOCK_ID_CG_BY4                              = 0x1,
+	DBG_BLOCK_ID_CSC_BY4                             = 0x2,
+	DBG_BLOCK_ID_SQ_BY4                              = 0x3,
+	DBG_BLOCK_ID_DMA0_BY4                            = 0x4,
+	DBG_BLOCK_ID_SPIS_BY4                            = 0x5,
+	DBG_BLOCK_ID_CP0_BY4                             = 0x6,
+	DBG_BLOCK_ID_UVDU_BY4                            = 0x7,
+	DBG_BLOCK_ID_VGT0_BY4                            = 0x8,
+	DBG_BLOCK_ID_SCT0_BY4                            = 0x9,
+	DBG_BLOCK_ID_TCAA_BY4                            = 0xa,
+	DBG_BLOCK_ID_MCC0_BY4                            = 0xb,
+	DBG_BLOCK_ID_SX0_BY4                             = 0xc,
+	DBG_BLOCK_ID_UNUSED4_BY4                         = 0xd,
+	DBG_BLOCK_ID_PC0_BY4                             = 0xe,
+	DBG_BLOCK_ID_UNUSED10_BY4                        = 0xf,
+	DBG_BLOCK_ID_SCB0_BY4                            = 0x10,
+	DBG_BLOCK_ID_SCF0_BY4                            = 0x11,
+	DBG_BLOCK_ID_BCI0_BY4                            = 0x12,
+	DBG_BLOCK_ID_UNUSED17_BY4                        = 0x13,
+	DBG_BLOCK_ID_CB00_BY4                            = 0x14,
+	DBG_BLOCK_ID_CB04_BY4                            = 0x15,
+	DBG_BLOCK_ID_CB10_BY4                            = 0x16,
+	DBG_BLOCK_ID_CB14_BY4                            = 0x17,
+	DBG_BLOCK_ID_TCP0_BY4                            = 0x18,
+	DBG_BLOCK_ID_TCP4_BY4                            = 0x19,
+	DBG_BLOCK_ID_TCP8_BY4                            = 0x1a,
+	DBG_BLOCK_ID_TCP12_BY4                           = 0x1b,
+	DBG_BLOCK_ID_TCP16_BY4                           = 0x1c,
+	DBG_BLOCK_ID_TCP20_BY4                           = 0x1d,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY4                   = 0x1e,
+	DBG_BLOCK_ID_TCP_RESERVED4_BY4                   = 0x1f,
+	DBG_BLOCK_ID_DB_BY4                              = 0x20,
+	DBG_BLOCK_ID_DB04_BY4                            = 0x21,
+	DBG_BLOCK_ID_DB10_BY4                            = 0x22,
+	DBG_BLOCK_ID_DB14_BY4                            = 0x23,
+	DBG_BLOCK_ID_TCC0_BY4                            = 0x24,
+	DBG_BLOCK_ID_TCC4_BY4                            = 0x25,
+	DBG_BLOCK_ID_SPS00_BY4                           = 0x26,
+	DBG_BLOCK_ID_SPS11_BY4                           = 0x27,
+	DBG_BLOCK_ID_TA00_BY4                            = 0x28,
+	DBG_BLOCK_ID_TA04_BY4                            = 0x29,
+	DBG_BLOCK_ID_TA08_BY4                            = 0x2a,
+	DBG_BLOCK_ID_UNUSED35_BY4                        = 0x2b,
+	DBG_BLOCK_ID_TA10_BY4                            = 0x2c,
+	DBG_BLOCK_ID_TA14_BY4                            = 0x2d,
+	DBG_BLOCK_ID_TA18_BY4                            = 0x2e,
+	DBG_BLOCK_ID_UNUSED39_BY4                        = 0x2f,
+	DBG_BLOCK_ID_TD00_BY4                            = 0x30,
+	DBG_BLOCK_ID_TD04_BY4                            = 0x31,
+	DBG_BLOCK_ID_TD08_BY4                            = 0x32,
+	DBG_BLOCK_ID_UNUSED43_BY4                        = 0x33,
+	DBG_BLOCK_ID_TD10_BY4                            = 0x34,
+	DBG_BLOCK_ID_TD14_BY4                            = 0x35,
+	DBG_BLOCK_ID_TD18_BY4                            = 0x36,
+	DBG_BLOCK_ID_UNUSED47_BY4                        = 0x37,
+	DBG_BLOCK_ID_MCD0_BY4                            = 0x38,
+	DBG_BLOCK_ID_MCD4_BY4                            = 0x39,
+} DebugBlockId_BY4;
+typedef enum DebugBlockId_BY8 {
+	DBG_BLOCK_ID_RESERVED_BY8                        = 0x0,
+	DBG_BLOCK_ID_CSC_BY8                             = 0x1,
+	DBG_BLOCK_ID_DMA0_BY8                            = 0x2,
+	DBG_BLOCK_ID_CP0_BY8                             = 0x3,
+	DBG_BLOCK_ID_VGT0_BY8                            = 0x4,
+	DBG_BLOCK_ID_TCAA_BY8                            = 0x5,
+	DBG_BLOCK_ID_SX0_BY8                             = 0x6,
+	DBG_BLOCK_ID_PC0_BY8                             = 0x7,
+	DBG_BLOCK_ID_SCB0_BY8                            = 0x8,
+	DBG_BLOCK_ID_BCI0_BY8                            = 0x9,
+	DBG_BLOCK_ID_CB00_BY8                            = 0xa,
+	DBG_BLOCK_ID_CB10_BY8                            = 0xb,
+	DBG_BLOCK_ID_TCP0_BY8                            = 0xc,
+	DBG_BLOCK_ID_TCP8_BY8                            = 0xd,
+	DBG_BLOCK_ID_TCP16_BY8                           = 0xe,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY8                   = 0xf,
+	DBG_BLOCK_ID_DB00_BY8                            = 0x10,
+	DBG_BLOCK_ID_DB10_BY8                            = 0x11,
+	DBG_BLOCK_ID_TCC0_BY8                            = 0x12,
+	DBG_BLOCK_ID_SPS00_BY8                           = 0x13,
+	DBG_BLOCK_ID_TA00_BY8                            = 0x14,
+	DBG_BLOCK_ID_TA08_BY8                            = 0x15,
+	DBG_BLOCK_ID_TA10_BY8                            = 0x16,
+	DBG_BLOCK_ID_TA18_BY8                            = 0x17,
+	DBG_BLOCK_ID_TD00_BY8                            = 0x18,
+	DBG_BLOCK_ID_TD08_BY8                            = 0x19,
+	DBG_BLOCK_ID_TD10_BY8                            = 0x1a,
+	DBG_BLOCK_ID_TD18_BY8                            = 0x1b,
+	DBG_BLOCK_ID_MCD0_BY8                            = 0x1c,
+} DebugBlockId_BY8;
+typedef enum DebugBlockId_BY16 {
+	DBG_BLOCK_ID_RESERVED_BY16                       = 0x0,
+	DBG_BLOCK_ID_DMA0_BY16                           = 0x1,
+	DBG_BLOCK_ID_VGT0_BY16                           = 0x2,
+	DBG_BLOCK_ID_SX0_BY16                            = 0x3,
+	DBG_BLOCK_ID_SCB0_BY16                           = 0x4,
+	DBG_BLOCK_ID_CB00_BY16                           = 0x5,
+	DBG_BLOCK_ID_TCP0_BY16                           = 0x6,
+	DBG_BLOCK_ID_TCP16_BY16                          = 0x7,
+	DBG_BLOCK_ID_DB00_BY16                           = 0x8,
+	DBG_BLOCK_ID_TCC0_BY16                           = 0x9,
+	DBG_BLOCK_ID_TA00_BY16                           = 0xa,
+	DBG_BLOCK_ID_TA10_BY16                           = 0xb,
+	DBG_BLOCK_ID_TD00_BY16                           = 0xc,
+	DBG_BLOCK_ID_TD10_BY16                           = 0xd,
+	DBG_BLOCK_ID_MCD0_BY16                           = 0xe,
+} DebugBlockId_BY16;
+typedef enum ColorTransform {
+	DCC_CT_AUTO                                      = 0x0,
+	DCC_CT_NONE                                      = 0x1,
+	ABGR_TO_A_BG_G_RB                                = 0x2,
+	BGRA_TO_BG_G_RB_A                                = 0x3,
+} ColorTransform;
+typedef enum CompareRef {
+	REF_NEVER                                        = 0x0,
+	REF_LESS                                         = 0x1,
+	REF_EQUAL                                        = 0x2,
+	REF_LEQUAL                                       = 0x3,
+	REF_GREATER                                      = 0x4,
+	REF_NOTEQUAL                                     = 0x5,
+	REF_GEQUAL                                       = 0x6,
+	REF_ALWAYS                                       = 0x7,
+} CompareRef;
+typedef enum ReadSize {
+	READ_256_BITS                                    = 0x0,
+	READ_512_BITS                                    = 0x1,
+} ReadSize;
+typedef enum DepthFormat {
+	DEPTH_INVALID                                    = 0x0,
+	DEPTH_16                                         = 0x1,
+	DEPTH_X8_24                                      = 0x2,
+	DEPTH_8_24                                       = 0x3,
+	DEPTH_X8_24_FLOAT                                = 0x4,
+	DEPTH_8_24_FLOAT                                 = 0x5,
+	DEPTH_32_FLOAT                                   = 0x6,
+	DEPTH_X24_8_32_FLOAT                             = 0x7,
+} DepthFormat;
+typedef enum ZFormat {
+	Z_INVALID                                        = 0x0,
+	Z_16                                             = 0x1,
+	Z_24                                             = 0x2,
+	Z_32_FLOAT                                       = 0x3,
+} ZFormat;
+typedef enum StencilFormat {
+	STENCIL_INVALID                                  = 0x0,
+	STENCIL_8                                        = 0x1,
+} StencilFormat;
+typedef enum CmaskMode {
+	CMASK_CLEAR_NONE                                 = 0x0,
+	CMASK_CLEAR_ONE                                  = 0x1,
+	CMASK_CLEAR_ALL                                  = 0x2,
+	CMASK_ANY_EXPANDED                               = 0x3,
+	CMASK_ALPHA0_FRAG1                               = 0x4,
+	CMASK_ALPHA0_FRAG2                               = 0x5,
+	CMASK_ALPHA0_FRAG4                               = 0x6,
+	CMASK_ALPHA0_FRAGS                               = 0x7,
+	CMASK_ALPHA1_FRAG1                               = 0x8,
+	CMASK_ALPHA1_FRAG2                               = 0x9,
+	CMASK_ALPHA1_FRAG4                               = 0xa,
+	CMASK_ALPHA1_FRAGS                               = 0xb,
+	CMASK_ALPHAX_FRAG1                               = 0xc,
+	CMASK_ALPHAX_FRAG2                               = 0xd,
+	CMASK_ALPHAX_FRAG4                               = 0xe,
+	CMASK_ALPHAX_FRAGS                               = 0xf,
+} CmaskMode;
+typedef enum QuadExportFormat {
+	EXPORT_UNUSED                                    = 0x0,
+	EXPORT_32_R                                      = 0x1,
+	EXPORT_32_GR                                     = 0x2,
+	EXPORT_32_AR                                     = 0x3,
+	EXPORT_FP16_ABGR                                 = 0x4,
+	EXPORT_UNSIGNED16_ABGR                           = 0x5,
+	EXPORT_SIGNED16_ABGR                             = 0x6,
+	EXPORT_32_ABGR                                   = 0x7,
+} QuadExportFormat;
+typedef enum QuadExportFormatOld {
+	EXPORT_4P_32BPC_ABGR                             = 0x0,
+	EXPORT_4P_16BPC_ABGR                             = 0x1,
+	EXPORT_4P_32BPC_GR                               = 0x2,
+	EXPORT_4P_32BPC_AR                               = 0x3,
+	EXPORT_2P_32BPC_ABGR                             = 0x4,
+	EXPORT_8P_32BPC_R                                = 0x5,
+} QuadExportFormatOld;
+typedef enum ColorFormat {
+	COLOR_INVALID                                    = 0x0,
+	COLOR_8                                          = 0x1,
+	COLOR_16                                         = 0x2,
+	COLOR_8_8                                        = 0x3,
+	COLOR_32                                         = 0x4,
+	COLOR_16_16                                      = 0x5,
+	COLOR_10_11_11                                   = 0x6,
+	COLOR_11_11_10                                   = 0x7,
+	COLOR_10_10_10_2                                 = 0x8,
+	COLOR_2_10_10_10                                 = 0x9,
+	COLOR_8_8_8_8                                    = 0xa,
+	COLOR_32_32                                      = 0xb,
+	COLOR_16_16_16_16                                = 0xc,
+	COLOR_RESERVED_13                                = 0xd,
+	COLOR_32_32_32_32                                = 0xe,
+	COLOR_RESERVED_15                                = 0xf,
+	COLOR_5_6_5                                      = 0x10,
+	COLOR_1_5_5_5                                    = 0x11,
+	COLOR_5_5_5_1                                    = 0x12,
+	COLOR_4_4_4_4                                    = 0x13,
+	COLOR_8_24                                       = 0x14,
+	COLOR_24_8                                       = 0x15,
+	COLOR_X24_8_32_FLOAT                             = 0x16,
+	COLOR_RESERVED_23                                = 0x17,
+} ColorFormat;
+typedef enum SurfaceFormat {
+	FMT_INVALID                                      = 0x0,
+	FMT_8                                            = 0x1,
+	FMT_16                                           = 0x2,
+	FMT_8_8                                          = 0x3,
+	FMT_32                                           = 0x4,
+	FMT_16_16                                        = 0x5,
+	FMT_10_11_11                                     = 0x6,
+	FMT_11_11_10                                     = 0x7,
+	FMT_10_10_10_2                                   = 0x8,
+	FMT_2_10_10_10                                   = 0x9,
+	FMT_8_8_8_8                                      = 0xa,
+	FMT_32_32                                        = 0xb,
+	FMT_16_16_16_16                                  = 0xc,
+	FMT_32_32_32                                     = 0xd,
+	FMT_32_32_32_32                                  = 0xe,
+	FMT_RESERVED_4                                   = 0xf,
+	FMT_5_6_5                                        = 0x10,
+	FMT_1_5_5_5                                      = 0x11,
+	FMT_5_5_5_1                                      = 0x12,
+	FMT_4_4_4_4                                      = 0x13,
+	FMT_8_24                                         = 0x14,
+	FMT_24_8                                         = 0x15,
+	FMT_X24_8_32_FLOAT                               = 0x16,
+	FMT_RESERVED_33                                  = 0x17,
+	FMT_11_11_10_FLOAT                               = 0x18,
+	FMT_16_FLOAT                                     = 0x19,
+	FMT_32_FLOAT                                     = 0x1a,
+	FMT_16_16_FLOAT                                  = 0x1b,
+	FMT_8_24_FLOAT                                   = 0x1c,
+	FMT_24_8_FLOAT                                   = 0x1d,
+	FMT_32_32_FLOAT                                  = 0x1e,
+	FMT_10_11_11_FLOAT                               = 0x1f,
+	FMT_16_16_16_16_FLOAT                            = 0x20,
+	FMT_3_3_2                                        = 0x21,
+	FMT_6_5_5                                        = 0x22,
+	FMT_32_32_32_32_FLOAT                            = 0x23,
+	FMT_RESERVED_36                                  = 0x24,
+	FMT_1                                            = 0x25,
+	FMT_1_REVERSED                                   = 0x26,
+	FMT_GB_GR                                        = 0x27,
+	FMT_BG_RG                                        = 0x28,
+	FMT_32_AS_8                                      = 0x29,
+	FMT_32_AS_8_8                                    = 0x2a,
+	FMT_5_9_9_9_SHAREDEXP                            = 0x2b,
+	FMT_8_8_8                                        = 0x2c,
+	FMT_16_16_16                                     = 0x2d,
+	FMT_16_16_16_FLOAT                               = 0x2e,
+	FMT_4_4                                          = 0x2f,
+	FMT_32_32_32_FLOAT                               = 0x30,
+	FMT_BC1                                          = 0x31,
+	FMT_BC2                                          = 0x32,
+	FMT_BC3                                          = 0x33,
+	FMT_BC4                                          = 0x34,
+	FMT_BC5                                          = 0x35,
+	FMT_BC6                                          = 0x36,
+	FMT_BC7                                          = 0x37,
+	FMT_32_AS_32_32_32_32                            = 0x38,
+	FMT_APC3                                         = 0x39,
+	FMT_APC4                                         = 0x3a,
+	FMT_APC5                                         = 0x3b,
+	FMT_APC6                                         = 0x3c,
+	FMT_APC7                                         = 0x3d,
+	FMT_CTX1                                         = 0x3e,
+	FMT_RESERVED_63                                  = 0x3f,
+} SurfaceFormat;
+typedef enum BUF_DATA_FORMAT {
+	BUF_DATA_FORMAT_INVALID                          = 0x0,
+	BUF_DATA_FORMAT_8                                = 0x1,
+	BUF_DATA_FORMAT_16                               = 0x2,
+	BUF_DATA_FORMAT_8_8                              = 0x3,
+	BUF_DATA_FORMAT_32                               = 0x4,
+	BUF_DATA_FORMAT_16_16                            = 0x5,
+	BUF_DATA_FORMAT_10_11_11                         = 0x6,
+	BUF_DATA_FORMAT_11_11_10                         = 0x7,
+	BUF_DATA_FORMAT_10_10_10_2                       = 0x8,
+	BUF_DATA_FORMAT_2_10_10_10                       = 0x9,
+	BUF_DATA_FORMAT_8_8_8_8                          = 0xa,
+	BUF_DATA_FORMAT_32_32                            = 0xb,
+	BUF_DATA_FORMAT_16_16_16_16                      = 0xc,
+	BUF_DATA_FORMAT_32_32_32                         = 0xd,
+	BUF_DATA_FORMAT_32_32_32_32                      = 0xe,
+	BUF_DATA_FORMAT_RESERVED_15                      = 0xf,
+} BUF_DATA_FORMAT;
+typedef enum IMG_DATA_FORMAT {
+	IMG_DATA_FORMAT_INVALID                          = 0x0,
+	IMG_DATA_FORMAT_8                                = 0x1,
+	IMG_DATA_FORMAT_16                               = 0x2,
+	IMG_DATA_FORMAT_8_8                              = 0x3,
+	IMG_DATA_FORMAT_32                               = 0x4,
+	IMG_DATA_FORMAT_16_16                            = 0x5,
+	IMG_DATA_FORMAT_10_11_11                         = 0x6,
+	IMG_DATA_FORMAT_11_11_10                         = 0x7,
+	IMG_DATA_FORMAT_10_10_10_2                       = 0x8,
+	IMG_DATA_FORMAT_2_10_10_10                       = 0x9,
+	IMG_DATA_FORMAT_8_8_8_8                          = 0xa,
+	IMG_DATA_FORMAT_32_32                            = 0xb,
+	IMG_DATA_FORMAT_16_16_16_16                      = 0xc,
+	IMG_DATA_FORMAT_32_32_32                         = 0xd,
+	IMG_DATA_FORMAT_32_32_32_32                      = 0xe,
+	IMG_DATA_FORMAT_RESERVED_15                      = 0xf,
+	IMG_DATA_FORMAT_5_6_5                            = 0x10,
+	IMG_DATA_FORMAT_1_5_5_5                          = 0x11,
+	IMG_DATA_FORMAT_5_5_5_1                          = 0x12,
+	IMG_DATA_FORMAT_4_4_4_4                          = 0x13,
+	IMG_DATA_FORMAT_8_24                             = 0x14,
+	IMG_DATA_FORMAT_24_8                             = 0x15,
+	IMG_DATA_FORMAT_X24_8_32                         = 0x16,
+	IMG_DATA_FORMAT_RESERVED_23                      = 0x17,
+	IMG_DATA_FORMAT_RESERVED_24                      = 0x18,
+	IMG_DATA_FORMAT_RESERVED_25                      = 0x19,
+	IMG_DATA_FORMAT_RESERVED_26                      = 0x1a,
+	IMG_DATA_FORMAT_RESERVED_27                      = 0x1b,
+	IMG_DATA_FORMAT_RESERVED_28                      = 0x1c,
+	IMG_DATA_FORMAT_RESERVED_29                      = 0x1d,
+	IMG_DATA_FORMAT_RESERVED_30                      = 0x1e,
+	IMG_DATA_FORMAT_RESERVED_31                      = 0x1f,
+	IMG_DATA_FORMAT_GB_GR                            = 0x20,
+	IMG_DATA_FORMAT_BG_RG                            = 0x21,
+	IMG_DATA_FORMAT_5_9_9_9                          = 0x22,
+	IMG_DATA_FORMAT_BC1                              = 0x23,
+	IMG_DATA_FORMAT_BC2                              = 0x24,
+	IMG_DATA_FORMAT_BC3                              = 0x25,
+	IMG_DATA_FORMAT_BC4                              = 0x26,
+	IMG_DATA_FORMAT_BC5                              = 0x27,
+	IMG_DATA_FORMAT_BC6                              = 0x28,
+	IMG_DATA_FORMAT_BC7                              = 0x29,
+	IMG_DATA_FORMAT_RESERVED_42                      = 0x2a,
+	IMG_DATA_FORMAT_RESERVED_43                      = 0x2b,
+	IMG_DATA_FORMAT_FMASK8_S2_F1                     = 0x2c,
+	IMG_DATA_FORMAT_FMASK8_S4_F1                     = 0x2d,
+	IMG_DATA_FORMAT_FMASK8_S8_F1                     = 0x2e,
+	IMG_DATA_FORMAT_FMASK8_S2_F2                     = 0x2f,
+	IMG_DATA_FORMAT_FMASK8_S4_F2                     = 0x30,
+	IMG_DATA_FORMAT_FMASK8_S4_F4                     = 0x31,
+	IMG_DATA_FORMAT_FMASK16_S16_F1                   = 0x32,
+	IMG_DATA_FORMAT_FMASK16_S8_F2                    = 0x33,
+	IMG_DATA_FORMAT_FMASK32_S16_F2                   = 0x34,
+	IMG_DATA_FORMAT_FMASK32_S8_F4                    = 0x35,
+	IMG_DATA_FORMAT_FMASK32_S8_F8                    = 0x36,
+	IMG_DATA_FORMAT_FMASK64_S16_F4                   = 0x37,
+	IMG_DATA_FORMAT_FMASK64_S16_F8                   = 0x38,
+	IMG_DATA_FORMAT_4_4                              = 0x39,
+	IMG_DATA_FORMAT_6_5_5                            = 0x3a,
+	IMG_DATA_FORMAT_1                                = 0x3b,
+	IMG_DATA_FORMAT_1_REVERSED                       = 0x3c,
+	IMG_DATA_FORMAT_32_AS_8                          = 0x3d,
+	IMG_DATA_FORMAT_32_AS_8_8                        = 0x3e,
+	IMG_DATA_FORMAT_32_AS_32_32_32_32                = 0x3f,
+} IMG_DATA_FORMAT;
+typedef enum BUF_NUM_FORMAT {
+	BUF_NUM_FORMAT_UNORM                             = 0x0,
+	BUF_NUM_FORMAT_SNORM                             = 0x1,
+	BUF_NUM_FORMAT_USCALED                           = 0x2,
+	BUF_NUM_FORMAT_SSCALED                           = 0x3,
+	BUF_NUM_FORMAT_UINT                              = 0x4,
+	BUF_NUM_FORMAT_SINT                              = 0x5,
+	BUF_NUM_FORMAT_RESERVED_6                        = 0x6,
+	BUF_NUM_FORMAT_FLOAT                             = 0x7,
+} BUF_NUM_FORMAT;
+typedef enum IMG_NUM_FORMAT {
+	IMG_NUM_FORMAT_UNORM                             = 0x0,
+	IMG_NUM_FORMAT_SNORM                             = 0x1,
+	IMG_NUM_FORMAT_USCALED                           = 0x2,
+	IMG_NUM_FORMAT_SSCALED                           = 0x3,
+	IMG_NUM_FORMAT_UINT                              = 0x4,
+	IMG_NUM_FORMAT_SINT                              = 0x5,
+	IMG_NUM_FORMAT_RESERVED_6                        = 0x6,
+	IMG_NUM_FORMAT_FLOAT                             = 0x7,
+	IMG_NUM_FORMAT_RESERVED_8                        = 0x8,
+	IMG_NUM_FORMAT_SRGB                              = 0x9,
+	IMG_NUM_FORMAT_RESERVED_10                       = 0xa,
+	IMG_NUM_FORMAT_RESERVED_11                       = 0xb,
+	IMG_NUM_FORMAT_RESERVED_12                       = 0xc,
+	IMG_NUM_FORMAT_RESERVED_13                       = 0xd,
+	IMG_NUM_FORMAT_RESERVED_14                       = 0xe,
+	IMG_NUM_FORMAT_RESERVED_15                       = 0xf,
+} IMG_NUM_FORMAT;
+typedef enum TileType {
+	ARRAY_COLOR_TILE                                 = 0x0,
+	ARRAY_DEPTH_TILE                                 = 0x1,
+} TileType;
+typedef enum NonDispTilingOrder {
+	ADDR_SURF_MICRO_TILING_DISPLAY                   = 0x0,
+	ADDR_SURF_MICRO_TILING_NON_DISPLAY               = 0x1,
+} NonDispTilingOrder;
+typedef enum MicroTileMode {
+	ADDR_SURF_DISPLAY_MICRO_TILING                   = 0x0,
+	ADDR_SURF_THIN_MICRO_TILING                      = 0x1,
+	ADDR_SURF_DEPTH_MICRO_TILING                     = 0x2,
+	ADDR_SURF_ROTATED_MICRO_TILING                   = 0x3,
+	ADDR_SURF_THICK_MICRO_TILING                     = 0x4,
+} MicroTileMode;
+typedef enum TileSplit {
+	ADDR_SURF_TILE_SPLIT_64B                         = 0x0,
+	ADDR_SURF_TILE_SPLIT_128B                        = 0x1,
+	ADDR_SURF_TILE_SPLIT_256B                        = 0x2,
+	ADDR_SURF_TILE_SPLIT_512B                        = 0x3,
+	ADDR_SURF_TILE_SPLIT_1KB                         = 0x4,
+	ADDR_SURF_TILE_SPLIT_2KB                         = 0x5,
+	ADDR_SURF_TILE_SPLIT_4KB                         = 0x6,
+} TileSplit;
+typedef enum SampleSplit {
+	ADDR_SURF_SAMPLE_SPLIT_1                         = 0x0,
+	ADDR_SURF_SAMPLE_SPLIT_2                         = 0x1,
+	ADDR_SURF_SAMPLE_SPLIT_4                         = 0x2,
+	ADDR_SURF_SAMPLE_SPLIT_8                         = 0x3,
+} SampleSplit;
+typedef enum PipeConfig {
+	ADDR_SURF_P2                                     = 0x0,
+	ADDR_SURF_P2_RESERVED0                           = 0x1,
+	ADDR_SURF_P2_RESERVED1                           = 0x2,
+	ADDR_SURF_P2_RESERVED2                           = 0x3,
+	ADDR_SURF_P4_8x16                                = 0x4,
+	ADDR_SURF_P4_16x16                               = 0x5,
+	ADDR_SURF_P4_16x32                               = 0x6,
+	ADDR_SURF_P4_32x32                               = 0x7,
+	ADDR_SURF_P8_16x16_8x16                          = 0x8,
+	ADDR_SURF_P8_16x32_8x16                          = 0x9,
+	ADDR_SURF_P8_32x32_8x16                          = 0xa,
+	ADDR_SURF_P8_16x32_16x16                         = 0xb,
+	ADDR_SURF_P8_32x32_16x16                         = 0xc,
+	ADDR_SURF_P8_32x32_16x32                         = 0xd,
+	ADDR_SURF_P8_32x64_32x32                         = 0xe,
+	ADDR_SURF_P8_RESERVED0                           = 0xf,
+	ADDR_SURF_P16_32x32_8x16                         = 0x10,
+	ADDR_SURF_P16_32x32_16x16                        = 0x11,
+} PipeConfig;
+typedef enum NumBanks {
+	ADDR_SURF_2_BANK                                 = 0x0,
+	ADDR_SURF_4_BANK                                 = 0x1,
+	ADDR_SURF_8_BANK                                 = 0x2,
+	ADDR_SURF_16_BANK                                = 0x3,
+} NumBanks;
+typedef enum BankWidth {
+	ADDR_SURF_BANK_WIDTH_1                           = 0x0,
+	ADDR_SURF_BANK_WIDTH_2                           = 0x1,
+	ADDR_SURF_BANK_WIDTH_4                           = 0x2,
+	ADDR_SURF_BANK_WIDTH_8                           = 0x3,
+} BankWidth;
+typedef enum BankHeight {
+	ADDR_SURF_BANK_HEIGHT_1                          = 0x0,
+	ADDR_SURF_BANK_HEIGHT_2                          = 0x1,
+	ADDR_SURF_BANK_HEIGHT_4                          = 0x2,
+	ADDR_SURF_BANK_HEIGHT_8                          = 0x3,
+} BankHeight;
+typedef enum BankWidthHeight {
+	ADDR_SURF_BANK_WH_1                              = 0x0,
+	ADDR_SURF_BANK_WH_2                              = 0x1,
+	ADDR_SURF_BANK_WH_4                              = 0x2,
+	ADDR_SURF_BANK_WH_8                              = 0x3,
+} BankWidthHeight;
+typedef enum MacroTileAspect {
+	ADDR_SURF_MACRO_ASPECT_1                         = 0x0,
+	ADDR_SURF_MACRO_ASPECT_2                         = 0x1,
+	ADDR_SURF_MACRO_ASPECT_4                         = 0x2,
+	ADDR_SURF_MACRO_ASPECT_8                         = 0x3,
+} MacroTileAspect;
+typedef enum GATCL1RequestType {
+	GATCL1_TYPE_NORMAL                               = 0x0,
+	GATCL1_TYPE_SHOOTDOWN                            = 0x1,
+	GATCL1_TYPE_BYPASS                               = 0x2,
+} GATCL1RequestType;
+typedef enum TCC_CACHE_POLICIES {
+	TCC_CACHE_POLICY_LRU                             = 0x0,
+	TCC_CACHE_POLICY_STREAM                          = 0x1,
+} TCC_CACHE_POLICIES;
+typedef enum MTYPE {
+	MTYPE_NC_NV                                      = 0x0,
+	MTYPE_NC                                         = 0x1,
+	MTYPE_CC                                         = 0x2,
+	MTYPE_UC                                         = 0x3,
+} MTYPE;
+typedef enum PERFMON_COUNTER_MODE {
+	PERFMON_COUNTER_MODE_ACCUM                       = 0x0,
+	PERFMON_COUNTER_MODE_ACTIVE_CYCLES               = 0x1,
+	PERFMON_COUNTER_MODE_MAX                         = 0x2,
+	PERFMON_COUNTER_MODE_DIRTY                       = 0x3,
+	PERFMON_COUNTER_MODE_SAMPLE                      = 0x4,
+	PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT    = 0x5,
+	PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT     = 0x6,
+	PERFMON_COUNTER_MODE_CYCLES_GE_HI                = 0x7,
+	PERFMON_COUNTER_MODE_CYCLES_EQ_HI                = 0x8,
+	PERFMON_COUNTER_MODE_INACTIVE_CYCLES             = 0x9,
+	PERFMON_COUNTER_MODE_RESERVED                    = 0xf,
+} PERFMON_COUNTER_MODE;
+typedef enum PERFMON_SPM_MODE {
+	PERFMON_SPM_MODE_OFF                             = 0x0,
+	PERFMON_SPM_MODE_16BIT_CLAMP                     = 0x1,
+	PERFMON_SPM_MODE_16BIT_NO_CLAMP                  = 0x2,
+	PERFMON_SPM_MODE_32BIT_CLAMP                     = 0x3,
+	PERFMON_SPM_MODE_32BIT_NO_CLAMP                  = 0x4,
+	PERFMON_SPM_MODE_RESERVED_5                      = 0x5,
+	PERFMON_SPM_MODE_RESERVED_6                      = 0x6,
+	PERFMON_SPM_MODE_RESERVED_7                      = 0x7,
+	PERFMON_SPM_MODE_TEST_MODE_0                     = 0x8,
+	PERFMON_SPM_MODE_TEST_MODE_1                     = 0x9,
+	PERFMON_SPM_MODE_TEST_MODE_2                     = 0xa,
+} PERFMON_SPM_MODE;
+typedef enum SurfaceTiling {
+	ARRAY_LINEAR                                     = 0x0,
+	ARRAY_TILED                                      = 0x1,
+} SurfaceTiling;
+typedef enum SurfaceArray {
+	ARRAY_1D                                         = 0x0,
+	ARRAY_2D                                         = 0x1,
+	ARRAY_3D                                         = 0x2,
+	ARRAY_3D_SLICE                                   = 0x3,
+} SurfaceArray;
+typedef enum ColorArray {
+	ARRAY_2D_ALT_COLOR                               = 0x0,
+	ARRAY_2D_COLOR                                   = 0x1,
+	ARRAY_3D_SLICE_COLOR                             = 0x3,
+} ColorArray;
+typedef enum DepthArray {
+	ARRAY_2D_ALT_DEPTH                               = 0x0,
+	ARRAY_2D_DEPTH                                   = 0x1,
+} DepthArray;
+typedef enum ENUM_NUM_SIMD_PER_CU {
+	NUM_SIMD_PER_CU                                  = 0x4,
+} ENUM_NUM_SIMD_PER_CU;
+typedef enum MEM_PWR_FORCE_CTRL {
+	NO_FORCE_REQUEST                                 = 0x0,
+	FORCE_LIGHT_SLEEP_REQUEST                        = 0x1,
+	FORCE_DEEP_SLEEP_REQUEST                         = 0x2,
+	FORCE_SHUT_DOWN_REQUEST                          = 0x3,
+} MEM_PWR_FORCE_CTRL;
+typedef enum MEM_PWR_FORCE_CTRL2 {
+	NO_FORCE_REQ                                     = 0x0,
+	FORCE_LIGHT_SLEEP_REQ                            = 0x1,
+} MEM_PWR_FORCE_CTRL2;
+typedef enum MEM_PWR_DIS_CTRL {
+	ENABLE_MEM_PWR_CTRL                              = 0x0,
+	DISABLE_MEM_PWR_CTRL                             = 0x1,
+} MEM_PWR_DIS_CTRL;
+typedef enum MEM_PWR_SEL_CTRL {
+	DYNAMIC_SHUT_DOWN_ENABLE                         = 0x0,
+	DYNAMIC_DEEP_SLEEP_ENABLE                        = 0x1,
+	DYNAMIC_LIGHT_SLEEP_ENABLE                       = 0x2,
+} MEM_PWR_SEL_CTRL;
+typedef enum MEM_PWR_SEL_CTRL2 {
+	DYNAMIC_DEEP_SLEEP_EN                            = 0x0,
+	DYNAMIC_LIGHT_SLEEP_EN                           = 0x1,
+} MEM_PWR_SEL_CTRL2;
+
+#endif /* SMU_7_1_3_ENUM_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
new file mode 100644
index 000000000000..1ede9e274714
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -0,0 +1,6080 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_SH_MASK_H
+#define SMU_7_1_3_SH_MASK_H
+
+#define GCK_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define GCK_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define GCK_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define GCK_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define GCK_MCLK_FUSES__StartupMClkDid_MASK 0x7f
+#define GCK_MCLK_FUSES__StartupMClkDid__SHIFT 0x0
+#define GCK_MCLK_FUSES__MClkADCA_MASK 0x780
+#define GCK_MCLK_FUSES__MClkADCA__SHIFT 0x7
+#define GCK_MCLK_FUSES__MClkDDCA_MASK 0x1800
+#define GCK_MCLK_FUSES__MClkDDCA__SHIFT 0xb
+#define GCK_MCLK_FUSES__MClkDiDtWait_MASK 0xe000
+#define GCK_MCLK_FUSES__MClkDiDtWait__SHIFT 0xd
+#define GCK_MCLK_FUSES__MClkDiDtFloor_MASK 0x30000
+#define GCK_MCLK_FUSES__MClkDiDtFloor__SHIFT 0x10
+#define CG_DCLK_CNTL__DCLK_DIVIDER_MASK 0x7f
+#define CG_DCLK_CNTL__DCLK_DIVIDER__SHIFT 0x0
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_DCLK_STATUS__DCLK_STATUS_MASK 0x1
+#define CG_DCLK_STATUS__DCLK_STATUS__SHIFT 0x0
+#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_VCLK_CNTL__VCLK_DIVIDER_MASK 0x7f
+#define CG_VCLK_CNTL__VCLK_DIVIDER__SHIFT 0x0
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_VCLK_STATUS__VCLK_STATUS_MASK 0x1
+#define CG_VCLK_STATUS__VCLK_STATUS__SHIFT 0x0
+#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_ECLK_CNTL__ECLK_DIVIDER_MASK 0x7f
+#define CG_ECLK_CNTL__ECLK_DIVIDER__SHIFT 0x0
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK 0x100
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_ECLK_STATUS__ECLK_STATUS_MASK 0x1
+#define CG_ECLK_STATUS__ECLK_STATUS__SHIFT 0x0
+#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK 0x7f
+#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT 0x0
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN_MASK 0x100
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_MCLK_CNTL__MCLK_DIVIDER_MASK 0x7f
+#define CG_MCLK_CNTL__MCLK_DIVIDER__SHIFT 0x0
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_MCLK_STATUS__MCLK_STATUS_MASK 0x1
+#define CG_MCLK_STATUS__MCLK_STATUS__SHIFT 0x0
+#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSECLK__SHIFT 0x0
+#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK_MASK 0x2
+#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK__SHIFT 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK_MASK 0x4
+#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK__SHIFT 0x2
+#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK 0x8
+#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK__SHIFT 0x3
+#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK 0x10
+#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK__SHIFT 0x4
+#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK_MASK 0x20
+#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK__SHIFT 0x5
+#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK_MASK 0x40
+#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK__SHIFT 0x6
+#define GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK 0x80
+#define GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT 0x7
+#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK_MASK 0x100
+#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK__SHIFT 0x8
+#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK_MASK 0x200
+#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK__SHIFT 0x9
+#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK_MASK 0x400
+#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK__SHIFT 0xa
+#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK_MASK 0x800
+#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK__SHIFT 0xb
+#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN_MASK 0x1000
+#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN__SHIFT 0xc
+#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK_MASK 0x2000
+#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK__SHIFT 0xd
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x1
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK 0x2
+#define CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT 0x1
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN_MASK 0x4
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN__SHIFT 0x2
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x8
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x3
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS_MASK 0x10
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS__SHIFT 0x4
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x7e0
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x5
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE_MASK 0x800
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE__SHIFT 0xb
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN_MASK 0x1000
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN__SHIFT 0xc
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x7f00000
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x14
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK_MASK 0x8000000
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK__SHIFT 0x1b
+#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x1ff
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_MASK 0x800
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ__SHIFT 0xb
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK 0x400000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT 0x16
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x800000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x17
+#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG_MASK 0x1000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG__SHIFT 0x18
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG_MASK 0x2000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x4000000
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x1a
+#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR_MASK 0x8000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR__SHIFT 0x1b
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR_MASK 0x40000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR__SHIFT 0x1e
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x3ffffff
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL_MASK 0xf
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL_MASK 0x60
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL__SHIFT 0x5
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN_MASK 0x180
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN__SHIFT 0x7
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK 0xe00
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE__SHIFT 0x9
+#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV_MASK 0x7f000
+#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV__SHIFT 0xc
+#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS_MASK 0x200000
+#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS__SHIFT 0x15
+#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK_MASK 0x800000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK__SHIFT 0x17
+#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL_MASK 0x1000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL__SHIFT 0x18
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN_MASK 0x2000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_MASK 0xc000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT__SHIFT 0x1a
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT_MASK 0x70000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL_MASK 0x80000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL__SHIFT 0x1f
+#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS_MASK 0x1
+#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN_MASK 0x2
+#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN__SHIFT 0x1
+#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL_MASK 0xc
+#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL__SHIFT 0x2
+#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER_MASK 0x30
+#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER__SHIFT 0x4
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL_MASK 0xc0
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL__SHIFT 0x6
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN_MASK 0x100
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN__SHIFT 0x8
+#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX_MASK 0x200
+#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX__SHIFT 0x9
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT_MASK 0xff
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT_MASK 0xff00
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT__SHIFT 0x8
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN_MASK 0x10000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN__SHIFT 0x10
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN_MASK 0x1e0000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN__SHIFT 0x11
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT_MASK 0x1e00000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT__SHIFT 0x15
+#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR_MASK 0xfe000000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
+#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x1
+#define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x4
+#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x2
+#define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x8
+#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x3
+#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x10
+#define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x4
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0xc00
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0xa
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0xff000
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0xc
+#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000
+#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x1c
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x1d
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x1
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x0
+#define CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK 0xfff0
+#define CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT 0x4
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK 0x3ffffff
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT 0x0
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0xff00
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x8
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x2
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x1
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x4
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x2
+#define CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK 0x1
+#define CG_CLKPIN_CNTL_2__ENABLE_XCLK__SHIFT 0x0
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x8
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x3
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x100
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x8
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN_MASK 0x4000
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN__SHIFT 0xe
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE_MASK 0x8000
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE__SHIFT 0xf
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN_MASK 0x10000
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN__SHIFT 0x10
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE_MASK 0x20000
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE__SHIFT 0x11
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN_MASK 0x40000
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN__SHIFT 0x12
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE_MASK 0x80000
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE__SHIFT 0x13
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN_MASK 0x100000
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN__SHIFT 0x14
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE_MASK 0x200000
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE__SHIFT 0x15
+#define CG_CLKPIN_CNTL_2__CML_CTRL_MASK 0xc00000
+#define CG_CLKPIN_CNTL_2__CML_CTRL__SHIFT 0x16
+#define CG_CLKPIN_CNTL_2__CLK_SPARE_MASK 0xff000000
+#define CG_CLKPIN_CNTL_2__CLK_SPARE__SHIFT 0x18
+#define CG_CLKPIN_CNTL_DC__OSC_EN_MASK 0x1
+#define CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT 0x0
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN_MASK 0x6
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN__SHIFT 0x1
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN_MASK 0x200
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN__SHIFT 0x9
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK 0x1c00
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT 0xa
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0xff
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x0
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0xff00
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x8
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN_MASK 0x10000
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN__SHIFT 0x10
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK 0xff
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT 0x0
+#define MISC_CLK_CTRL__ZCLK_SEL_MASK 0xff00
+#define MISC_CLK_CTRL__ZCLK_SEL__SHIFT 0x8
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK 0xff0000
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT 0x10
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL_MASK 0x1f
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL_MASK 0x3e0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x5
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x1fc00
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0xa
+#define GCK_PLL_TEST_CNTL__TST_RESET_MASK 0x20000
+#define GCK_PLL_TEST_CNTL__TST_RESET__SHIFT 0x11
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE_MASK 0x40000
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE__SHIFT 0x12
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT_MASK 0xfffe0000
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT__SHIFT 0x11
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL_MASK 0x7
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL__SHIFT 0x0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL_MASK 0x38
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL__SHIFT 0x3
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL_MASK 0x1c0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL__SHIFT 0x6
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL_MASK 0xe00
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL__SHIFT 0x9
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL_MASK 0x7000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL__SHIFT 0xc
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL_MASK 0x38000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL__SHIFT 0xf
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL_MASK 0x1c0000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL__SHIFT 0x12
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL_MASK 0xe00000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL__SHIFT 0x15
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL_MASK 0x7000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL__SHIFT 0x18
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL_MASK 0x38000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL__SHIFT 0x1b
+#define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x3
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4_MASK 0x10
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4__SHIFT 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5_MASK 0x20
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5__SHIFT 0x5
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6_MASK 0x40
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6__SHIFT 0x6
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7_MASK 0x80
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7__SHIFT 0x7
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8_MASK 0x100
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8__SHIFT 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9_MASK 0x200
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9__SHIFT 0x9
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10_MASK 0x400
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10__SHIFT 0xa
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11_MASK 0x800
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11__SHIFT 0xb
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12_MASK 0x1000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12__SHIFT 0xc
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13_MASK 0x2000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13__SHIFT 0xd
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14_MASK 0x4000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14__SHIFT 0xe
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15_MASK 0x8000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15__SHIFT 0xf
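+/* Editorial note, not part of the generated header: the SMC_IND_INDEX_n /
+ * SMC_IND_DATA_n pairs form an index/data window into SMC address space,
+ * and SMC_IND_ACCESS_CNTL enables per-pair address auto-increment for
+ * burst transfers.  A hedged sketch of a single indirect read, assuming
+ * the mmSMC_IND_INDEX_0/mmSMC_IND_DATA_0 offsets from the companion
+ * header and the driver's RREG32()/WREG32() accessors:
+ *
+ *	WREG32(mmSMC_IND_INDEX_0, smc_address);
+ *	val = RREG32(mmSMC_IND_DATA_0);
+ *
+ * Because the index/data pair is shared state, real callers serialize
+ * such accesses (amdgpu holds a spinlock around them).
+ */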
+#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_0__SMC_RESP_MASK 0xffff
+#define SMC_RESP_0__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_1__SMC_RESP_MASK 0xffff
+#define SMC_RESP_1__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_2__SMC_RESP_MASK 0xffff
+#define SMC_RESP_2__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_3__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_3__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_3__SMC_RESP_MASK 0xffff
+#define SMC_RESP_3__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_4__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_4__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_4__SMC_RESP_MASK 0xffff
+#define SMC_RESP_4__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_5__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_5__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_5__SMC_RESP_MASK 0xffff
+#define SMC_RESP_5__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_6__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_6__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_6__SMC_RESP_MASK 0xffff
+#define SMC_RESP_6__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_7__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_7__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_7__SMC_RESP_MASK 0xffff
+#define SMC_RESP_7__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_0__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_0__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_1__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_1__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_2__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_2__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_3__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_3__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_4__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_4__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_5__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_5__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_6__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_6__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_7__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_7__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MESSAGE_8__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_8__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_8__SMC_RESP_MASK 0xffff
+#define SMC_RESP_8__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_9__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_9__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_9__SMC_RESP_MASK 0xffff
+#define SMC_RESP_9__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_10__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_10__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_10__SMC_RESP_MASK 0xffff
+#define SMC_RESP_10__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_11__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_11__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_11__SMC_RESP_MASK 0xffff
+#define SMC_RESP_11__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_8__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_8__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_9__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_9__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_10__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_10__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_11__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_11__SMC_MSG_ARG__SHIFT 0x0
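+/* Editorial note, not part of the generated header: SMC_MESSAGE_n,
+ * SMC_RESP_n and SMC_MSG_ARG_n form a simple mailbox to the SMC
+ * firmware: write the optional argument, write the message id, then
+ * poll the response.  A hedged sketch (msg_id and arg come from the
+ * firmware interface and are assumptions here):
+ *
+ *	WREG32(mmSMC_MSG_ARG_0, arg);
+ *	WREG32(mmSMC_MESSAGE_0, msg_id);
+ *	while (!(RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK))
+ *		cpu_relax();
+ */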
+#define SMC_SYSCON_RESET_CNTL__rst_reg_MASK 0x1
+#define SMC_SYSCON_RESET_CNTL__rst_reg__SHIFT 0x0
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override_MASK 0x2
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override__SHIFT 0x1
+#define SMC_SYSCON_RESET_CNTL__RegReset_MASK 0x40000000
+#define SMC_SYSCON_RESET_CNTL__RegReset__SHIFT 0x1e
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en_MASK 0x2
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en__SHIFT 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout_MASK 0xffff00
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout__SHIFT 0x8
+#define SMC_SYSCON_CLOCK_CNTL_0__cken_MASK 0x1000000
+#define SMC_SYSCON_CLOCK_CNTL_0__cken__SHIFT 0x18
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq_MASK 0xffffffff
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq__SHIFT 0x0
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding_MASK 0x2
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding__SHIFT 0x1
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg_MASK 0xffffffff
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg__SHIFT 0x0
+#define SMC_PC_C__smc_pc_c_MASK 0xffffffff
+#define SMC_PC_C__smc_pc_c__SHIFT 0x0
+#define SMC_SCRATCH9__SCRATCH_VALUE_MASK 0xffffffff
+#define SMC_SCRATCH9__SCRATCH_VALUE__SHIFT 0x0
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x1
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN_MASK 0xf
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP_MASK 0xf0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP__SHIFT 0x4
+#define GPIOPAD_MASK__GPIO_MASK_MASK 0x7fffffff
+#define GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
+#define GPIOPAD_A__GPIO_A_MASK 0x7fffffff
+#define GPIOPAD_A__GPIO_A__SHIFT 0x0
+#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffff
+#define GPIOPAD_EN__GPIO_EN__SHIFT 0x0
+#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffff
+#define GPIOPAD_Y__GPIO_Y__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x20
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x40
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x80
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x100
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x200
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x400
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x800
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x1000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x2000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x4000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x8000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x10000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x20000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x40000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x80000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x100000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x200000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x400000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x800000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x1000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x2000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x4000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x8000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x20
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x40
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x80
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x100
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x200
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x400
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x800
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x1000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x2000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x4000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x8000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x10000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x20000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x40000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x80000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x100000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x200000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x400000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x800000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x1000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x2000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x4000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x8000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
+#define GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1fffffff
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1fffffff
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL_MASK 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL__SHIFT 0x0
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR_MASK 0x20
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR__SHIFT 0x5
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ_MASK 0x40
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ__SHIFT 0x6
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL_MASK 0x7fffffff
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL__SHIFT 0x0
+#define GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7fffffff
+#define GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
+#define GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7fffffff
+#define GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
+#define CG_FPS_CNT__FPS_CNT_MASK 0xffffffff
+#define CG_FPS_CNT__FPS_CNT__SHIFT 0x0
+#define SMU_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req_MASK 0x1
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req__SHIFT 0x0
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done_MASK 0x2
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done__SHIFT 0x1
+#define RCU_UC_EVENTS__drv_rst_mode_MASK 0x4
+#define RCU_UC_EVENTS__drv_rst_mode__SHIFT 0x2
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid_MASK 0x8
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid__SHIFT 0x3
+#define RCU_UC_EVENTS__TP_Tester_MASK 0x40
+#define RCU_UC_EVENTS__TP_Tester__SHIFT 0x6
+#define RCU_UC_EVENTS__boot_seq_done_MASK 0x80
+#define RCU_UC_EVENTS__boot_seq_done__SHIFT 0x7
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit_MASK 0x100
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit__SHIFT 0x8
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE_MASK 0x200
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE__SHIFT 0x9
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE_MASK 0x400
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE__SHIFT 0xa
+#define RCU_UC_EVENTS__FCH_HALT_MASK 0x800
+#define RCU_UC_EVENTS__FCH_HALT__SHIFT 0xb
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown_MASK 0x2000
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown__SHIFT 0xd
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED_MASK 0x10000
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED__SHIFT 0x10
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done_MASK 0x20000
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done__SHIFT 0x11
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done_MASK 0x40000
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done__SHIFT 0x12
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done_MASK 0x80000
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done__SHIFT 0x13
+#define RCU_UC_EVENTS__irq31_sel_MASK 0x3000000
+#define RCU_UC_EVENTS__irq31_sel__SHIFT 0x18
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE_MASK 0x2
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE__SHIFT 0x1
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS_MASK 0x8
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS__SHIFT 0x3
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE_MASK 0x10
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE__SHIFT 0x4
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE_MASK 0x20
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE__SHIFT 0x5
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE_MASK 0x100
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE__SHIFT 0x8
+#define RCU_MISC_CTRL__BREAK_PT1_DONE_MASK 0x10000
+#define RCU_MISC_CTRL__BREAK_PT1_DONE__SHIFT 0x10
+#define RCU_MISC_CTRL__BREAK_PT2_DONE_MASK 0x20000
+#define RCU_MISC_CTRL__BREAK_PT2_DONE__SHIFT 0x11
+#define RCU_MISC_CTRL__SAMU_START_MASK 0x400000
+#define RCU_MISC_CTRL__SAMU_START__SHIFT 0x16
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH_MASK 0xff800000
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH__SHIFT 0x17
+#define RCU_VIRT_RESET_REQ__VF_MASK 0xffff
+#define RCU_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define RCU_VIRT_RESET_REQ__PF_MASK 0x80000000
+#define RCU_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define CC_RCU_FUSES__GPU_DIS_MASK 0x2
+#define CC_RCU_FUSES__GPU_DIS__SHIFT 0x1
+#define CC_RCU_FUSES__DEBUG_DISABLE_MASK 0x4
+#define CC_RCU_FUSES__DEBUG_DISABLE__SHIFT 0x2
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE_MASK 0x10
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE__SHIFT 0x4
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS_MASK 0x20
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS__SHIFT 0x5
+#define CC_RCU_FUSES__DRV_RST_MODE_MASK 0x40
+#define CC_RCU_FUSES__DRV_RST_MODE__SHIFT 0x6
+#define CC_RCU_FUSES__ROM_DIS_MASK 0x80
+#define CC_RCU_FUSES__ROM_DIS__SHIFT 0x7
+#define CC_RCU_FUSES__JPC_REP_DISABLE_MASK 0x100
+#define CC_RCU_FUSES__JPC_REP_DISABLE__SHIFT 0x8
+#define CC_RCU_FUSES__RCU_BREAK_POINT1_MASK 0x200
+#define CC_RCU_FUSES__RCU_BREAK_POINT1__SHIFT 0x9
+#define CC_RCU_FUSES__RCU_BREAK_POINT2_MASK 0x400
+#define CC_RCU_FUSES__RCU_BREAK_POINT2__SHIFT 0xa
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE_MASK 0x4000
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE__SHIFT 0xe
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE_MASK 0x8000
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE__SHIFT 0xf
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE_MASK 0x10000
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE__SHIFT 0x10
+#define CC_RCU_FUSES__XFIRE_DISABLE_MASK 0x20000
+#define CC_RCU_FUSES__XFIRE_DISABLE__SHIFT 0x11
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE_MASK 0x40000
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE__SHIFT 0x12
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE_MASK 0x80000
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE__SHIFT 0x13
+#define CC_RCU_FUSES__MEM_HARDREP_EN_MASK 0x200000
+#define CC_RCU_FUSES__MEM_HARDREP_EN__SHIFT 0x15
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE_MASK 0x400000
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE__SHIFT 0x16
+#define CC_RCU_FUSES__DSMU_DISABLE_MASK 0x800000
+#define CC_RCU_FUSES__DSMU_DISABLE__SHIFT 0x17
+#define CC_RCU_FUSES__WRP_FUSE_VALID_MASK 0x1000000
+#define CC_RCU_FUSES__WRP_FUSE_VALID__SHIFT 0x18
+#define CC_RCU_FUSES__PHY_FUSE_VALID_MASK 0x2000000
+#define CC_RCU_FUSES__PHY_FUSE_VALID__SHIFT 0x19
+#define CC_RCU_FUSES__RCU_SPARE_MASK 0xfc000000
+#define CC_RCU_FUSES__RCU_SPARE__SHIFT 0x1a
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE_MASK 0x2
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE__SHIFT 0x1
+#define CC_SMU_MISC_FUSES__MinSClkDid_MASK 0x1fc
+#define CC_SMU_MISC_FUSES__MinSClkDid__SHIFT 0x2
+#define CC_SMU_MISC_FUSES__MISC_SPARE_MASK 0x600
+#define CC_SMU_MISC_FUSES__MISC_SPARE__SHIFT 0x9
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid_MASK 0x3f800
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid__SHIFT 0xb
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half_MASK 0x40000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half__SHIFT 0x12
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half_MASK 0x80000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half__SHIFT 0x13
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half_MASK 0x100000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half__SHIFT 0x14
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half_MASK 0x200000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half__SHIFT 0x15
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis_MASK 0x400000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis__SHIFT 0x16
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis_MASK 0x800000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis__SHIFT 0x17
+#define CC_SMU_MISC_FUSES__VCE_DISABLE_MASK 0x8000000
+#define CC_SMU_MISC_FUSES__VCE_DISABLE__SHIFT 0x1b
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE_MASK 0x10000000
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE__SHIFT 0x1c
+#define CC_SMU_MISC_FUSES__GNB_SPARE_MASK 0x60000000
+#define CC_SMU_MISC_FUSES__GNB_SPARE__SHIFT 0x1d
+#define CC_SCLK_VID_FUSES__SClkVid0_MASK 0xff
+#define CC_SCLK_VID_FUSES__SClkVid0__SHIFT 0x0
+#define CC_SCLK_VID_FUSES__SClkVid1_MASK 0xff00
+#define CC_SCLK_VID_FUSES__SClkVid1__SHIFT 0x8
+#define CC_SCLK_VID_FUSES__SClkVid2_MASK 0xff0000
+#define CC_SCLK_VID_FUSES__SClkVid2__SHIFT 0x10
+#define CC_SCLK_VID_FUSES__SClkVid3_MASK 0xff000000
+#define CC_SCLK_VID_FUSES__SClkVid3__SHIFT 0x18
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID_MASK 0x7fe
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID__SHIFT 0x1
+#define CC_GIO_IOC_FUSES__IOC_FUSES_MASK 0x3e
+#define CC_GIO_IOC_FUSES__IOC_FUSES__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2_MASK 0x3e
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RME_MASK 0x40
+#define CC_SMU_TST_EFUSE1_MISC__RME__SHIFT 0x6
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE_MASK 0x80
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE__SHIFT 0x7
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE_MASK 0x100
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE__SHIFT 0x8
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE_MASK 0x200
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE__SHIFT 0x9
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS_MASK 0x400
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS__SHIFT 0xa
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE_MASK 0x800
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE__SHIFT 0xb
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA_MASK 0x1000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA__SHIFT 0xc
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB_MASK 0x2000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB__SHIFT 0xd
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8_MASK 0x4000
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8__SHIFT 0xe
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1_MASK 0x400000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1__SHIFT 0x16
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2_MASK 0x800000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2__SHIFT 0x17
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3_MASK 0x1000000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3__SHIFT 0x18
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE_MASK 0x2000000
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE__SHIFT 0x19
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE_MASK 0x4000000
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE__SHIFT 0x1a
+#define CC_TST_ID_STRAPS__DEVICE_ID_MASK 0xffff0
+#define CC_TST_ID_STRAPS__DEVICE_ID__SHIFT 0x4
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID_MASK 0xf00000
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID__SHIFT 0x14
+#define CC_TST_ID_STRAPS__MINOR_REV_ID_MASK 0xf000000
+#define CC_TST_ID_STRAPS__MINOR_REV_ID__SHIFT 0x18
+#define CC_TST_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000
+#define CC_TST_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT_MASK 0x2
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT__SHIFT 0x1
+#define CC_HARVEST_FUSES__VCE_DISABLE_MASK 0x6
+#define CC_HARVEST_FUSES__VCE_DISABLE__SHIFT 0x1
+#define CC_HARVEST_FUSES__UVD_DISABLE_MASK 0x10
+#define CC_HARVEST_FUSES__UVD_DISABLE__SHIFT 0x4
+#define CC_HARVEST_FUSES__ACP_DISABLE_MASK 0x40
+#define CC_HARVEST_FUSES__ACP_DISABLE__SHIFT 0x6
+#define CC_HARVEST_FUSES__DC_DISABLE_MASK 0x3f00
+#define CC_HARVEST_FUSES__DC_DISABLE__SHIFT 0x8
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ_MASK 0xffffffff
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ__SHIFT 0x0
+#define SMU_STATUS__SMU_DONE_MASK 0x1
+#define SMU_STATUS__SMU_DONE__SHIFT 0x0
+#define SMU_STATUS__SMU_PASS_MASK 0x2
+#define SMU_STATUS__SMU_PASS__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG_MASK 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG__SHIFT 0x0
+#define SMU_FIRMWARE__SMU_RD_DONE_MASK 0x6
+#define SMU_FIRMWARE__SMU_RD_DONE__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN_MASK 0x8
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN__SHIFT 0x3
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN_MASK 0x10
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN__SHIFT 0x4
+#define SMU_FIRMWARE__SMU_counter_MASK 0xf00
+#define SMU_FIRMWARE__SMU_counter__SHIFT 0x8
+#define SMU_FIRMWARE__SMU_MODE_MASK 0x10000
+#define SMU_FIRMWARE__SMU_MODE__SHIFT 0x10
+#define SMU_FIRMWARE__SMU_SEL_MASK 0x20000
+#define SMU_FIRMWARE__SMU_SEL__SHIFT 0x11
+#define SMU_INPUT_DATA__START_ADDR_MASK 0x7fffffff
+#define SMU_INPUT_DATA__START_ADDR__SHIFT 0x0
+#define SMU_INPUT_DATA__AUTO_START_MASK 0x80000000
+#define SMU_INPUT_DATA__AUTO_START__SHIFT 0x1f
+#define SMU_EFUSE_0__EFUSE_DATA_MASK 0xffffffff
+#define SMU_EFUSE_0__EFUSE_DATA__SHIFT 0x0
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x1
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define FIRMWARE_FLAGS__RESERVED_MASK 0xfffffe
+#define FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define FIRMWARE_FLAGS__TEST_COUNT_MASK 0xff000000
+#define FIRMWARE_FLAGS__TEST_COUNT__SHIFT 0x18
+#define TDC_STATUS__VDD_Boost_MASK 0xff
+#define TDC_STATUS__VDD_Boost__SHIFT 0x0
+#define TDC_STATUS__VDD_Throttle_MASK 0xff00
+#define TDC_STATUS__VDD_Throttle__SHIFT 0x8
+#define TDC_STATUS__VDDC_Boost_MASK 0xff0000
+#define TDC_STATUS__VDDC_Boost__SHIFT 0x10
+#define TDC_STATUS__VDDC_Throttle_MASK 0xff000000
+#define TDC_STATUS__VDDC_Throttle__SHIFT 0x18
+#define TDC_MV_AVERAGE__IDD_MASK 0xffff
+#define TDC_MV_AVERAGE__IDD__SHIFT 0x0
+#define TDC_MV_AVERAGE__IDDC_MASK 0xffff0000
+#define TDC_MV_AVERAGE__IDDC__SHIFT 0x10
+#define TDC_VRM_LIMIT__IDD_MASK 0xffff
+#define TDC_VRM_LIMIT__IDD__SHIFT 0x0
+#define TDC_VRM_LIMIT__IDDC_MASK 0xffff0000
+#define TDC_VRM_LIMIT__IDDC__SHIFT 0x10
+#define FEATURE_STATUS__SCLK_DPM_ON_MASK 0x1
+#define FEATURE_STATUS__SCLK_DPM_ON__SHIFT 0x0
+#define FEATURE_STATUS__MCLK_DPM_ON_MASK 0x2
+#define FEATURE_STATUS__MCLK_DPM_ON__SHIFT 0x1
+#define FEATURE_STATUS__LCLK_DPM_ON_MASK 0x4
+#define FEATURE_STATUS__LCLK_DPM_ON__SHIFT 0x2
+#define FEATURE_STATUS__UVD_DPM_ON_MASK 0x8
+#define FEATURE_STATUS__UVD_DPM_ON__SHIFT 0x3
+#define FEATURE_STATUS__VCE_DPM_ON_MASK 0x10
+#define FEATURE_STATUS__VCE_DPM_ON__SHIFT 0x4
+#define FEATURE_STATUS__SAMU_DPM_ON_MASK 0x20
+#define FEATURE_STATUS__SAMU_DPM_ON__SHIFT 0x5
+#define FEATURE_STATUS__ACP_DPM_ON_MASK 0x40
+#define FEATURE_STATUS__ACP_DPM_ON__SHIFT 0x6
+#define FEATURE_STATUS__PCIE_DPM_ON_MASK 0x80
+#define FEATURE_STATUS__PCIE_DPM_ON__SHIFT 0x7
+#define FEATURE_STATUS__BAPM_ON_MASK 0x100
+#define FEATURE_STATUS__BAPM_ON__SHIFT 0x8
+#define FEATURE_STATUS__LPMX_ON_MASK 0x200
+#define FEATURE_STATUS__LPMX_ON__SHIFT 0x9
+#define FEATURE_STATUS__NBDPM_ON_MASK 0x400
+#define FEATURE_STATUS__NBDPM_ON__SHIFT 0xa
+#define FEATURE_STATUS__LHTC_ON_MASK 0x800
+#define FEATURE_STATUS__LHTC_ON__SHIFT 0xb
+#define FEATURE_STATUS__VPC_ON_MASK 0x1000
+#define FEATURE_STATUS__VPC_ON__SHIFT 0xc
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON_MASK 0x2000
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON__SHIFT 0xd
+#define FEATURE_STATUS__TDC_LIMIT_ON_MASK 0x4000
+#define FEATURE_STATUS__TDC_LIMIT_ON__SHIFT 0xe
+#define FEATURE_STATUS__GPU_CAC_ON_MASK 0x8000
+#define FEATURE_STATUS__GPU_CAC_ON__SHIFT 0xf
+#define FEATURE_STATUS__AVS_ON_MASK 0x10000
+#define FEATURE_STATUS__AVS_ON__SHIFT 0x10
+#define FEATURE_STATUS__SPMI_ON_MASK 0x20000
+#define FEATURE_STATUS__SPMI_ON__SHIFT 0x11
+#define FEATURE_STATUS__SCLK_DPM_FORCED_MASK 0x40000
+#define FEATURE_STATUS__SCLK_DPM_FORCED__SHIFT 0x12
+#define FEATURE_STATUS__MCLK_DPM_FORCED_MASK 0x80000
+#define FEATURE_STATUS__MCLK_DPM_FORCED__SHIFT 0x13
+#define FEATURE_STATUS__LCLK_DPM_FORCED_MASK 0x100000
+#define FEATURE_STATUS__LCLK_DPM_FORCED__SHIFT 0x14
+#define FEATURE_STATUS__PCIE_DPM_FORCED_MASK 0x200000
+#define FEATURE_STATUS__PCIE_DPM_FORCED__SHIFT 0x15
+#define FEATURE_STATUS__RESERVED_MASK 0xffc00000
+#define FEATURE_STATUS__RESERVED__SHIFT 0x16
+#define ENTITY_TEMPERATURES_1__GPU_MASK 0xffffffff
+#define ENTITY_TEMPERATURES_1__GPU__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime__SHIFT 0x18
+#define DPM_TABLE_1__GraphicsPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_1__GraphicsPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_4__GraphicsPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_4__GraphicsPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_5__GraphicsPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_5__GraphicsPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_6__GraphicsPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_6__GraphicsPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_7__GraphicsPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_7__GraphicsPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_9__GraphicsPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_9__GraphicsPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_10__MemoryPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_10__MemoryPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_13__MemoryPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_13__MemoryPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_14__MemoryPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_14__MemoryPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_15__MemoryPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_15__MemoryPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_16__MemoryPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_16__MemoryPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_18__MemoryPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_18__MemoryPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_19__LinkPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_19__LinkPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_22__LinkPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_22__LinkPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_23__LinkPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_23__LinkPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_24__LinkPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_24__LinkPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_25__LinkPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_25__LinkPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_26__LinkPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_26__LinkPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_27__LinkPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_27__LinkPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_28__SystemFlags_MASK 0xffffffff
+#define DPM_TABLE_28__SystemFlags__SHIFT 0x0
+#define DPM_TABLE_29__VRConfig_MASK 0xffffffff
+#define DPM_TABLE_29__VRConfig__SHIFT 0x0
+#define DPM_TABLE_30__SmioMask1_MASK 0xffffffff
+#define DPM_TABLE_30__SmioMask1__SHIFT 0x0
+#define DPM_TABLE_31__SmioMask2_MASK 0xffffffff
+#define DPM_TABLE_31__SmioMask2__SHIFT 0x0
+#define DPM_TABLE_32__SmioTable1_Pattern_0_padding_MASK 0xff
+#define DPM_TABLE_32__SmioTable1_Pattern_0_padding__SHIFT 0x0
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio_MASK 0xff00
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio__SHIFT 0x8
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage_MASK 0xffff0000
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage__SHIFT 0x10
+#define DPM_TABLE_33__SmioTable1_Pattern_1_padding_MASK 0xff
+#define DPM_TABLE_33__SmioTable1_Pattern_1_padding__SHIFT 0x0
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio_MASK 0xff00
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio__SHIFT 0x8
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage_MASK 0xffff0000
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage__SHIFT 0x10
+#define DPM_TABLE_34__SmioTable1_Pattern_2_padding_MASK 0xff
+#define DPM_TABLE_34__SmioTable1_Pattern_2_padding__SHIFT 0x0
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio_MASK 0xff00
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio__SHIFT 0x8
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage_MASK 0xffff0000
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage__SHIFT 0x10
+#define DPM_TABLE_35__SmioTable1_Pattern_3_padding_MASK 0xff
+#define DPM_TABLE_35__SmioTable1_Pattern_3_padding__SHIFT 0x0
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio_MASK 0xff00
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio__SHIFT 0x8
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage_MASK 0xffff0000
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage__SHIFT 0x10
+#define DPM_TABLE_36__SmioTable2_Pattern_0_padding_MASK 0xff
+#define DPM_TABLE_36__SmioTable2_Pattern_0_padding__SHIFT 0x0
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio_MASK 0xff00
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio__SHIFT 0x8
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage_MASK 0xffff0000
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage__SHIFT 0x10
+#define DPM_TABLE_37__SmioTable2_Pattern_1_padding_MASK 0xff
+#define DPM_TABLE_37__SmioTable2_Pattern_1_padding__SHIFT 0x0
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio_MASK 0xff00
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio__SHIFT 0x8
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage_MASK 0xffff0000
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage__SHIFT 0x10
+#define DPM_TABLE_38__SmioTable2_Pattern_2_padding_MASK 0xff
+#define DPM_TABLE_38__SmioTable2_Pattern_2_padding__SHIFT 0x0
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio_MASK 0xff00
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio__SHIFT 0x8
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage_MASK 0xffff0000
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage__SHIFT 0x10
+#define DPM_TABLE_39__SmioTable2_Pattern_3_padding_MASK 0xff
+#define DPM_TABLE_39__SmioTable2_Pattern_3_padding__SHIFT 0x0
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio_MASK 0xff00
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio__SHIFT 0x8
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage_MASK 0xffff0000
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage__SHIFT 0x10
+#define DPM_TABLE_40__VddcLevelCount_MASK 0xffffffff
+#define DPM_TABLE_40__VddcLevelCount__SHIFT 0x0
+#define DPM_TABLE_41__VddciLevelCount_MASK 0xffffffff
+#define DPM_TABLE_41__VddciLevelCount__SHIFT 0x0
+#define DPM_TABLE_42__VddGfxLevelCount_MASK 0xffffffff
+#define DPM_TABLE_42__VddGfxLevelCount__SHIFT 0x0
+#define DPM_TABLE_43__MvddLevelCount_MASK 0xffffffff
+#define DPM_TABLE_43__MvddLevelCount__SHIFT 0x0
+#define DPM_TABLE_44__VddcTable_1_MASK 0xffff
+#define DPM_TABLE_44__VddcTable_1__SHIFT 0x0
+#define DPM_TABLE_44__VddcTable_0_MASK 0xffff0000
+#define DPM_TABLE_44__VddcTable_0__SHIFT 0x10
+#define DPM_TABLE_45__VddcTable_3_MASK 0xffff
+#define DPM_TABLE_45__VddcTable_3__SHIFT 0x0
+#define DPM_TABLE_45__VddcTable_2_MASK 0xffff0000
+#define DPM_TABLE_45__VddcTable_2__SHIFT 0x10
+#define DPM_TABLE_46__VddcTable_5_MASK 0xffff
+#define DPM_TABLE_46__VddcTable_5__SHIFT 0x0
+#define DPM_TABLE_46__VddcTable_4_MASK 0xffff0000
+#define DPM_TABLE_46__VddcTable_4__SHIFT 0x10
+#define DPM_TABLE_47__VddcTable_7_MASK 0xffff
+#define DPM_TABLE_47__VddcTable_7__SHIFT 0x0
+#define DPM_TABLE_47__VddcTable_6_MASK 0xffff0000
+#define DPM_TABLE_47__VddcTable_6__SHIFT 0x10
+#define DPM_TABLE_48__VddcTable_9_MASK 0xffff
+#define DPM_TABLE_48__VddcTable_9__SHIFT 0x0
+#define DPM_TABLE_48__VddcTable_8_MASK 0xffff0000
+#define DPM_TABLE_48__VddcTable_8__SHIFT 0x10
+#define DPM_TABLE_49__VddcTable_11_MASK 0xffff
+#define DPM_TABLE_49__VddcTable_11__SHIFT 0x0
+#define DPM_TABLE_49__VddcTable_10_MASK 0xffff0000
+#define DPM_TABLE_49__VddcTable_10__SHIFT 0x10
+#define DPM_TABLE_50__VddcTable_13_MASK 0xffff
+#define DPM_TABLE_50__VddcTable_13__SHIFT 0x0
+#define DPM_TABLE_50__VddcTable_12_MASK 0xffff0000
+#define DPM_TABLE_50__VddcTable_12__SHIFT 0x10
+#define DPM_TABLE_51__VddcTable_15_MASK 0xffff
+#define DPM_TABLE_51__VddcTable_15__SHIFT 0x0
+#define DPM_TABLE_51__VddcTable_14_MASK 0xffff0000
+#define DPM_TABLE_51__VddcTable_14__SHIFT 0x10
+#define DPM_TABLE_52__VddGfxTable_1_MASK 0xffff
+#define DPM_TABLE_52__VddGfxTable_1__SHIFT 0x0
+#define DPM_TABLE_52__VddGfxTable_0_MASK 0xffff0000
+#define DPM_TABLE_52__VddGfxTable_0__SHIFT 0x10
+#define DPM_TABLE_53__VddGfxTable_3_MASK 0xffff
+#define DPM_TABLE_53__VddGfxTable_3__SHIFT 0x0
+#define DPM_TABLE_53__VddGfxTable_2_MASK 0xffff0000
+#define DPM_TABLE_53__VddGfxTable_2__SHIFT 0x10
+#define DPM_TABLE_54__VddGfxTable_5_MASK 0xffff
+#define DPM_TABLE_54__VddGfxTable_5__SHIFT 0x0
+#define DPM_TABLE_54__VddGfxTable_4_MASK 0xffff0000
+#define DPM_TABLE_54__VddGfxTable_4__SHIFT 0x10
+#define DPM_TABLE_55__VddGfxTable_7_MASK 0xffff
+#define DPM_TABLE_55__VddGfxTable_7__SHIFT 0x0
+#define DPM_TABLE_55__VddGfxTable_6_MASK 0xffff0000
+#define DPM_TABLE_55__VddGfxTable_6__SHIFT 0x10
+#define DPM_TABLE_56__VddGfxTable_9_MASK 0xffff
+#define DPM_TABLE_56__VddGfxTable_9__SHIFT 0x0
+#define DPM_TABLE_56__VddGfxTable_8_MASK 0xffff0000
+#define DPM_TABLE_56__VddGfxTable_8__SHIFT 0x10
+#define DPM_TABLE_57__VddGfxTable_11_MASK 0xffff
+#define DPM_TABLE_57__VddGfxTable_11__SHIFT 0x0
+#define DPM_TABLE_57__VddGfxTable_10_MASK 0xffff0000
+#define DPM_TABLE_57__VddGfxTable_10__SHIFT 0x10
+#define DPM_TABLE_58__VddGfxTable_13_MASK 0xffff
+#define DPM_TABLE_58__VddGfxTable_13__SHIFT 0x0
+#define DPM_TABLE_58__VddGfxTable_12_MASK 0xffff0000
+#define DPM_TABLE_58__VddGfxTable_12__SHIFT 0x10
+#define DPM_TABLE_59__VddGfxTable_15_MASK 0xffff
+#define DPM_TABLE_59__VddGfxTable_15__SHIFT 0x0
+#define DPM_TABLE_59__VddGfxTable_14_MASK 0xffff0000
+#define DPM_TABLE_59__VddGfxTable_14__SHIFT 0x10
+#define DPM_TABLE_60__VddciTable_1_MASK 0xffff
+#define DPM_TABLE_60__VddciTable_1__SHIFT 0x0
+#define DPM_TABLE_60__VddciTable_0_MASK 0xffff0000
+#define DPM_TABLE_60__VddciTable_0__SHIFT 0x10
+#define DPM_TABLE_61__VddciTable_3_MASK 0xffff
+#define DPM_TABLE_61__VddciTable_3__SHIFT 0x0
+#define DPM_TABLE_61__VddciTable_2_MASK 0xffff0000
+#define DPM_TABLE_61__VddciTable_2__SHIFT 0x10
+#define DPM_TABLE_62__VddciTable_5_MASK 0xffff
+#define DPM_TABLE_62__VddciTable_5__SHIFT 0x0
+#define DPM_TABLE_62__VddciTable_4_MASK 0xffff0000
+#define DPM_TABLE_62__VddciTable_4__SHIFT 0x10
+#define DPM_TABLE_63__VddciTable_7_MASK 0xffff
+#define DPM_TABLE_63__VddciTable_7__SHIFT 0x0
+#define DPM_TABLE_63__VddciTable_6_MASK 0xffff0000
+#define DPM_TABLE_63__VddciTable_6__SHIFT 0x10
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3_MASK 0xff
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3__SHIFT 0x0
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2_MASK 0xff00
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2__SHIFT 0x8
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1_MASK 0xff0000
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1__SHIFT 0x10
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0_MASK 0xff000000
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0__SHIFT 0x18
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7_MASK 0xff
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7__SHIFT 0x0
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6_MASK 0xff00
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6__SHIFT 0x8
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5_MASK 0xff0000
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5__SHIFT 0x10
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4_MASK 0xff000000
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4__SHIFT 0x18
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11_MASK 0xff
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11__SHIFT 0x0
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10_MASK 0xff00
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10__SHIFT 0x8
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9_MASK 0xff0000
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9__SHIFT 0x10
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8_MASK 0xff000000
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8__SHIFT 0x18
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15_MASK 0xff
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15__SHIFT 0x0
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14_MASK 0xff00
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14__SHIFT 0x8
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13_MASK 0xff0000
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13__SHIFT 0x10
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12_MASK 0xff000000
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12__SHIFT 0x18
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3_MASK 0xff
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3__SHIFT 0x0
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2_MASK 0xff00
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2__SHIFT 0x8
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1_MASK 0xff0000
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1__SHIFT 0x10
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0_MASK 0xff000000
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0__SHIFT 0x18
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7_MASK 0xff
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7__SHIFT 0x0
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6_MASK 0xff00
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6__SHIFT 0x8
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5_MASK 0xff0000
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5__SHIFT 0x10
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4_MASK 0xff000000
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4__SHIFT 0x18
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11_MASK 0xff
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11__SHIFT 0x0
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10_MASK 0xff00
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10__SHIFT 0x8
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9_MASK 0xff0000
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9__SHIFT 0x10
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8_MASK 0xff000000
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8__SHIFT 0x18
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15_MASK 0xff
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15__SHIFT 0x0
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14_MASK 0xff00
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14__SHIFT 0x8
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13_MASK 0xff0000
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13__SHIFT 0x10
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12_MASK 0xff000000
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12__SHIFT 0x18
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3_MASK 0xff
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3__SHIFT 0x0
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2_MASK 0xff00
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2__SHIFT 0x8
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1_MASK 0xff0000
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1__SHIFT 0x10
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0_MASK 0xff000000
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0__SHIFT 0x18
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7_MASK 0xff
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7__SHIFT 0x0
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6_MASK 0xff00
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6__SHIFT 0x8
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5_MASK 0xff0000
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5__SHIFT 0x10
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4_MASK 0xff000000
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4__SHIFT 0x18
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11_MASK 0xff
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11__SHIFT 0x0
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10_MASK 0xff00
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10__SHIFT 0x8
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9_MASK 0xff0000
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9__SHIFT 0x10
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8_MASK 0xff000000
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8__SHIFT 0x18
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15_MASK 0xff
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15__SHIFT 0x0
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14_MASK 0xff00
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14__SHIFT 0x8
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13_MASK 0xff0000
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13__SHIFT 0x10
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12_MASK 0xff000000
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12__SHIFT 0x18
+#define DPM_TABLE_76__BapmVddcVidHiSidd_3_MASK 0xff
+#define DPM_TABLE_76__BapmVddcVidHiSidd_3__SHIFT 0x0
+#define DPM_TABLE_76__BapmVddcVidHiSidd_2_MASK 0xff00
+#define DPM_TABLE_76__BapmVddcVidHiSidd_2__SHIFT 0x8
+#define DPM_TABLE_76__BapmVddcVidHiSidd_1_MASK 0xff0000
+#define DPM_TABLE_76__BapmVddcVidHiSidd_1__SHIFT 0x10
+#define DPM_TABLE_76__BapmVddcVidHiSidd_0_MASK 0xff000000
+#define DPM_TABLE_76__BapmVddcVidHiSidd_0__SHIFT 0x18
+#define DPM_TABLE_77__BapmVddcVidHiSidd_7_MASK 0xff
+#define DPM_TABLE_77__BapmVddcVidHiSidd_7__SHIFT 0x0
+#define DPM_TABLE_77__BapmVddcVidHiSidd_6_MASK 0xff00
+#define DPM_TABLE_77__BapmVddcVidHiSidd_6__SHIFT 0x8
+#define DPM_TABLE_77__BapmVddcVidHiSidd_5_MASK 0xff0000
+#define DPM_TABLE_77__BapmVddcVidHiSidd_5__SHIFT 0x10
+#define DPM_TABLE_77__BapmVddcVidHiSidd_4_MASK 0xff000000
+#define DPM_TABLE_77__BapmVddcVidHiSidd_4__SHIFT 0x18
+#define DPM_TABLE_78__BapmVddcVidHiSidd_11_MASK 0xff
+#define DPM_TABLE_78__BapmVddcVidHiSidd_11__SHIFT 0x0
+#define DPM_TABLE_78__BapmVddcVidHiSidd_10_MASK 0xff00
+#define DPM_TABLE_78__BapmVddcVidHiSidd_10__SHIFT 0x8
+#define DPM_TABLE_78__BapmVddcVidHiSidd_9_MASK 0xff0000
+#define DPM_TABLE_78__BapmVddcVidHiSidd_9__SHIFT 0x10
+#define DPM_TABLE_78__BapmVddcVidHiSidd_8_MASK 0xff000000
+#define DPM_TABLE_78__BapmVddcVidHiSidd_8__SHIFT 0x18
+#define DPM_TABLE_79__BapmVddcVidHiSidd_15_MASK 0xff
+#define DPM_TABLE_79__BapmVddcVidHiSidd_15__SHIFT 0x0
+#define DPM_TABLE_79__BapmVddcVidHiSidd_14_MASK 0xff00
+#define DPM_TABLE_79__BapmVddcVidHiSidd_14__SHIFT 0x8
+#define DPM_TABLE_79__BapmVddcVidHiSidd_13_MASK 0xff0000
+#define DPM_TABLE_79__BapmVddcVidHiSidd_13__SHIFT 0x10
+#define DPM_TABLE_79__BapmVddcVidHiSidd_12_MASK 0xff000000
+#define DPM_TABLE_79__BapmVddcVidHiSidd_12__SHIFT 0x18
+#define DPM_TABLE_80__BapmVddcVidLoSidd_3_MASK 0xff
+#define DPM_TABLE_80__BapmVddcVidLoSidd_3__SHIFT 0x0
+#define DPM_TABLE_80__BapmVddcVidLoSidd_2_MASK 0xff00
+#define DPM_TABLE_80__BapmVddcVidLoSidd_2__SHIFT 0x8
+#define DPM_TABLE_80__BapmVddcVidLoSidd_1_MASK 0xff0000
+#define DPM_TABLE_80__BapmVddcVidLoSidd_1__SHIFT 0x10
+#define DPM_TABLE_80__BapmVddcVidLoSidd_0_MASK 0xff000000
+#define DPM_TABLE_80__BapmVddcVidLoSidd_0__SHIFT 0x18
+#define DPM_TABLE_81__BapmVddcVidLoSidd_7_MASK 0xff
+#define DPM_TABLE_81__BapmVddcVidLoSidd_7__SHIFT 0x0
+#define DPM_TABLE_81__BapmVddcVidLoSidd_6_MASK 0xff00
+#define DPM_TABLE_81__BapmVddcVidLoSidd_6__SHIFT 0x8
+#define DPM_TABLE_81__BapmVddcVidLoSidd_5_MASK 0xff0000
+#define DPM_TABLE_81__BapmVddcVidLoSidd_5__SHIFT 0x10
+#define DPM_TABLE_81__BapmVddcVidLoSidd_4_MASK 0xff000000
+#define DPM_TABLE_81__BapmVddcVidLoSidd_4__SHIFT 0x18
+#define DPM_TABLE_82__BapmVddcVidLoSidd_11_MASK 0xff
+#define DPM_TABLE_82__BapmVddcVidLoSidd_11__SHIFT 0x0
+#define DPM_TABLE_82__BapmVddcVidLoSidd_10_MASK 0xff00
+#define DPM_TABLE_82__BapmVddcVidLoSidd_10__SHIFT 0x8
+#define DPM_TABLE_82__BapmVddcVidLoSidd_9_MASK 0xff0000
+#define DPM_TABLE_82__BapmVddcVidLoSidd_9__SHIFT 0x10
+#define DPM_TABLE_82__BapmVddcVidLoSidd_8_MASK 0xff000000
+#define DPM_TABLE_82__BapmVddcVidLoSidd_8__SHIFT 0x18
+#define DPM_TABLE_83__BapmVddcVidLoSidd_15_MASK 0xff
+#define DPM_TABLE_83__BapmVddcVidLoSidd_15__SHIFT 0x0
+#define DPM_TABLE_83__BapmVddcVidLoSidd_14_MASK 0xff00
+#define DPM_TABLE_83__BapmVddcVidLoSidd_14__SHIFT 0x8
+#define DPM_TABLE_83__BapmVddcVidLoSidd_13_MASK 0xff0000
+#define DPM_TABLE_83__BapmVddcVidLoSidd_13__SHIFT 0x10
+#define DPM_TABLE_83__BapmVddcVidLoSidd_12_MASK 0xff000000
+#define DPM_TABLE_83__BapmVddcVidLoSidd_12__SHIFT 0x18
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_3_MASK 0xff
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_3__SHIFT 0x0
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_2_MASK 0xff00
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_2__SHIFT 0x8
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_1_MASK 0xff0000
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_1__SHIFT 0x10
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_0_MASK 0xff000000
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_0__SHIFT 0x18
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_7_MASK 0xff
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_7__SHIFT 0x0
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_6_MASK 0xff00
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_6__SHIFT 0x8
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_5_MASK 0xff0000
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_5__SHIFT 0x10
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_4_MASK 0xff000000
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_4__SHIFT 0x18
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_11_MASK 0xff
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_11__SHIFT 0x0
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_10_MASK 0xff00
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_10__SHIFT 0x8
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_9_MASK 0xff0000
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_9__SHIFT 0x10
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_8_MASK 0xff000000
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_8__SHIFT 0x18
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_15_MASK 0xff
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_15__SHIFT 0x0
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_14_MASK 0xff00
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_14__SHIFT 0x8
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_13_MASK 0xff0000
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_13__SHIFT 0x10
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_12_MASK 0xff000000
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_12__SHIFT 0x18
+#define DPM_TABLE_88__MasterDeepSleepControl_MASK 0xff
+#define DPM_TABLE_88__MasterDeepSleepControl__SHIFT 0x0
+#define DPM_TABLE_88__LinkLevelCount_MASK 0xff00
+#define DPM_TABLE_88__LinkLevelCount__SHIFT 0x8
+#define DPM_TABLE_88__MemoryDpmLevelCount_MASK 0xff0000
+#define DPM_TABLE_88__MemoryDpmLevelCount__SHIFT 0x10
+#define DPM_TABLE_88__GraphicsDpmLevelCount_MASK 0xff000000
+#define DPM_TABLE_88__GraphicsDpmLevelCount__SHIFT 0x18
+#define DPM_TABLE_89__SamuLevelCount_MASK 0xff
+#define DPM_TABLE_89__SamuLevelCount__SHIFT 0x0
+#define DPM_TABLE_89__AcpLevelCount_MASK 0xff00
+#define DPM_TABLE_89__AcpLevelCount__SHIFT 0x8
+#define DPM_TABLE_89__VceLevelCount_MASK 0xff0000
+#define DPM_TABLE_89__VceLevelCount__SHIFT 0x10
+#define DPM_TABLE_89__UvdLevelCount_MASK 0xff000000
+#define DPM_TABLE_89__UvdLevelCount__SHIFT 0x18
+#define DPM_TABLE_90__Reserved_0_MASK 0xff
+#define DPM_TABLE_90__Reserved_0__SHIFT 0x0
+#define DPM_TABLE_90__ThermOutMode_MASK 0xff00
+#define DPM_TABLE_90__ThermOutMode__SHIFT 0x8
+#define DPM_TABLE_90__ThermOutPolarity_MASK 0xff0000
+#define DPM_TABLE_90__ThermOutPolarity__SHIFT 0x10
+#define DPM_TABLE_90__ThermOutGpio_MASK 0xff000000
+#define DPM_TABLE_90__ThermOutGpio__SHIFT 0x18
+#define DPM_TABLE_91__Reserved_0_MASK 0xffffffff
+#define DPM_TABLE_91__Reserved_0__SHIFT 0x0
+#define DPM_TABLE_92__Reserved_1_MASK 0xffffffff
+#define DPM_TABLE_92__Reserved_1__SHIFT 0x0
+#define DPM_TABLE_93__Reserved_2_MASK 0xffffffff
+#define DPM_TABLE_93__Reserved_2__SHIFT 0x0
+#define DPM_TABLE_94__Reserved_3_MASK 0xffffffff
+#define DPM_TABLE_94__Reserved_3__SHIFT 0x0
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_104__GraphicsLevel_0_SclkDid_MASK 0xff000000
+#define DPM_TABLE_104__GraphicsLevel_0_SclkDid__SHIFT 0x18
+#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle_MASK 0xff
+#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_105__GraphicsLevel_0_DownHyst_MASK 0xff0000
+#define DPM_TABLE_105__GraphicsLevel_0_DownHyst__SHIFT 0x10
+#define DPM_TABLE_105__GraphicsLevel_0_UpHyst_MASK 0xff000000
+#define DPM_TABLE_105__GraphicsLevel_0_UpHyst__SHIFT 0x18
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_115__GraphicsLevel_1_SclkDid_MASK 0xff000000
+#define DPM_TABLE_115__GraphicsLevel_1_SclkDid__SHIFT 0x18
+#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle_MASK 0xff
+#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_116__GraphicsLevel_1_DownHyst_MASK 0xff0000
+#define DPM_TABLE_116__GraphicsLevel_1_DownHyst__SHIFT 0x10
+#define DPM_TABLE_116__GraphicsLevel_1_UpHyst_MASK 0xff000000
+#define DPM_TABLE_116__GraphicsLevel_1_UpHyst__SHIFT 0x18
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_126__GraphicsLevel_2_SclkDid_MASK 0xff000000
+#define DPM_TABLE_126__GraphicsLevel_2_SclkDid__SHIFT 0x18
+#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle_MASK 0xff
+#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_127__GraphicsLevel_2_DownHyst_MASK 0xff0000
+#define DPM_TABLE_127__GraphicsLevel_2_DownHyst__SHIFT 0x10
+#define DPM_TABLE_127__GraphicsLevel_2_UpHyst_MASK 0xff000000
+#define DPM_TABLE_127__GraphicsLevel_2_UpHyst__SHIFT 0x18
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_137__GraphicsLevel_3_SclkDid_MASK 0xff000000
+#define DPM_TABLE_137__GraphicsLevel_3_SclkDid__SHIFT 0x18
+#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle_MASK 0xff
+#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_138__GraphicsLevel_3_DownHyst_MASK 0xff0000
+#define DPM_TABLE_138__GraphicsLevel_3_DownHyst__SHIFT 0x10
+#define DPM_TABLE_138__GraphicsLevel_3_UpHyst_MASK 0xff000000
+#define DPM_TABLE_138__GraphicsLevel_3_UpHyst__SHIFT 0x18
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_148__GraphicsLevel_4_SclkDid_MASK 0xff000000
+#define DPM_TABLE_148__GraphicsLevel_4_SclkDid__SHIFT 0x18
+#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle_MASK 0xff
+#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_149__GraphicsLevel_4_DownHyst_MASK 0xff0000
+#define DPM_TABLE_149__GraphicsLevel_4_DownHyst__SHIFT 0x10
+#define DPM_TABLE_149__GraphicsLevel_4_UpHyst_MASK 0xff000000
+#define DPM_TABLE_149__GraphicsLevel_4_UpHyst__SHIFT 0x18
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_159__GraphicsLevel_5_SclkDid_MASK 0xff000000
+#define DPM_TABLE_159__GraphicsLevel_5_SclkDid__SHIFT 0x18
+#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle_MASK 0xff
+#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_160__GraphicsLevel_5_DownHyst_MASK 0xff0000
+#define DPM_TABLE_160__GraphicsLevel_5_DownHyst__SHIFT 0x10
+#define DPM_TABLE_160__GraphicsLevel_5_UpHyst_MASK 0xff000000
+#define DPM_TABLE_160__GraphicsLevel_5_UpHyst__SHIFT 0x18
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_170__GraphicsLevel_6_SclkDid_MASK 0xff000000
+#define DPM_TABLE_170__GraphicsLevel_6_SclkDid__SHIFT 0x18
+#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle_MASK 0xff
+#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_171__GraphicsLevel_6_DownHyst_MASK 0xff0000
+#define DPM_TABLE_171__GraphicsLevel_6_DownHyst__SHIFT 0x10
+#define DPM_TABLE_171__GraphicsLevel_6_UpHyst_MASK 0xff000000
+#define DPM_TABLE_171__GraphicsLevel_6_UpHyst__SHIFT 0x18
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_181__GraphicsLevel_7_SclkDid_MASK 0xff000000
+#define DPM_TABLE_181__GraphicsLevel_7_SclkDid__SHIFT 0x18
+#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle_MASK 0xff
+#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_182__GraphicsLevel_7_DownHyst_MASK 0xff0000
+#define DPM_TABLE_182__GraphicsLevel_7_DownHyst__SHIFT 0x10
+#define DPM_TABLE_182__GraphicsLevel_7_UpHyst_MASK 0xff000000
+#define DPM_TABLE_182__GraphicsLevel_7_UpHyst__SHIFT 0x18
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_184__MemoryACPILevel_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_184__MemoryACPILevel_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_186__MemoryACPILevel_FreqRange_MASK 0xff0000
+#define DPM_TABLE_186__MemoryACPILevel_FreqRange__SHIFT 0x10
+#define DPM_TABLE_186__MemoryACPILevel_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_186__MemoryACPILevel_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_187__MemoryACPILevel_padding_MASK 0xff
+#define DPM_TABLE_187__MemoryACPILevel_padding__SHIFT 0x0
+#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_187__MemoryACPILevel_DownHyst_MASK 0xff0000
+#define DPM_TABLE_187__MemoryACPILevel_DownHyst__SHIFT 0x10
+#define DPM_TABLE_187__MemoryACPILevel_UpHyst_MASK 0xff000000
+#define DPM_TABLE_187__MemoryACPILevel_UpHyst__SHIFT 0x18
+#define DPM_TABLE_188__MemoryACPILevel_MclkDivider_MASK 0xff
+#define DPM_TABLE_188__MemoryACPILevel_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_190__MemoryLevel_0_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_190__MemoryLevel_0_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_192__MemoryLevel_0_FreqRange_MASK 0xff0000
+#define DPM_TABLE_192__MemoryLevel_0_FreqRange__SHIFT 0x10
+#define DPM_TABLE_192__MemoryLevel_0_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_192__MemoryLevel_0_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_193__MemoryLevel_0_padding_MASK 0xff
+#define DPM_TABLE_193__MemoryLevel_0_padding__SHIFT 0x0
+#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_193__MemoryLevel_0_DownHyst_MASK 0xff0000
+#define DPM_TABLE_193__MemoryLevel_0_DownHyst__SHIFT 0x10
+#define DPM_TABLE_193__MemoryLevel_0_UpHyst_MASK 0xff000000
+#define DPM_TABLE_193__MemoryLevel_0_UpHyst__SHIFT 0x18
+#define DPM_TABLE_194__MemoryLevel_0_MclkDivider_MASK 0xff
+#define DPM_TABLE_194__MemoryLevel_0_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_196__MemoryLevel_1_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_196__MemoryLevel_1_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_198__MemoryLevel_1_FreqRange_MASK 0xff0000
+#define DPM_TABLE_198__MemoryLevel_1_FreqRange__SHIFT 0x10
+#define DPM_TABLE_198__MemoryLevel_1_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_198__MemoryLevel_1_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_199__MemoryLevel_1_padding_MASK 0xff
+#define DPM_TABLE_199__MemoryLevel_1_padding__SHIFT 0x0
+#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_199__MemoryLevel_1_DownHyst_MASK 0xff0000
+#define DPM_TABLE_199__MemoryLevel_1_DownHyst__SHIFT 0x10
+#define DPM_TABLE_199__MemoryLevel_1_UpHyst_MASK 0xff000000
+#define DPM_TABLE_199__MemoryLevel_1_UpHyst__SHIFT 0x18
+#define DPM_TABLE_200__MemoryLevel_1_MclkDivider_MASK 0xff
+#define DPM_TABLE_200__MemoryLevel_1_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_202__MemoryLevel_2_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_202__MemoryLevel_2_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_204__MemoryLevel_2_FreqRange_MASK 0xff0000
+#define DPM_TABLE_204__MemoryLevel_2_FreqRange__SHIFT 0x10
+#define DPM_TABLE_204__MemoryLevel_2_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_204__MemoryLevel_2_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_205__MemoryLevel_2_padding_MASK 0xff
+#define DPM_TABLE_205__MemoryLevel_2_padding__SHIFT 0x0
+#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_205__MemoryLevel_2_DownHyst_MASK 0xff0000
+#define DPM_TABLE_205__MemoryLevel_2_DownHyst__SHIFT 0x10
+#define DPM_TABLE_205__MemoryLevel_2_UpHyst_MASK 0xff000000
+#define DPM_TABLE_205__MemoryLevel_2_UpHyst__SHIFT 0x18
+#define DPM_TABLE_206__MemoryLevel_2_MclkDivider_MASK 0xff
+#define DPM_TABLE_206__MemoryLevel_2_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_208__MemoryLevel_3_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_208__MemoryLevel_3_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_210__MemoryLevel_3_FreqRange_MASK 0xff0000
+#define DPM_TABLE_210__MemoryLevel_3_FreqRange__SHIFT 0x10
+#define DPM_TABLE_210__MemoryLevel_3_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_210__MemoryLevel_3_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_211__MemoryLevel_3_padding_MASK 0xff
+#define DPM_TABLE_211__MemoryLevel_3_padding__SHIFT 0x0
+#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_211__MemoryLevel_3_DownHyst_MASK 0xff0000
+#define DPM_TABLE_211__MemoryLevel_3_DownHyst__SHIFT 0x10
+#define DPM_TABLE_211__MemoryLevel_3_UpHyst_MASK 0xff000000
+#define DPM_TABLE_211__MemoryLevel_3_UpHyst__SHIFT 0x18
+#define DPM_TABLE_212__MemoryLevel_3_MclkDivider_MASK 0xff
+#define DPM_TABLE_212__MemoryLevel_3_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_213__LinkLevel_0_SPC_MASK 0xff
+#define DPM_TABLE_213__LinkLevel_0_SPC__SHIFT 0x0
+#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_214__LinkLevel_0_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_214__LinkLevel_0_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_215__LinkLevel_0_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_215__LinkLevel_0_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_216__LinkLevel_0_Reserved_MASK 0xffffffff
+#define DPM_TABLE_216__LinkLevel_0_Reserved__SHIFT 0x0
+#define DPM_TABLE_217__LinkLevel_1_SPC_MASK 0xff
+#define DPM_TABLE_217__LinkLevel_1_SPC__SHIFT 0x0
+#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_218__LinkLevel_1_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_218__LinkLevel_1_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_219__LinkLevel_1_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_219__LinkLevel_1_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_220__LinkLevel_1_Reserved_MASK 0xffffffff
+#define DPM_TABLE_220__LinkLevel_1_Reserved__SHIFT 0x0
+#define DPM_TABLE_221__LinkLevel_2_SPC_MASK 0xff
+#define DPM_TABLE_221__LinkLevel_2_SPC__SHIFT 0x0
+#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_222__LinkLevel_2_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_222__LinkLevel_2_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_223__LinkLevel_2_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_223__LinkLevel_2_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_224__LinkLevel_2_Reserved_MASK 0xffffffff
+#define DPM_TABLE_224__LinkLevel_2_Reserved__SHIFT 0x0
+#define DPM_TABLE_225__LinkLevel_3_SPC_MASK 0xff
+#define DPM_TABLE_225__LinkLevel_3_SPC__SHIFT 0x0
+#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_226__LinkLevel_3_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_226__LinkLevel_3_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_227__LinkLevel_3_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_227__LinkLevel_3_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_228__LinkLevel_3_Reserved_MASK 0xffffffff
+#define DPM_TABLE_228__LinkLevel_3_Reserved__SHIFT 0x0
+#define DPM_TABLE_229__LinkLevel_4_SPC_MASK 0xff
+#define DPM_TABLE_229__LinkLevel_4_SPC__SHIFT 0x0
+#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_230__LinkLevel_4_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_230__LinkLevel_4_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_231__LinkLevel_4_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_231__LinkLevel_4_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_232__LinkLevel_4_Reserved_MASK 0xffffffff
+#define DPM_TABLE_232__LinkLevel_4_Reserved__SHIFT 0x0
+#define DPM_TABLE_233__LinkLevel_5_SPC_MASK 0xff
+#define DPM_TABLE_233__LinkLevel_5_SPC__SHIFT 0x0
+#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_234__LinkLevel_5_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_234__LinkLevel_5_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_235__LinkLevel_5_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_235__LinkLevel_5_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_236__LinkLevel_5_Reserved_MASK 0xffffffff
+#define DPM_TABLE_236__LinkLevel_5_Reserved__SHIFT 0x0
+#define DPM_TABLE_237__LinkLevel_6_SPC_MASK 0xff
+#define DPM_TABLE_237__LinkLevel_6_SPC__SHIFT 0x0
+#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_238__LinkLevel_6_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_238__LinkLevel_6_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_239__LinkLevel_6_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_239__LinkLevel_6_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_240__LinkLevel_6_Reserved_MASK 0xffffffff
+#define DPM_TABLE_240__LinkLevel_6_Reserved__SHIFT 0x0
+#define DPM_TABLE_241__LinkLevel_7_SPC_MASK 0xff
+#define DPM_TABLE_241__LinkLevel_7_SPC__SHIFT 0x0
+#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_242__LinkLevel_7_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_242__LinkLevel_7_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_243__LinkLevel_7_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_243__LinkLevel_7_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_244__LinkLevel_7_Reserved_MASK 0xffffffff
+#define DPM_TABLE_244__LinkLevel_7_Reserved__SHIFT 0x0
+#define DPM_TABLE_245__ACPILevel_Flags_MASK 0xffffffff
+#define DPM_TABLE_245__ACPILevel_Flags__SHIFT 0x0
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_247__ACPILevel_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_247__ACPILevel_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_248__ACPILevel_padding_MASK 0xff
+#define DPM_TABLE_248__ACPILevel_padding__SHIFT 0x0
+#define DPM_TABLE_248__ACPILevel_DeepSleepDivId_MASK 0xff00
+#define DPM_TABLE_248__ACPILevel_DeepSleepDivId__SHIFT 0x8
+#define DPM_TABLE_248__ACPILevel_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_248__ACPILevel_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_248__ACPILevel_SclkDid_MASK 0xff000000
+#define DPM_TABLE_248__ACPILevel_SclkDid__SHIFT 0x18
+#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl_MASK 0xffffffff
+#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl__SHIFT 0x0
+#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2_MASK 0xffffffff
+#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2__SHIFT 0x0
+#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_255__ACPILevel_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_255__ACPILevel_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_257__UvdLevel_0_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_257__UvdLevel_0_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_258__UvdLevel_0_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_258__UvdLevel_0_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_260__UvdLevel_0_padding_1_MASK 0xff
+#define DPM_TABLE_260__UvdLevel_0_padding_1__SHIFT 0x0
+#define DPM_TABLE_260__UvdLevel_0_padding_0_MASK 0xff00
+#define DPM_TABLE_260__UvdLevel_0_padding_0__SHIFT 0x8
+#define DPM_TABLE_260__UvdLevel_0_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_260__UvdLevel_0_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_260__UvdLevel_0_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_260__UvdLevel_0_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_261__UvdLevel_1_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_261__UvdLevel_1_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_262__UvdLevel_1_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_262__UvdLevel_1_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_264__UvdLevel_1_padding_1_MASK 0xff
+#define DPM_TABLE_264__UvdLevel_1_padding_1__SHIFT 0x0
+#define DPM_TABLE_264__UvdLevel_1_padding_0_MASK 0xff00
+#define DPM_TABLE_264__UvdLevel_1_padding_0__SHIFT 0x8
+#define DPM_TABLE_264__UvdLevel_1_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_264__UvdLevel_1_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_264__UvdLevel_1_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_264__UvdLevel_1_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_265__UvdLevel_2_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_265__UvdLevel_2_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_266__UvdLevel_2_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_266__UvdLevel_2_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_268__UvdLevel_2_padding_1_MASK 0xff
+#define DPM_TABLE_268__UvdLevel_2_padding_1__SHIFT 0x0
+#define DPM_TABLE_268__UvdLevel_2_padding_0_MASK 0xff00
+#define DPM_TABLE_268__UvdLevel_2_padding_0__SHIFT 0x8
+#define DPM_TABLE_268__UvdLevel_2_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_268__UvdLevel_2_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_268__UvdLevel_2_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_268__UvdLevel_2_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_269__UvdLevel_3_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_269__UvdLevel_3_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_270__UvdLevel_3_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_270__UvdLevel_3_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_272__UvdLevel_3_padding_1_MASK 0xff
+#define DPM_TABLE_272__UvdLevel_3_padding_1__SHIFT 0x0
+#define DPM_TABLE_272__UvdLevel_3_padding_0_MASK 0xff00
+#define DPM_TABLE_272__UvdLevel_3_padding_0__SHIFT 0x8
+#define DPM_TABLE_272__UvdLevel_3_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_272__UvdLevel_3_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_272__UvdLevel_3_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_272__UvdLevel_3_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_273__UvdLevel_4_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_273__UvdLevel_4_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_274__UvdLevel_4_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_274__UvdLevel_4_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_276__UvdLevel_4_padding_1_MASK 0xff
+#define DPM_TABLE_276__UvdLevel_4_padding_1__SHIFT 0x0
+#define DPM_TABLE_276__UvdLevel_4_padding_0_MASK 0xff00
+#define DPM_TABLE_276__UvdLevel_4_padding_0__SHIFT 0x8
+#define DPM_TABLE_276__UvdLevel_4_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_276__UvdLevel_4_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_276__UvdLevel_4_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_276__UvdLevel_4_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_277__UvdLevel_5_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_277__UvdLevel_5_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_278__UvdLevel_5_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_278__UvdLevel_5_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_280__UvdLevel_5_padding_1_MASK 0xff
+#define DPM_TABLE_280__UvdLevel_5_padding_1__SHIFT 0x0
+#define DPM_TABLE_280__UvdLevel_5_padding_0_MASK 0xff00
+#define DPM_TABLE_280__UvdLevel_5_padding_0__SHIFT 0x8
+#define DPM_TABLE_280__UvdLevel_5_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_280__UvdLevel_5_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_280__UvdLevel_5_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_280__UvdLevel_5_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_281__UvdLevel_6_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_281__UvdLevel_6_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_282__UvdLevel_6_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_282__UvdLevel_6_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_284__UvdLevel_6_padding_1_MASK 0xff
+#define DPM_TABLE_284__UvdLevel_6_padding_1__SHIFT 0x0
+#define DPM_TABLE_284__UvdLevel_6_padding_0_MASK 0xff00
+#define DPM_TABLE_284__UvdLevel_6_padding_0__SHIFT 0x8
+#define DPM_TABLE_284__UvdLevel_6_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_284__UvdLevel_6_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_284__UvdLevel_6_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_284__UvdLevel_6_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_285__UvdLevel_7_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_285__UvdLevel_7_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_286__UvdLevel_7_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_286__UvdLevel_7_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_288__UvdLevel_7_padding_1_MASK 0xff
+#define DPM_TABLE_288__UvdLevel_7_padding_1__SHIFT 0x0
+#define DPM_TABLE_288__UvdLevel_7_padding_0_MASK 0xff00
+#define DPM_TABLE_288__UvdLevel_7_padding_0__SHIFT 0x8
+#define DPM_TABLE_288__UvdLevel_7_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_288__UvdLevel_7_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_288__UvdLevel_7_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_288__UvdLevel_7_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_289__VceLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_289__VceLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_291__VceLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_291__VceLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_291__VceLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_291__VceLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_291__VceLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_291__VceLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_291__VceLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_291__VceLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_292__VceLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_292__VceLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_294__VceLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_294__VceLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_294__VceLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_294__VceLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_294__VceLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_294__VceLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_294__VceLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_294__VceLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_295__VceLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_295__VceLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_297__VceLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_297__VceLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_297__VceLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_297__VceLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_297__VceLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_297__VceLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_297__VceLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_297__VceLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_298__VceLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_298__VceLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_300__VceLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_300__VceLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_300__VceLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_300__VceLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_300__VceLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_300__VceLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_300__VceLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_300__VceLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_301__VceLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_301__VceLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_303__VceLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_303__VceLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_303__VceLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_303__VceLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_303__VceLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_303__VceLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_303__VceLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_303__VceLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_304__VceLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_304__VceLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_306__VceLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_306__VceLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_306__VceLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_306__VceLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_306__VceLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_306__VceLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_306__VceLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_306__VceLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_307__VceLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_307__VceLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_309__VceLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_309__VceLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_309__VceLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_309__VceLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_309__VceLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_309__VceLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_309__VceLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_309__VceLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_310__VceLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_310__VceLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_312__VceLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_312__VceLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_312__VceLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_312__VceLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_312__VceLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_312__VceLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_312__VceLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_312__VceLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_313__AcpLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_313__AcpLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_315__AcpLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_315__AcpLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_315__AcpLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_315__AcpLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_315__AcpLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_315__AcpLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_315__AcpLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_315__AcpLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_316__AcpLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_316__AcpLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_318__AcpLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_318__AcpLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_318__AcpLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_318__AcpLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_318__AcpLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_318__AcpLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_318__AcpLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_318__AcpLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_319__AcpLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_319__AcpLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_321__AcpLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_321__AcpLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_321__AcpLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_321__AcpLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_321__AcpLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_321__AcpLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_321__AcpLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_321__AcpLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_322__AcpLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_322__AcpLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_324__AcpLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_324__AcpLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_324__AcpLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_324__AcpLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_324__AcpLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_324__AcpLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_324__AcpLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_324__AcpLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_325__AcpLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_325__AcpLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_327__AcpLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_327__AcpLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_327__AcpLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_327__AcpLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_327__AcpLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_327__AcpLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_327__AcpLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_327__AcpLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_328__AcpLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_328__AcpLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_330__AcpLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_330__AcpLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_330__AcpLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_330__AcpLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_330__AcpLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_330__AcpLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_330__AcpLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_330__AcpLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_331__AcpLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_331__AcpLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_333__AcpLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_333__AcpLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_333__AcpLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_333__AcpLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_333__AcpLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_333__AcpLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_333__AcpLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_333__AcpLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_334__AcpLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_334__AcpLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_336__AcpLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_336__AcpLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_336__AcpLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_336__AcpLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_336__AcpLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_336__AcpLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_336__AcpLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_336__AcpLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_337__SamuLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_337__SamuLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_339__SamuLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_339__SamuLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_339__SamuLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_339__SamuLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_339__SamuLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_339__SamuLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_339__SamuLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_339__SamuLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_340__SamuLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_340__SamuLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_342__SamuLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_342__SamuLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_342__SamuLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_342__SamuLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_342__SamuLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_342__SamuLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_342__SamuLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_342__SamuLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_343__SamuLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_343__SamuLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_345__SamuLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_345__SamuLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_345__SamuLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_345__SamuLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_345__SamuLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_345__SamuLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_345__SamuLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_345__SamuLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_346__SamuLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_346__SamuLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_348__SamuLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_348__SamuLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_348__SamuLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_348__SamuLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_348__SamuLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_348__SamuLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_348__SamuLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_348__SamuLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_349__SamuLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_349__SamuLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_351__SamuLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_351__SamuLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_351__SamuLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_351__SamuLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_351__SamuLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_351__SamuLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_351__SamuLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_351__SamuLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_352__SamuLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_352__SamuLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_354__SamuLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_354__SamuLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_354__SamuLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_354__SamuLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_354__SamuLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_354__SamuLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_354__SamuLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_354__SamuLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_355__SamuLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_355__SamuLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_357__SamuLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_357__SamuLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_357__SamuLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_357__SamuLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_357__SamuLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_357__SamuLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_357__SamuLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_357__SamuLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_358__SamuLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_358__SamuLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_360__SamuLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_360__SamuLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_360__SamuLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_360__SamuLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_360__SamuLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_360__SamuLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_360__SamuLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_360__SamuLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_361__Ulv_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_361__Ulv_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_362__Ulv_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_362__Ulv_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_363__Ulv_VddcPhase_MASK 0xff
+#define DPM_TABLE_363__Ulv_VddcPhase__SHIFT 0x0
+#define DPM_TABLE_363__Ulv_VddcOffsetVid_MASK 0xff00
+#define DPM_TABLE_363__Ulv_VddcOffsetVid__SHIFT 0x8
+#define DPM_TABLE_363__Ulv_VddcOffset_MASK 0xffff0000
+#define DPM_TABLE_363__Ulv_VddcOffset__SHIFT 0x10
+#define DPM_TABLE_364__Ulv_Reserved_MASK 0xffffffff
+#define DPM_TABLE_364__Ulv_Reserved__SHIFT 0x0
+#define DPM_TABLE_365__SclkStepSize_MASK 0xffffffff
+#define DPM_TABLE_365__SclkStepSize__SHIFT 0x0
+#define DPM_TABLE_366__Smio_0_MASK 0xffffffff
+#define DPM_TABLE_366__Smio_0__SHIFT 0x0
+#define DPM_TABLE_367__Smio_1_MASK 0xffffffff
+#define DPM_TABLE_367__Smio_1__SHIFT 0x0
+#define DPM_TABLE_368__Smio_2_MASK 0xffffffff
+#define DPM_TABLE_368__Smio_2__SHIFT 0x0
+#define DPM_TABLE_369__Smio_3_MASK 0xffffffff
+#define DPM_TABLE_369__Smio_3__SHIFT 0x0
+#define DPM_TABLE_370__Smio_4_MASK 0xffffffff
+#define DPM_TABLE_370__Smio_4__SHIFT 0x0
+#define DPM_TABLE_371__Smio_5_MASK 0xffffffff
+#define DPM_TABLE_371__Smio_5__SHIFT 0x0
+#define DPM_TABLE_372__Smio_6_MASK 0xffffffff
+#define DPM_TABLE_372__Smio_6__SHIFT 0x0
+#define DPM_TABLE_373__Smio_7_MASK 0xffffffff
+#define DPM_TABLE_373__Smio_7__SHIFT 0x0
+#define DPM_TABLE_374__Smio_8_MASK 0xffffffff
+#define DPM_TABLE_374__Smio_8__SHIFT 0x0
+#define DPM_TABLE_375__Smio_9_MASK 0xffffffff
+#define DPM_TABLE_375__Smio_9__SHIFT 0x0
+#define DPM_TABLE_376__Smio_10_MASK 0xffffffff
+#define DPM_TABLE_376__Smio_10__SHIFT 0x0
+#define DPM_TABLE_377__Smio_11_MASK 0xffffffff
+#define DPM_TABLE_377__Smio_11__SHIFT 0x0
+#define DPM_TABLE_378__Smio_12_MASK 0xffffffff
+#define DPM_TABLE_378__Smio_12__SHIFT 0x0
+#define DPM_TABLE_379__Smio_13_MASK 0xffffffff
+#define DPM_TABLE_379__Smio_13__SHIFT 0x0
+#define DPM_TABLE_380__Smio_14_MASK 0xffffffff
+#define DPM_TABLE_380__Smio_14__SHIFT 0x0
+#define DPM_TABLE_381__Smio_15_MASK 0xffffffff
+#define DPM_TABLE_381__Smio_15__SHIFT 0x0
+#define DPM_TABLE_382__Smio_16_MASK 0xffffffff
+#define DPM_TABLE_382__Smio_16__SHIFT 0x0
+#define DPM_TABLE_383__Smio_17_MASK 0xffffffff
+#define DPM_TABLE_383__Smio_17__SHIFT 0x0
+#define DPM_TABLE_384__Smio_18_MASK 0xffffffff
+#define DPM_TABLE_384__Smio_18__SHIFT 0x0
+#define DPM_TABLE_385__Smio_19_MASK 0xffffffff
+#define DPM_TABLE_385__Smio_19__SHIFT 0x0
+#define DPM_TABLE_386__Smio_20_MASK 0xffffffff
+#define DPM_TABLE_386__Smio_20__SHIFT 0x0
+#define DPM_TABLE_387__Smio_21_MASK 0xffffffff
+#define DPM_TABLE_387__Smio_21__SHIFT 0x0
+#define DPM_TABLE_388__Smio_22_MASK 0xffffffff
+#define DPM_TABLE_388__Smio_22__SHIFT 0x0
+#define DPM_TABLE_389__Smio_23_MASK 0xffffffff
+#define DPM_TABLE_389__Smio_23__SHIFT 0x0
+#define DPM_TABLE_390__Smio_24_MASK 0xffffffff
+#define DPM_TABLE_390__Smio_24__SHIFT 0x0
+#define DPM_TABLE_391__Smio_25_MASK 0xffffffff
+#define DPM_TABLE_391__Smio_25__SHIFT 0x0
+#define DPM_TABLE_392__Smio_26_MASK 0xffffffff
+#define DPM_TABLE_392__Smio_26__SHIFT 0x0
+#define DPM_TABLE_393__Smio_27_MASK 0xffffffff
+#define DPM_TABLE_393__Smio_27__SHIFT 0x0
+#define DPM_TABLE_394__Smio_28_MASK 0xffffffff
+#define DPM_TABLE_394__Smio_28__SHIFT 0x0
+#define DPM_TABLE_395__Smio_29_MASK 0xffffffff
+#define DPM_TABLE_395__Smio_29__SHIFT 0x0
+#define DPM_TABLE_396__Smio_30_MASK 0xffffffff
+#define DPM_TABLE_396__Smio_30__SHIFT 0x0
+#define DPM_TABLE_397__Smio_31_MASK 0xffffffff
+#define DPM_TABLE_397__Smio_31__SHIFT 0x0
+#define DPM_TABLE_398__SamuBootLevel_MASK 0xff
+#define DPM_TABLE_398__SamuBootLevel__SHIFT 0x0
+#define DPM_TABLE_398__AcpBootLevel_MASK 0xff00
+#define DPM_TABLE_398__AcpBootLevel__SHIFT 0x8
+#define DPM_TABLE_398__VceBootLevel_MASK 0xff0000
+#define DPM_TABLE_398__VceBootLevel__SHIFT 0x10
+#define DPM_TABLE_398__UvdBootLevel_MASK 0xff000000
+#define DPM_TABLE_398__UvdBootLevel__SHIFT 0x18
+#define DPM_TABLE_399__GraphicsInterval_MASK 0xff
+#define DPM_TABLE_399__GraphicsInterval__SHIFT 0x0
+#define DPM_TABLE_399__GraphicsThermThrottleEnable_MASK 0xff00
+#define DPM_TABLE_399__GraphicsThermThrottleEnable__SHIFT 0x8
+#define DPM_TABLE_399__GraphicsVoltageChangeEnable_MASK 0xff0000
+#define DPM_TABLE_399__GraphicsVoltageChangeEnable__SHIFT 0x10
+#define DPM_TABLE_399__GraphicsBootLevel_MASK 0xff000000
+#define DPM_TABLE_399__GraphicsBootLevel__SHIFT 0x18
+#define DPM_TABLE_400__TemperatureLimitHigh_MASK 0xffff
+#define DPM_TABLE_400__TemperatureLimitHigh__SHIFT 0x0
+#define DPM_TABLE_400__ThermalInterval_MASK 0xff0000
+#define DPM_TABLE_400__ThermalInterval__SHIFT 0x10
+#define DPM_TABLE_400__VoltageInterval_MASK 0xff000000
+#define DPM_TABLE_400__VoltageInterval__SHIFT 0x18
+#define DPM_TABLE_401__MemoryVoltageChangeEnable_MASK 0xff
+#define DPM_TABLE_401__MemoryVoltageChangeEnable__SHIFT 0x0
+#define DPM_TABLE_401__MemoryBootLevel_MASK 0xff00
+#define DPM_TABLE_401__MemoryBootLevel__SHIFT 0x8
+#define DPM_TABLE_401__TemperatureLimitLow_MASK 0xffff0000
+#define DPM_TABLE_401__TemperatureLimitLow__SHIFT 0x10
+#define DPM_TABLE_402__MemoryThermThrottleEnable_MASK 0xff
+#define DPM_TABLE_402__MemoryThermThrottleEnable__SHIFT 0x0
+#define DPM_TABLE_402__MemoryInterval_MASK 0xff00
+#define DPM_TABLE_402__MemoryInterval__SHIFT 0x8
+#define DPM_TABLE_402__BootMVdd_MASK 0xffff0000
+#define DPM_TABLE_402__BootMVdd__SHIFT 0x10
+#define DPM_TABLE_403__PhaseResponseTime_MASK 0xffff
+#define DPM_TABLE_403__PhaseResponseTime__SHIFT 0x0
+#define DPM_TABLE_403__VoltageResponseTime_MASK 0xffff0000
+#define DPM_TABLE_403__VoltageResponseTime__SHIFT 0x10
+#define DPM_TABLE_404__DTEMode_MASK 0xff
+#define DPM_TABLE_404__DTEMode__SHIFT 0x0
+#define DPM_TABLE_404__DTEInterval_MASK 0xff00
+#define DPM_TABLE_404__DTEInterval__SHIFT 0x8
+#define DPM_TABLE_404__PCIeGenInterval_MASK 0xff0000
+#define DPM_TABLE_404__PCIeGenInterval__SHIFT 0x10
+#define DPM_TABLE_404__PCIeBootLinkLevel_MASK 0xff000000
+#define DPM_TABLE_404__PCIeBootLinkLevel__SHIFT 0x18
+#define DPM_TABLE_405__ThermGpio_MASK 0xff
+#define DPM_TABLE_405__ThermGpio__SHIFT 0x0
+#define DPM_TABLE_405__AcDcGpio_MASK 0xff00
+#define DPM_TABLE_405__AcDcGpio__SHIFT 0x8
+#define DPM_TABLE_405__VRHotGpio_MASK 0xff0000
+#define DPM_TABLE_405__VRHotGpio__SHIFT 0x10
+#define DPM_TABLE_405__SVI2Enable_MASK 0xff000000
+#define DPM_TABLE_405__SVI2Enable__SHIFT 0x18
+#define DPM_TABLE_406__PPM_TemperatureLimit_MASK 0xffff
+#define DPM_TABLE_406__PPM_TemperatureLimit__SHIFT 0x0
+#define DPM_TABLE_406__PPM_PkgPwrLimit_MASK 0xffff0000
+#define DPM_TABLE_406__PPM_PkgPwrLimit__SHIFT 0x10
+#define DPM_TABLE_407__TargetTdp_MASK 0xffff
+#define DPM_TABLE_407__TargetTdp__SHIFT 0x0
+#define DPM_TABLE_407__DefaultTdp_MASK 0xffff0000
+#define DPM_TABLE_407__DefaultTdp__SHIFT 0x10
+#define DPM_TABLE_408__FpsLowThreshold_MASK 0xffff
+#define DPM_TABLE_408__FpsLowThreshold__SHIFT 0x0
+#define DPM_TABLE_408__FpsHighThreshold_MASK 0xffff0000
+#define DPM_TABLE_408__FpsHighThreshold__SHIFT 0x10
+#define DPM_TABLE_409__BAPMTI_R_0_1_0_MASK 0xffff
+#define DPM_TABLE_409__BAPMTI_R_0_1_0__SHIFT 0x0
+#define DPM_TABLE_409__BAPMTI_R_0_0_0_MASK 0xffff0000
+#define DPM_TABLE_409__BAPMTI_R_0_0_0__SHIFT 0x10
+#define DPM_TABLE_410__BAPMTI_R_1_0_0_MASK 0xffff
+#define DPM_TABLE_410__BAPMTI_R_1_0_0__SHIFT 0x0
+#define DPM_TABLE_410__BAPMTI_R_0_2_0_MASK 0xffff0000
+#define DPM_TABLE_410__BAPMTI_R_0_2_0__SHIFT 0x10
+#define DPM_TABLE_411__BAPMTI_R_1_2_0_MASK 0xffff
+#define DPM_TABLE_411__BAPMTI_R_1_2_0__SHIFT 0x0
+#define DPM_TABLE_411__BAPMTI_R_1_1_0_MASK 0xffff0000
+#define DPM_TABLE_411__BAPMTI_R_1_1_0__SHIFT 0x10
+#define DPM_TABLE_412__BAPMTI_R_2_1_0_MASK 0xffff
+#define DPM_TABLE_412__BAPMTI_R_2_1_0__SHIFT 0x0
+#define DPM_TABLE_412__BAPMTI_R_2_0_0_MASK 0xffff0000
+#define DPM_TABLE_412__BAPMTI_R_2_0_0__SHIFT 0x10
+#define DPM_TABLE_413__BAPMTI_R_3_0_0_MASK 0xffff
+#define DPM_TABLE_413__BAPMTI_R_3_0_0__SHIFT 0x0
+#define DPM_TABLE_413__BAPMTI_R_2_2_0_MASK 0xffff0000
+#define DPM_TABLE_413__BAPMTI_R_2_2_0__SHIFT 0x10
+#define DPM_TABLE_414__BAPMTI_R_3_2_0_MASK 0xffff
+#define DPM_TABLE_414__BAPMTI_R_3_2_0__SHIFT 0x0
+#define DPM_TABLE_414__BAPMTI_R_3_1_0_MASK 0xffff0000
+#define DPM_TABLE_414__BAPMTI_R_3_1_0__SHIFT 0x10
+#define DPM_TABLE_415__BAPMTI_R_4_1_0_MASK 0xffff
+#define DPM_TABLE_415__BAPMTI_R_4_1_0__SHIFT 0x0
+#define DPM_TABLE_415__BAPMTI_R_4_0_0_MASK 0xffff0000
+#define DPM_TABLE_415__BAPMTI_R_4_0_0__SHIFT 0x10
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0_MASK 0xffff
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0__SHIFT 0x0
+#define DPM_TABLE_416__BAPMTI_R_4_2_0_MASK 0xffff0000
+#define DPM_TABLE_416__BAPMTI_R_4_2_0__SHIFT 0x10
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0_MASK 0xffff
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0__SHIFT 0x0
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0_MASK 0xffff0000
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0__SHIFT 0x10
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0_MASK 0xffff
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0__SHIFT 0x0
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0_MASK 0xffff0000
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0__SHIFT 0x10
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0_MASK 0xffff
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0__SHIFT 0x0
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0_MASK 0xffff0000
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0__SHIFT 0x10
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0_MASK 0xffff
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0__SHIFT 0x0
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0_MASK 0xffff0000
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0__SHIFT 0x10
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0_MASK 0xffff
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0__SHIFT 0x0
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0_MASK 0xffff0000
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0__SHIFT 0x10
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0_MASK 0xffff
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0__SHIFT 0x0
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0_MASK 0xffff0000
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0__SHIFT 0x10
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0_MASK 0xffff
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0__SHIFT 0x0
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0_MASK 0xffff0000
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0__SHIFT 0x10
+#define DPM_TABLE_424__GpuTjHyst_MASK 0xff
+#define DPM_TABLE_424__GpuTjHyst__SHIFT 0x0
+#define DPM_TABLE_424__GpuTjMax_MASK 0xff00
+#define DPM_TABLE_424__GpuTjMax__SHIFT 0x8
+#define DPM_TABLE_424__DTETjOffset_MASK 0xff0000
+#define DPM_TABLE_424__DTETjOffset__SHIFT 0x10
+#define DPM_TABLE_424__DTEAmbientTempBase_MASK 0xff000000
+#define DPM_TABLE_424__DTEAmbientTempBase__SHIFT 0x18
+#define DPM_TABLE_425__BootVoltage_Phases_MASK 0xff
+#define DPM_TABLE_425__BootVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_425__BootVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_425__BootVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_425__BootVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_425__BootVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_425__BootVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_425__BootVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT_MASK 0xffffffff
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT__SHIFT 0x0
+#define DPM_TABLE_427__LowSclkInterruptThreshold_MASK 0xffffffff
+#define DPM_TABLE_427__LowSclkInterruptThreshold__SHIFT 0x0
+#define DPM_TABLE_428__VddGfxReChkWait_MASK 0xffffffff
+#define DPM_TABLE_428__VddGfxReChkWait__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1_MASK 0xff
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0_MASK 0xff00
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0__SHIFT 0x8
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID_MASK 0xff0000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID__SHIFT 0x10
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID_MASK 0xff000000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID__SHIFT 0x18
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3_MASK 0xff
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3__SHIFT 0x0
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2_MASK 0xff00
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2__SHIFT 0x8
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1_MASK 0xff0000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1__SHIFT 0x10
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0_MASK 0xff000000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0__SHIFT 0x18
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7_MASK 0xff
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7__SHIFT 0x0
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6_MASK 0xff00
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6__SHIFT 0x8
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5_MASK 0xff0000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5__SHIFT 0x10
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4_MASK 0xff000000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4__SHIFT 0x18
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1_MASK 0xff
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1__SHIFT 0x0
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0_MASK 0xff00
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0__SHIFT 0x8
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID_MASK 0xff0000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID__SHIFT 0x10
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID_MASK 0xff000000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID__SHIFT 0x18
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3_MASK 0xff
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3__SHIFT 0x0
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2_MASK 0xff00
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2__SHIFT 0x8
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1_MASK 0xff0000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1__SHIFT 0x10
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0_MASK 0xff000000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0__SHIFT 0x18
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7_MASK 0xff
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7__SHIFT 0x0
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6_MASK 0xff00
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6__SHIFT 0x8
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5_MASK 0xff0000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5__SHIFT 0x10
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4_MASK 0xff000000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4__SHIFT 0x18
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1_MASK 0xff
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1__SHIFT 0x0
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0_MASK 0xff00
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0__SHIFT 0x8
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID_MASK 0xff0000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID__SHIFT 0x10
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID_MASK 0xff000000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID__SHIFT 0x18
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3_MASK 0xff
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3__SHIFT 0x0
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2_MASK 0xff00
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2__SHIFT 0x8
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1_MASK 0xff0000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1__SHIFT 0x10
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0_MASK 0xff000000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0__SHIFT 0x18
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7_MASK 0xff
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7__SHIFT 0x0
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6_MASK 0xff00
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6__SHIFT 0x8
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5_MASK 0xff0000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5__SHIFT 0x10
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4_MASK 0xff000000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4__SHIFT 0x18
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1_MASK 0xff
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1__SHIFT 0x0
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0_MASK 0xff00
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0__SHIFT 0x8
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID_MASK 0xff0000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID__SHIFT 0x10
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID_MASK 0xff000000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID__SHIFT 0x18
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3_MASK 0xff
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3__SHIFT 0x0
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2_MASK 0xff00
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2__SHIFT 0x8
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1_MASK 0xff0000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1__SHIFT 0x10
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0_MASK 0xff000000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0__SHIFT 0x18
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7_MASK 0xff
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7__SHIFT 0x0
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6_MASK 0xff00
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6__SHIFT 0x8
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5_MASK 0xff0000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5__SHIFT 0x10
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4_MASK 0xff000000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4__SHIFT 0x18
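
Every field in this generated header comes as a matched pair: a register__field_MASK value and a register__field__SHIFT position, where the shift is always the index of the mask's lowest set bit. Driver code rarely touches the raw values; it composes them into get/set helpers. A minimal standalone sketch of that pattern, using the DPM_TABLE_398 boot-level fields above (the REG_GET_FIELD()/REG_SET_FIELD() macros mirror the helpers amdgpu uses, re-declared locally so the example compiles on its own):

#include <stdint.h>
#include <stdio.h>

/* Copied from the generated header above. */
#define DPM_TABLE_398__UvdBootLevel_MASK   0xff000000
#define DPM_TABLE_398__UvdBootLevel__SHIFT 0x18
#define DPM_TABLE_398__VceBootLevel_MASK   0xff0000
#define DPM_TABLE_398__VceBootLevel__SHIFT 0x10

/* Same shape as the REG_GET_FIELD()/REG_SET_FIELD() helpers amdgpu uses;
 * re-declared here only so this sketch is self-contained. */
#define REG_FIELD_MASK(reg, field)  reg##__##field##_MASK
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_GET_FIELD(val, reg, field) \
	(((val) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
#define REG_SET_FIELD(val, reg, field, fv)                  \
	(((val) & ~REG_FIELD_MASK(reg, field)) |             \
	 (((uint32_t)(fv) << REG_FIELD_SHIFT(reg, field)) &  \
	  REG_FIELD_MASK(reg, field)))

int main(void)
{
	uint32_t dword = 0;

	/* Pack two 8-bit boot levels into the 32-bit table dword... */
	dword = REG_SET_FIELD(dword, DPM_TABLE_398, UvdBootLevel, 2);
	dword = REG_SET_FIELD(dword, DPM_TABLE_398, VceBootLevel, 1);

	/* ...and pull one back out. Prints dword=0x02010000 uvd=2. */
	printf("dword=0x%08x uvd=%u\n", dword,
	       REG_GET_FIELD(dword, DPM_TABLE_398, UvdBootLevel));
	return 0;
}
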
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_27__UlvTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_27__UlvTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_29__Reserved_0_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_29__Reserved_0__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_30__Reserved_1_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_30__Reserved_1__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineOffsetVddC_MASK 0xff
+#define PM_FUSES_1__SviLoadLineOffsetVddC__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineTrimVddC_MASK 0xff00
+#define PM_FUSES_1__SviLoadLineTrimVddC__SHIFT 0x8
+#define PM_FUSES_1__SviLoadLineVddC_MASK 0xff0000
+#define PM_FUSES_1__SviLoadLineVddC__SHIFT 0x10
+#define PM_FUSES_1__SviLoadLineEn_MASK 0xff000000
+#define PM_FUSES_1__SviLoadLineEn__SHIFT 0x18
+#define PM_FUSES_2__TDC_MAWt_MASK 0xff
+#define PM_FUSES_2__TDC_MAWt__SHIFT 0x0
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc_MASK 0xff00
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc__SHIFT 0x8
+#define PM_FUSES_2__TDC_VDDC_PkgLimit_MASK 0xffff0000
+#define PM_FUSES_2__TDC_VDDC_PkgLimit__SHIFT 0x10
+#define PM_FUSES_3__Reserved_MASK 0xff
+#define PM_FUSES_3__Reserved__SHIFT 0x0
+#define PM_FUSES_3__LPMLTemperatureMax_MASK 0xff00
+#define PM_FUSES_3__LPMLTemperatureMax__SHIFT 0x8
+#define PM_FUSES_3__LPMLTemperatureMin_MASK 0xff0000
+#define PM_FUSES_3__LPMLTemperatureMin__SHIFT 0x10
+#define PM_FUSES_3__TdcWaterfallCtl_MASK 0xff000000
+#define PM_FUSES_3__TdcWaterfallCtl__SHIFT 0x18
+#define PM_FUSES_4__LPMLTemperatureScaler_3_MASK 0xff
+#define PM_FUSES_4__LPMLTemperatureScaler_3__SHIFT 0x0
+#define PM_FUSES_4__LPMLTemperatureScaler_2_MASK 0xff00
+#define PM_FUSES_4__LPMLTemperatureScaler_2__SHIFT 0x8
+#define PM_FUSES_4__LPMLTemperatureScaler_1_MASK 0xff0000
+#define PM_FUSES_4__LPMLTemperatureScaler_1__SHIFT 0x10
+#define PM_FUSES_4__LPMLTemperatureScaler_0_MASK 0xff000000
+#define PM_FUSES_4__LPMLTemperatureScaler_0__SHIFT 0x18
+#define PM_FUSES_5__LPMLTemperatureScaler_7_MASK 0xff
+#define PM_FUSES_5__LPMLTemperatureScaler_7__SHIFT 0x0
+#define PM_FUSES_5__LPMLTemperatureScaler_6_MASK 0xff00
+#define PM_FUSES_5__LPMLTemperatureScaler_6__SHIFT 0x8
+#define PM_FUSES_5__LPMLTemperatureScaler_5_MASK 0xff0000
+#define PM_FUSES_5__LPMLTemperatureScaler_5__SHIFT 0x10
+#define PM_FUSES_5__LPMLTemperatureScaler_4_MASK 0xff000000
+#define PM_FUSES_5__LPMLTemperatureScaler_4__SHIFT 0x18
+#define PM_FUSES_6__LPMLTemperatureScaler_11_MASK 0xff
+#define PM_FUSES_6__LPMLTemperatureScaler_11__SHIFT 0x0
+#define PM_FUSES_6__LPMLTemperatureScaler_10_MASK 0xff00
+#define PM_FUSES_6__LPMLTemperatureScaler_10__SHIFT 0x8
+#define PM_FUSES_6__LPMLTemperatureScaler_9_MASK 0xff0000
+#define PM_FUSES_6__LPMLTemperatureScaler_9__SHIFT 0x10
+#define PM_FUSES_6__LPMLTemperatureScaler_8_MASK 0xff000000
+#define PM_FUSES_6__LPMLTemperatureScaler_8__SHIFT 0x18
+#define PM_FUSES_7__LPMLTemperatureScaler_15_MASK 0xff
+#define PM_FUSES_7__LPMLTemperatureScaler_15__SHIFT 0x0
+#define PM_FUSES_7__LPMLTemperatureScaler_14_MASK 0xff00
+#define PM_FUSES_7__LPMLTemperatureScaler_14__SHIFT 0x8
+#define PM_FUSES_7__LPMLTemperatureScaler_13_MASK 0xff0000
+#define PM_FUSES_7__LPMLTemperatureScaler_13__SHIFT 0x10
+#define PM_FUSES_7__LPMLTemperatureScaler_12_MASK 0xff000000
+#define PM_FUSES_7__LPMLTemperatureScaler_12__SHIFT 0x18
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta_MASK 0xffff
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta__SHIFT 0x0
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta_MASK 0xffff0000
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta__SHIFT 0x10
+#define PM_FUSES_9__Reserved6_MASK 0xffff
+#define PM_FUSES_9__Reserved6__SHIFT 0x0
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta_MASK 0xffff0000
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_3_MASK 0xff
+#define PM_FUSES_10__GnbLPML_3__SHIFT 0x0
+#define PM_FUSES_10__GnbLPML_2_MASK 0xff00
+#define PM_FUSES_10__GnbLPML_2__SHIFT 0x8
+#define PM_FUSES_10__GnbLPML_1_MASK 0xff0000
+#define PM_FUSES_10__GnbLPML_1__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_0_MASK 0xff000000
+#define PM_FUSES_10__GnbLPML_0__SHIFT 0x18
+#define PM_FUSES_11__GnbLPML_7_MASK 0xff
+#define PM_FUSES_11__GnbLPML_7__SHIFT 0x0
+#define PM_FUSES_11__GnbLPML_6_MASK 0xff00
+#define PM_FUSES_11__GnbLPML_6__SHIFT 0x8
+#define PM_FUSES_11__GnbLPML_5_MASK 0xff0000
+#define PM_FUSES_11__GnbLPML_5__SHIFT 0x10
+#define PM_FUSES_11__GnbLPML_4_MASK 0xff000000
+#define PM_FUSES_11__GnbLPML_4__SHIFT 0x18
+#define PM_FUSES_12__GnbLPML_11_MASK 0xff
+#define PM_FUSES_12__GnbLPML_11__SHIFT 0x0
+#define PM_FUSES_12__GnbLPML_10_MASK 0xff00
+#define PM_FUSES_12__GnbLPML_10__SHIFT 0x8
+#define PM_FUSES_12__GnbLPML_9_MASK 0xff0000
+#define PM_FUSES_12__GnbLPML_9__SHIFT 0x10
+#define PM_FUSES_12__GnbLPML_8_MASK 0xff000000
+#define PM_FUSES_12__GnbLPML_8__SHIFT 0x18
+#define PM_FUSES_13__GnbLPML_15_MASK 0xff
+#define PM_FUSES_13__GnbLPML_15__SHIFT 0x0
+#define PM_FUSES_13__GnbLPML_14_MASK 0xff00
+#define PM_FUSES_13__GnbLPML_14__SHIFT 0x8
+#define PM_FUSES_13__GnbLPML_13_MASK 0xff0000
+#define PM_FUSES_13__GnbLPML_13__SHIFT 0x10
+#define PM_FUSES_13__GnbLPML_12_MASK 0xff000000
+#define PM_FUSES_13__GnbLPML_12__SHIFT 0x18
+#define PM_FUSES_14__Reserved1_1_MASK 0xff
+#define PM_FUSES_14__Reserved1_1__SHIFT 0x0
+#define PM_FUSES_14__Reserved1_0_MASK 0xff00
+#define PM_FUSES_14__Reserved1_0__SHIFT 0x8
+#define PM_FUSES_14__GnbLPMLMinVid_MASK 0xff0000
+#define PM_FUSES_14__GnbLPMLMinVid__SHIFT 0x10
+#define PM_FUSES_14__GnbLPMLMaxVid_MASK 0xff000000
+#define PM_FUSES_14__GnbLPMLMaxVid__SHIFT 0x18
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd_MASK 0xffff
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd__SHIFT 0x0
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd_MASK 0xffff0000
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd__SHIFT 0x10
+#define SMU_PM_STATUS_0__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_0__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_1__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_1__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_2__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_2__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_3__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_3__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_4__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_4__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_5__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_5__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_6__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_6__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_7__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_7__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_8__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_8__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_9__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_9__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_10__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_10__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_11__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_11__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_12__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_12__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_13__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_13__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_14__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_14__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_15__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_15__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_16__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_16__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_17__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_17__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_18__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_18__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_19__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_19__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_20__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_20__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_21__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_21__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_22__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_22__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_23__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_23__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_24__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_24__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_25__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_25__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_26__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_26__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_27__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_27__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_28__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_28__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_29__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_29__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_30__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_30__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_31__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_31__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_32__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_32__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_33__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_33__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_34__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_34__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_35__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_35__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_36__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_36__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_37__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_37__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_38__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_38__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_39__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_39__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_40__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_40__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_41__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_41__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_42__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_42__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_43__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_43__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_44__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_44__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_45__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_45__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_46__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_46__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_47__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_47__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_48__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_48__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_49__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_49__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_50__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_50__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_51__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_51__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_52__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_52__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_53__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_53__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_54__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_54__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_55__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_55__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_56__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_56__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_57__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_57__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_58__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_58__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_59__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_59__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_60__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_60__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_61__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_61__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_62__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_62__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_63__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_63__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_64__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_64__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_65__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_65__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_66__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_66__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_67__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_67__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_68__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_68__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_69__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_69__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_70__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_70__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_71__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_71__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_72__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_72__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_73__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_73__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_74__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_74__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_75__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_75__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_76__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_76__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_77__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_77__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_78__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_78__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_79__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_79__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_80__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_80__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_81__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_81__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_82__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_82__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_83__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_83__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_84__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_84__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_85__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_85__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_86__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_86__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_87__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_87__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_88__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_88__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_89__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_89__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_90__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_90__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_91__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_91__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_92__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_92__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_93__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_93__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_94__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_94__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_95__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_95__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_96__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_96__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_97__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_97__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_98__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_98__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_99__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_99__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_100__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_100__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_101__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_101__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_102__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_102__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_103__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_103__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_104__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_104__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_105__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_105__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_106__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_106__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_107__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_107__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_108__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_108__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_109__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_109__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_110__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_110__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_111__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_111__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_112__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_112__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_113__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_113__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_114__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_114__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_115__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_115__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_116__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_116__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_117__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_117__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_118__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_118__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_119__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_119__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_120__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_120__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_121__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_121__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_122__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_122__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_123__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_123__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_124__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_124__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_125__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_125__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_126__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_126__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_127__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_127__DATA__SHIFT 0x0
+#define CG_THERMAL_INT_ENA__THERM_INTH_SET_MASK 0x1
+#define CG_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0
+#define CG_THERMAL_INT_ENA__THERM_INTL_SET_MASK 0x2
+#define CG_THERMAL_INT_ENA__THERM_INTL_SET__SHIFT 0x1
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET_MASK 0x4
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET__SHIFT 0x2
+#define CG_THERMAL_INT_ENA__THERM_INTH_CLR_MASK 0x8
+#define CG_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT 0x3
+#define CG_THERMAL_INT_ENA__THERM_INTL_CLR_MASK 0x10
+#define CG_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT 0x4
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR_MASK 0x20
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT 0x5
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK 0xff
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT 0x0
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK 0xff00
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT 0x8
+#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD_MASK 0xff0000
+#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD__SHIFT 0x10
+#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK 0x1000000
+#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK__SHIFT 0x18
+#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK 0x2000000
+#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK__SHIFT 0x19
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK 0x4000000
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK__SHIFT 0x1a
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK_MASK 0x8000000
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK__SHIFT 0x1b
+#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA_MASK 0x10000000
+#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA__SHIFT 0x1c
+#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT_MASK 0x1
+#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT__SHIFT 0x0
+#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT_MASK 0x2
+#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT__SHIFT 0x1
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT_MASK 0x4
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT__SHIFT 0x2
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT_MASK 0x8
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT__SHIFT 0x3
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x7
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x0
+#define CG_THERMAL_CTRL__THERM_INC_CLK_MASK 0x8
+#define CG_THERMAL_CTRL__THERM_INC_CLK__SHIFT 0x3
+#define CG_THERMAL_CTRL__SPARE_MASK 0x3ff0
+#define CG_THERMAL_CTRL__SPARE__SHIFT 0x4
+#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x3fc000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0xe
+#define CG_THERMAL_CTRL__RESERVED_MASK 0x1c00000
+#define CG_THERMAL_CTRL__RESERVED__SHIFT 0x16
+#define CG_THERMAL_CTRL__CTF_PAD_POLARITY_MASK 0x2000000
+#define CG_THERMAL_CTRL__CTF_PAD_POLARITY__SHIFT 0x19
+#define CG_THERMAL_CTRL__CTF_PAD_EN_MASK 0x4000000
+#define CG_THERMAL_CTRL__CTF_PAD_EN__SHIFT 0x1a
+#define CG_THERMAL_STATUS__SPARE_MASK 0x1ff
+#define CG_THERMAL_STATUS__SPARE__SHIFT 0x0
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x1fe00
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x9
+#define CG_THERMAL_STATUS__THERM_ALERT_MASK 0x20000
+#define CG_THERMAL_STATUS__THERM_ALERT__SHIFT 0x11
+#define CG_THERMAL_STATUS__GEN_STATUS_MASK 0x3c0000
+#define CG_THERMAL_STATUS__GEN_STATUS__SHIFT 0x12
+#define CG_THERMAL_INT__DIG_THERM_CTF_MASK 0xff
+#define CG_THERMAL_INT__DIG_THERM_CTF__SHIFT 0x0
+#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0xff00
+#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x8
+#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0xff0000
+#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x10
+#define CG_THERMAL_INT__THERM_INT_MASK_MASK 0xf000000
+#define CG_THERMAL_INT__THERM_INT_MASK__SHIFT 0x18
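
DIG_THERM_INTH and DIG_THERM_INTL in CG_THERMAL_INT hold the high and low temperature trip points (in degrees C in the SMU7 thermal code) that raise the thermal interrupts, and THERM_INT_MASK gates them. A hedged sketch of the usual read-modify-write programming sequence; rreg32_smc()/wreg32_smc() are hypothetical stand-ins for the driver's SMC register accessors, backed by a fake register so the sketch runs standalone:

#include <stdint.h>
#include <stdio.h>

#define CG_THERMAL_INT__DIG_THERM_INTH_MASK   0xff00
#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x8
#define CG_THERMAL_INT__DIG_THERM_INTL_MASK   0xff0000
#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x10

/* Hypothetical stand-ins for the driver's SMC register accessors. */
static uint32_t fake_smc_reg;
static uint32_t rreg32_smc(void) { return fake_smc_reg; }
static void wreg32_smc(uint32_t v) { fake_smc_reg = v; }

/* Program the high/low trip points (degrees C) without disturbing the
 * other CG_THERMAL_INT fields: a classic read-modify-write. */
static void set_thermal_range(uint32_t low_c, uint32_t high_c)
{
	uint32_t v = rreg32_smc();

	v &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK |
	       CG_THERMAL_INT__DIG_THERM_INTL_MASK);
	v |= (high_c << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) &
	     CG_THERMAL_INT__DIG_THERM_INTH_MASK;
	v |= (low_c << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT) &
	     CG_THERMAL_INT__DIG_THERM_INTL_MASK;
	wreg32_smc(v);
}

int main(void)
{
	set_thermal_range(90, 120); /* trip low at 90C, high at 120C */
	printf("CG_THERMAL_INT=0x%08x\n", fake_smc_reg);
	return 0;
}
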
+#define CG_MULT_THERMAL_CTRL__TS_FILTER_MASK 0xf
+#define CG_MULT_THERMAL_CTRL__TS_FILTER__SHIFT 0x0
+#define CG_MULT_THERMAL_CTRL__UNUSED_MASK 0x1f0
+#define CG_MULT_THERMAL_CTRL__UNUSED__SHIFT 0x4
+#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST_MASK 0x200
+#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST__SHIFT 0x9
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0xff00000
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x14
+#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR_MASK 0x10000000
+#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR__SHIFT 0x1c
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x1ff
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x0
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x3fe00
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9
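
CG_MULT_THERMAL_STATUS is a read-only status word: ASIC_MAX_TEMP is the hottest sensor reading and CTF_TEMP is the value compared against the critical-thermal-fault threshold. A sketch of decoding it follows; the * 1000 scaling to millidegrees matches how the SMU7 thermal code reports temperatures, but sign and saturation handling vary by ASIC, so treat the decode as illustrative:

#include <stdint.h>
#include <stdio.h>

#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK   0x1ff
#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x0
#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK        0x3fe00
#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT      0x9

/* Decode a CG_MULT_THERMAL_STATUS readback into millidegrees C. */
static void decode_mult_thermal_status(uint32_t v)
{
	uint32_t asic_max = (v & CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK) >>
			    CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT;
	uint32_t ctf = (v & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
		       CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;

	printf("asic_max=%u mC, ctf=%u mC\n", asic_max * 1000, ctf * 1000);
}

int main(void)
{
	/* Fabricated readback: CTF_TEMP = 64C, ASIC_MAX_TEMP = 58C. */
	decode_mult_thermal_status((64u << 9) | 58u);
	return 0;
}
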
+#define THM_TMON2_CTRL__POWER_DOWN_MASK 0x1
+#define THM_TMON2_CTRL__POWER_DOWN__SHIFT 0x0
+#define THM_TMON2_CTRL__BGADJ_MASK 0x1fe
+#define THM_TMON2_CTRL__BGADJ__SHIFT 0x1
+#define THM_TMON2_CTRL__BGADJ_MODE_MASK 0x200
+#define THM_TMON2_CTRL__BGADJ_MODE__SHIFT 0x9
+#define THM_TMON2_CTRL__TMON_PAUSE_MASK 0x400
+#define THM_TMON2_CTRL__TMON_PAUSE__SHIFT 0xa
+#define THM_TMON2_CTRL__INT_MEAS_EN_MASK 0x800
+#define THM_TMON2_CTRL__INT_MEAS_EN__SHIFT 0xb
+#define THM_TMON2_CTRL__DEBUG_MODE_MASK 0x1000
+#define THM_TMON2_CTRL__DEBUG_MODE__SHIFT 0xc
+#define THM_TMON2_CTRL__EN_CFG_SERDES_MASK 0x2000
+#define THM_TMON2_CTRL__EN_CFG_SERDES__SHIFT 0xd
+#define THM_TMON2_CTRL2__RDIL_PRESENT_MASK 0xffff
+#define THM_TMON2_CTRL2__RDIL_PRESENT__SHIFT 0x0
+#define THM_TMON2_CTRL2__RDIR_PRESENT_MASK 0xffff0000
+#define THM_TMON2_CTRL2__RDIR_PRESENT__SHIFT 0x10
+#define THM_TMON2_CSR_WR__CSR_WRITE_MASK 0x1
+#define THM_TMON2_CSR_WR__CSR_WRITE__SHIFT 0x0
+#define THM_TMON2_CSR_WR__CSR_READ_MASK 0x2
+#define THM_TMON2_CSR_WR__CSR_READ__SHIFT 0x1
+#define THM_TMON2_CSR_WR__CSR_ADDR_MASK 0xffc
+#define THM_TMON2_CSR_WR__CSR_ADDR__SHIFT 0x2
+#define THM_TMON2_CSR_WR__WRITE_DATA_MASK 0xfff000
+#define THM_TMON2_CSR_WR__WRITE_DATA__SHIFT 0xc
+#define THM_TMON2_CSR_WR__SPARE_MASK 0x1000000
+#define THM_TMON2_CSR_WR__SPARE__SHIFT 0x18
+#define THM_TMON2_CSR_RD__READ_DATA_MASK 0xfff
+#define THM_TMON2_CSR_RD__READ_DATA__SHIFT 0x0
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0xff
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x0
+#define CG_FDO_CTRL0__FAN_SPINUP_DUTY_MASK 0xff00
+#define CG_FDO_CTRL0__FAN_SPINUP_DUTY__SHIFT 0x8
+#define CG_FDO_CTRL0__FDO_PWM_MANUAL_MASK 0x10000
+#define CG_FDO_CTRL0__FDO_PWM_MANUAL__SHIFT 0x10
+#define CG_FDO_CTRL0__FDO_PWM_HYSTER_MASK 0x7e0000
+#define CG_FDO_CTRL0__FDO_PWM_HYSTER__SHIFT 0x11
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN_MASK 0x800000
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN__SHIFT 0x17
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_MASK 0xff000000
+#define CG_FDO_CTRL0__FDO_PWM_RAMP__SHIFT 0x18
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0xff
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x0
+#define CG_FDO_CTRL1__FMIN_DUTY_MASK 0xff00
+#define CG_FDO_CTRL1__FMIN_DUTY__SHIFT 0x8
+#define CG_FDO_CTRL1__M_MASK 0xff0000
+#define CG_FDO_CTRL1__M__SHIFT 0x10
+#define CG_FDO_CTRL1__RESERVED_MASK 0x3f000000
+#define CG_FDO_CTRL1__RESERVED__SHIFT 0x18
+#define CG_FDO_CTRL1__FDO_PWRDNB_MASK 0x40000000
+#define CG_FDO_CTRL1__FDO_PWRDNB__SHIFT 0x1e
+#define CG_FDO_CTRL2__TMIN_MASK 0xff
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x0
+#define CG_FDO_CTRL2__FAN_SPINUP_TIME_MASK 0x700
+#define CG_FDO_CTRL2__FAN_SPINUP_TIME__SHIFT 0x8
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x3800
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0xb
+#define CG_FDO_CTRL2__TMIN_HYSTER_MASK 0x1c000
+#define CG_FDO_CTRL2__TMIN_HYSTER__SHIFT 0xe
+#define CG_FDO_CTRL2__TMAX_MASK 0x1fe0000
+#define CG_FDO_CTRL2__TMAX__SHIFT 0x11
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xfe000000
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x19
+#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x7
+#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x0
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xfffffff8
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x3
+#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xffffffff
+#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x0
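
CG_TACH_CTRL and CG_TACH_STATUS implement fan-speed measurement: EDGE_PER_REV configures how many tachometer edges make one fan revolution, and TACH_PERIOD reports the measured period in reference-clock ticks. A sketch of converting a readback to RPM, modeled on the conversion in the CI fan-control code; the 100 MHz reference clock in the example is an assumption, since the real driver queries the ASIC's reference clock:

#include <stdint.h>
#include <stdio.h>

#define CG_TACH_STATUS__TACH_PERIOD_MASK   0xffffffff
#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x0

/* Convert a CG_TACH_STATUS readback to RPM. ref_clk_hz is the tach
 * reference clock; 100 MHz is assumed in main() below, whereas the driver
 * queries the real value. Returns 0 when no period has been measured. */
static uint32_t tach_status_to_rpm(uint32_t tach_status, uint64_t ref_clk_hz)
{
	uint32_t period = (tach_status & CG_TACH_STATUS__TACH_PERIOD_MASK) >>
			  CG_TACH_STATUS__TACH_PERIOD__SHIFT;

	if (period == 0)
		return 0;
	return (uint32_t)(60ull * ref_clk_hz / period);
}

int main(void)
{
	/* A period of 2,000,000 ticks at 100 MHz -> 3000 RPM. */
	printf("%u RPM\n", tach_status_to_rpm(2000000u, 100000000ull));
	return 0;
}
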
+#define CC_THM_STRAPS0__TMON0_BGADJ_MASK 0x1fe
+#define CC_THM_STRAPS0__TMON0_BGADJ__SHIFT 0x1
+#define CC_THM_STRAPS0__TMON1_BGADJ_MASK 0x1fe00
+#define CC_THM_STRAPS0__TMON1_BGADJ__SHIFT 0x9
+#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL_MASK 0x20000
+#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL__SHIFT 0x11
+#define CC_THM_STRAPS0__NUM_ACQ_MASK 0x1c0000
+#define CC_THM_STRAPS0__NUM_ACQ__SHIFT 0x12
+#define CC_THM_STRAPS0__TMON_CLK_SEL_MASK 0xe00000
+#define CC_THM_STRAPS0__TMON_CLK_SEL__SHIFT 0x15
+#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE_MASK 0x1000000
+#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE__SHIFT 0x18
+#define CC_THM_STRAPS0__CTF_DISABLE_MASK 0x2000000
+#define CC_THM_STRAPS0__CTF_DISABLE__SHIFT 0x19
+#define CC_THM_STRAPS0__TMON0_DISABLE_MASK 0x4000000
+#define CC_THM_STRAPS0__TMON0_DISABLE__SHIFT 0x1a
+#define CC_THM_STRAPS0__TMON1_DISABLE_MASK 0x8000000
+#define CC_THM_STRAPS0__TMON1_DISABLE__SHIFT 0x1b
+#define CC_THM_STRAPS0__TMON2_DISABLE_MASK 0x10000000
+#define CC_THM_STRAPS0__TMON2_DISABLE__SHIFT 0x1c
+#define CC_THM_STRAPS0__TMON3_DISABLE_MASK 0x20000000
+#define CC_THM_STRAPS0__TMON3_DISABLE__SHIFT 0x1d
+#define CC_THM_STRAPS0__UNUSED_MASK 0x80000000
+#define CC_THM_STRAPS0__UNUSED__SHIFT 0x1f
+#define THM_TMON0_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON0_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON0_INT_DATA__VALID_MASK 0x800
+#define THM_TMON0_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON1_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON1_INT_DATA__VALID_MASK 0x800
+#define THM_TMON1_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON2_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON2_INT_DATA__VALID_MASK 0x800
+#define THM_TMON2_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON0_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON1_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON1_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON1_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON1_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON2_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON2_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON2_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON2_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON0_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON0_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON0_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON0_STATUS__MEAS_DONE__SHIFT 0x5
+#define THM_TMON1_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON1_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON1_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON1_STATUS__MEAS_DONE__SHIFT 0x5
+#define THM_TMON2_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON2_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON2_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON2_STATUS__MEAS_DONE__SHIFT 0x5
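
Each TMON *_DATA word above packs an 11-bit Z value, a VALID flag, and a 12-bit raw temperature code into one register, so reading a field is the mirror image of the write path: mask, then shift down. A minimal sketch of that decode, again assuming the hypothetical rreg32() helper and a placeholder mmTHM_TMON0_INT_DATA offset:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t rreg32(uint32_t offset);	/* assumed MMIO read helper */

#define mmTHM_TMON0_INT_DATA 0x0	/* placeholder; real offset is in the *_d.h header */

#define THM_TMON0_INT_DATA__VALID_MASK 0x800
#define THM_TMON0_INT_DATA__TEMP_MASK 0xfff000
#define THM_TMON0_INT_DATA__TEMP__SHIFT 0xc

/* Fetch one TMON0 sample: returns true and stores the raw 12-bit
 * temperature code only when the sensor marked the sample VALID.
 * Converting the code to degrees is firmware/board specific and is
 * deliberately left out here. */
static bool tmon0_read_temp(uint32_t *temp_code)
{
	uint32_t v = rreg32(mmTHM_TMON0_INT_DATA);

	if (!(v & THM_TMON0_INT_DATA__VALID_MASK))
		return false;

	*temp_code = (v & THM_TMON0_INT_DATA__TEMP_MASK) >>
		     THM_TMON0_INT_DATA__TEMP__SHIFT;
	return true;
}
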
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x1
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x0
+#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x2
+#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x1
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x4
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x2
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x8
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x3
+#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x40
+#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x6
+#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI_MASK 0x100
+#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI__SHIFT 0x8
+#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI_MASK 0x200
+#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI__SHIFT 0x9
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x400
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0xa
+#define GENERAL_PWRMGT__SPARE11_MASK 0x800
+#define GENERAL_PWRMGT__SPARE11__SHIFT 0xb
+#define GENERAL_PWRMGT__GPU_COUNTER_ACPI_MASK 0x4000
+#define GENERAL_PWRMGT__GPU_COUNTER_ACPI__SHIFT 0xe
+#define GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK 0x8000
+#define GENERAL_PWRMGT__GPU_COUNTER_CLK__SHIFT 0xf
+#define GENERAL_PWRMGT__GPU_COUNTER_OFF_MASK 0x10000
+#define GENERAL_PWRMGT__GPU_COUNTER_OFF__SHIFT 0x10
+#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF_MASK 0x20000
+#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF__SHIFT 0x11
+#define GENERAL_PWRMGT__SPARE18_MASK 0x40000
+#define GENERAL_PWRMGT__SPARE18__SHIFT 0x12
+#define GENERAL_PWRMGT__ACPI_D3_VID_MASK 0x180000
+#define GENERAL_PWRMGT__ACPI_D3_VID__SHIFT 0x13
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x800000
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x17
+#define GENERAL_PWRMGT__SPARE27_MASK 0x8000000
+#define GENERAL_PWRMGT__SPARE27__SHIFT 0x1b
+#define GENERAL_PWRMGT__SPARE_MASK 0xf0000000
+#define GENERAL_PWRMGT__SPARE__SHIFT 0x1c
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK 0x3
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT 0x0
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MASK 0x4
+#define CNB_PWRMGT_CNTL__GNB_SLOW__SHIFT 0x2
+#define CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK 0x8
+#define CNB_PWRMGT_CNTL__FORCE_NB_PS1__SHIFT 0x3
+#define CNB_PWRMGT_CNTL__DPM_ENABLED_MASK 0x10
+#define CNB_PWRMGT_CNTL__DPM_ENABLED__SHIFT 0x4
+#define CNB_PWRMGT_CNTL__SPARE_MASK 0xffffffe0
+#define CNB_PWRMGT_CNTL__SPARE__SHIFT 0x5
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x1
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x0
+#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK 0x10
+#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT__SHIFT 0x4
+#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK 0x20
+#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT__SHIFT 0x5
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x4000
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0xe
+#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP_MASK 0x8000
+#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP__SHIFT 0xf
+#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER_MASK 0x1f0000
+#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER__SHIFT 0x10
+#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK 0x200000
+#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN__SHIFT 0x15
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE_MASK 0xf
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE__SHIFT 0x0
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_MASK 0xf0
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE__SHIFT 0x4
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK 0xf00
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT 0x8
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX_MASK 0xf000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX__SHIFT 0xc
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK 0x1f0000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT 0x10
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX_MASK 0x3e00000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX__SHIFT 0x15
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX_MASK 0x1c000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX__SHIFT 0x1a
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX_MASK 0xe0000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX__SHIFT 0x1d
+#define PWR_PCC_CONTROL__PCC_POLARITY_MASK 0x1
+#define PWR_PCC_CONTROL__PCC_POLARITY__SHIFT 0x0
+#define PWR_PCC_GPIO_SELECT__GPIO_MASK 0xffffffff
+#define PWR_PCC_GPIO_SELECT__GPIO__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define PLL_TEST_CNTL__TST_SRC_SEL_MASK 0xf
+#define PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
+#define PLL_TEST_CNTL__TST_REF_SEL_MASK 0xf0
+#define PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x4
+#define PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x7f00
+#define PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0x8
+#define PLL_TEST_CNTL__TST_RESET_MASK 0x8000
+#define PLL_TEST_CNTL__TST_RESET__SHIFT 0xf
+#define PLL_TEST_CNTL__TEST_COUNT_MASK 0xfffe0000
+#define PLL_TEST_CNTL__TEST_COUNT__SHIFT 0x11
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_MASK 0xffff
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT 0x0
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT_MASK 0xf0000
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT 0x10
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK 0x3
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT 0x0
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x3fff0
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x4
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x700000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x14
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK 0x3000000
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT 0x18
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE_MASK 0x10000000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE__SHIFT 0x1c
+#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION_MASK 0xffffffff
+#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION__SHIFT 0x0
+#define CG_ACPI_CNTL__SCLK_ACPI_DIV_MASK 0x7f
+#define CG_ACPI_CNTL__SCLK_ACPI_DIV__SHIFT 0x0
+#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP_MASK 0x80
+#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7
+#define SCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0
+#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK_MASK 0x10000
+#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK__SHIFT 0x10
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK_MASK 0x20000
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK__SHIFT 0x11
+#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK_MASK 0x40000
+#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK__SHIFT 0x12
+#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK_MASK 0x80000
+#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK__SHIFT 0x13
+#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK_MASK 0x100000
+#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK__SHIFT 0x14
+#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK_MASK 0x200000
+#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK__SHIFT 0x15
+#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK_MASK 0x400000
+#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK__SHIFT 0x16
+#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK_MASK 0x800000
+#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK__SHIFT 0x17
+#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK_MASK 0x1000000
+#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK__SHIFT 0x18
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK_MASK 0x2000000
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK__SHIFT 0x19
+#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE_MASK 0x4000000
+#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE__SHIFT 0x1a
+#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE_MASK 0x8000000
+#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE__SHIFT 0x1b
+#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK_MASK 0x10000000
+#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK__SHIFT 0x1c
+#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK_MASK 0x20000000
+#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK__SHIFT 0x1d
+#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK_MASK 0x40000000
+#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK__SHIFT 0x1e
+#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000
+#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK_MASK 0x1
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK_MASK 0x2
+#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK__SHIFT 0x1
+#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK_MASK 0x4
+#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK__SHIFT 0x2
+#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK_MASK 0x10
+#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK_MASK 0x40
+#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK__SHIFT 0x6
+#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK_MASK 0x80
+#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK_MASK 0x100
+#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK__SHIFT 0x8
+#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK_MASK 0x200
+#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK__SHIFT 0x9
+#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK_MASK 0x400
+#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK__SHIFT 0xa
+#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK_MASK 0x800
+#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK__SHIFT 0xb
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK_MASK 0x1000
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK__SHIFT 0xc
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK_MASK 0x2000
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK__SHIFT 0xd
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x4000
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0xe
+#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID_MASK 0xe00000
+#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID__SHIFT 0x15
+#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION_MASK 0xff000000
+#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION__SHIFT 0x18
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK_MASK 0x1
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK_MASK 0x2
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK__SHIFT 0x1
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK_MASK 0x4
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK__SHIFT 0x2
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK_MASK 0x10
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK_MASK 0x20
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK__SHIFT 0x5
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK_MASK 0x40
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK__SHIFT 0x6
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK_MASK 0x80
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK_MASK 0x100
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK__SHIFT 0x8
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK_MASK 0x200
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK__SHIFT 0x9
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK_MASK 0x400
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK__SHIFT 0xa
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK_MASK 0x800
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK__SHIFT 0xb
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK_MASK 0x1000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK__SHIFT 0xc
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK_MASK 0x2000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK__SHIFT 0xd
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK_MASK 0x4000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK__SHIFT 0xe
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK_MASK 0x8000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK__SHIFT 0xf
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID_MASK 0x7
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID_MASK 0x38
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE_MASK 0x10000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE__SHIFT 0x10
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID_MASK 0xe0000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID__SHIFT 0x11
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID_MASK 0x700000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID__SHIFT 0x14
+#define LCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7
+#define LCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0
+#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8
+#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3
+#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0
+#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4
+#define LCLK_DEEP_SLEEP_CNTL__RESERVED_MASK 0x7fff0000
+#define LCLK_DEEP_SLEEP_CNTL__RESERVED__SHIFT 0x10
+#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000
+#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f
+#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK_MASK 0x1
+#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK__SHIFT 0x0
+#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK_MASK 0x2
+#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK__SHIFT 0x1
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK_MASK 0x4
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK__SHIFT 0x2
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3_MASK 0x8
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3__SHIFT 0x3
+#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK_MASK 0x10
+#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK__SHIFT 0x4
+#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK_MASK 0x20
+#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK__SHIFT 0x5
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK_MASK 0x40
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK__SHIFT 0x6
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK_MASK 0x80
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK__SHIFT 0x7
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK_MASK 0x100
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK__SHIFT 0x8
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK_MASK 0x200
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK__SHIFT 0x9
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK_MASK 0x400
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK__SHIFT 0xa
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK_MASK 0x800
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK__SHIFT 0xb
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK_MASK 0x1000
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK__SHIFT 0xc
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK_MASK 0x2000
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK__SHIFT 0xd
+#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK_MASK 0x4000
+#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK__SHIFT 0xe
+#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK_MASK 0x8000
+#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK__SHIFT 0xf
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK_MASK 0x10000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK__SHIFT 0x10
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK_MASK 0x20000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK__SHIFT 0x11
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK_MASK 0x40000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK__SHIFT 0x12
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK_MASK 0x80000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK__SHIFT 0x13
+#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK_MASK 0x100000
+#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK__SHIFT 0x14
+#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x200000
+#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0x15
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_MASK 0xffc00000
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED__SHIFT 0x16
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX_MASK 0xf
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX__SHIFT 0x0
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX_MASK 0xf0
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX__SHIFT 0x4
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX_MASK 0xf00
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX__SHIFT 0x8
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX_MASK 0xf000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX__SHIFT 0xc
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX_MASK 0xf0000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX__SHIFT 0x10
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX_MASK 0xf00000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX__SHIFT 0x14
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0xf000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x18
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x1c
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_MASK 0xffff
+#define CG_ULV_PARAMETER__ULV_THRESHOLD__SHIFT 0x0
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT_MASK 0xf0000
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT__SHIFT 0x10
+#define SCLK_MIN_DIV__FRACV_MASK 0xfff
+#define SCLK_MIN_DIV__FRACV__SHIFT 0x0
+#define SCLK_MIN_DIV__INTV_MASK 0x7f000
+#define SCLK_MIN_DIV__INTV__SHIFT 0xc
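[Editor's aside, not part of the patch: every field in this generated header follows the same convention — a bit mask plus the shift of the field's least-significant bit. A minimal sketch of how such a pair is typically consumed, using the SCLK_MIN_DIV fields just defined; the FIELD_GET/FIELD_SET helpers below are illustrative stand-ins, not macros from this header or from the amdgpu driver.]

#include <stdint.h>

/* Illustrative helpers (not from this header): token-paste the field
 * name against the _MASK/__SHIFT suffixes defined above. */
#define FIELD_GET(reg, field) \
	(((reg) & field##_MASK) >> field##__SHIFT)
#define FIELD_SET(reg, field, val) \
	(((reg) & ~field##_MASK) | \
	 (((uint32_t)(val) << field##__SHIFT) & field##_MASK))

/* Read the integer divider, then write back a new fractional part. */
static inline uint32_t sclk_min_div_update(uint32_t reg, uint32_t fracv)
{
	uint32_t intv = FIELD_GET(reg, SCLK_MIN_DIV__INTV); /* bits 18:12 */
	(void)intv; /* e.g. sanity-check before reprogramming */
	return FIELD_SET(reg, SCLK_MIN_DIV__FRACV, fracv);  /* bits 11:0 */
}

[The same pattern applies to every _MASK/__SHIFT pair in this file.]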
+#define PWR_AVFS_SEL__AvfsSel_MASK 0xfffffff
+#define PWR_AVFS_SEL__AvfsSel__SHIFT 0x0
+#define PWR_AVFS_CNTL__MmBusIn_MASK 0xff
+#define PWR_AVFS_CNTL__MmBusIn__SHIFT 0x0
+#define PWR_AVFS_CNTL__MmLclRdEn_MASK 0x100
+#define PWR_AVFS_CNTL__MmLclRdEn__SHIFT 0x8
+#define PWR_AVFS_CNTL__MmLclWrEn_MASK 0x200
+#define PWR_AVFS_CNTL__MmLclWrEn__SHIFT 0x9
+#define PWR_AVFS_CNTL__MmLclSz_MASK 0xc00
+#define PWR_AVFS_CNTL__MmLclSz__SHIFT 0xa
+#define PWR_AVFS_CNTL__MmState_MASK 0x3f000
+#define PWR_AVFS_CNTL__MmState__SHIFT 0xc
+#define PWR_AVFS_CNTL__PsmScanMode_MASK 0x40000
+#define PWR_AVFS_CNTL__PsmScanMode__SHIFT 0x12
+#define PWR_AVFS_CNTL__PsmGater_MASK 0x80000
+#define PWR_AVFS_CNTL__PsmGater__SHIFT 0x13
+#define PWR_AVFS_CNTL__PsmTrst_MASK 0x100000
+#define PWR_AVFS_CNTL__PsmTrst__SHIFT 0x14
+#define PWR_AVFS_CNTL__PsmEn_MASK 0x200000
+#define PWR_AVFS_CNTL__PsmEn__SHIFT 0x15
+#define PWR_AVFS_CNTL__SkipPhaseEn_MASK 0x400000
+#define PWR_AVFS_CNTL__SkipPhaseEn__SHIFT 0x16
+#define PWR_AVFS_CNTL__Isolate_MASK 0x800000
+#define PWR_AVFS_CNTL__Isolate__SHIFT 0x17
+#define PWR_AVFS_CNTL__AvfsRst_MASK 0x1000000
+#define PWR_AVFS_CNTL__AvfsRst__SHIFT 0x18
+#define PWR_AVFS_CNTL__PccIsolateEn_MASK 0x2000000
+#define PWR_AVFS_CNTL__PccIsolateEn__SHIFT 0x19
+#define PWR_AVFS_CNTL__DeepSleepIsolateEn_MASK 0x4000000
+#define PWR_AVFS_CNTL__DeepSleepIsolateEn__SHIFT 0x1a
+#define PWR_AVFS0_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS0_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS0_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS0_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS0_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS0_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS1_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS1_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS1_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS1_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS1_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS1_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS2_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS2_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS2_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS2_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS2_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS2_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS3_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS3_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS3_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS3_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS3_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS3_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS4_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS4_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS4_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS4_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS4_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS4_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS5_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS5_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS5_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS5_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS5_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS5_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS6_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS6_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS6_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS6_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS6_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS6_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS7_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS7_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS7_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS7_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS7_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS7_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS8_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS8_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS8_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS8_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS8_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS8_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS9_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS9_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS9_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS9_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS9_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS9_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS10_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS10_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS10_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS10_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS10_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS10_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS11_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS11_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS11_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS11_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS11_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS11_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS12_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS12_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS12_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS12_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS12_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS12_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS13_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS13_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS13_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS13_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS13_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS13_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS14_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS14_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS14_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS14_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS14_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS14_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS15_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS15_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS15_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS15_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS15_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS15_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS16_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS16_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS16_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS16_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS16_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS16_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS17_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS17_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS17_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS17_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS17_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS17_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS18_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS18_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS18_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS18_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS18_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS18_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS19_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS19_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS19_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS19_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS19_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS19_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS20_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS20_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS20_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS20_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS20_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS20_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS21_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS21_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS21_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS21_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS21_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS21_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS22_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS22_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS22_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS22_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS22_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS22_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS23_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS23_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS23_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS23_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS23_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS23_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS24_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS24_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS24_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS24_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS24_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS24_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS25_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS25_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS25_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS25_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS25_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS25_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS26_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS26_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS26_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS26_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS26_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS26_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS27_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS27_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS27_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS27_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS27_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS27_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_CKS_ENABLE__STRETCH_ENABLE_MASK 0x1
+#define PWR_CKS_ENABLE__STRETCH_ENABLE__SHIFT 0x0
+#define PWR_CKS_ENABLE__masterReset_MASK 0x2
+#define PWR_CKS_ENABLE__masterReset__SHIFT 0x1
+#define PWR_CKS_ENABLE__staticEnable_MASK 0x4
+#define PWR_CKS_ENABLE__staticEnable__SHIFT 0x2
+#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT_MASK 0x8
+#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT__SHIFT 0x3
+#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN_MASK 0x10
+#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN__SHIFT 0x4
+#define PWR_CKS_ENABLE__MET_CTRL_SEL_MASK 0x60
+#define PWR_CKS_ENABLE__MET_CTRL_SEL__SHIFT 0x5
+#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN_MASK 0x80
+#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN__SHIFT 0x7
+#define PWR_CKS_CNTL__CKS_BYPASS_MASK 0x1
+#define PWR_CKS_CNTL__CKS_BYPASS__SHIFT 0x0
+#define PWR_CKS_CNTL__CKS_PCCEnable_MASK 0x2
+#define PWR_CKS_CNTL__CKS_PCCEnable__SHIFT 0x1
+#define PWR_CKS_CNTL__CKS_TEMP_COMP_MASK 0x4
+#define PWR_CKS_CNTL__CKS_TEMP_COMP__SHIFT 0x2
+#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK 0x78
+#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT__SHIFT 0x3
+#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS_MASK 0x80
+#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS__SHIFT 0x7
+#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE_MASK 0xf00
+#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE__SHIFT 0x8
+#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES_MASK 0xf000
+#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES__SHIFT 0xc
+#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ_MASK 0x10000
+#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ__SHIFT 0x10
+#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP_MASK 0x20000
+#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP__SHIFT 0x11
+#define PWR_CKS_CNTL__CKS_LDO_REFSEL_MASK 0x3c0000
+#define PWR_CKS_CNTL__CKS_LDO_REFSEL__SHIFT 0x12
+#define PWR_CKS_CNTL__DDT_DEBUS_SEL_MASK 0x400000
+#define PWR_CKS_CNTL__DDT_DEBUS_SEL__SHIFT 0x16
+#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL_MASK 0x7f800000
+#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL__SHIFT 0x17
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x4
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x4
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH_MASK 0x3ff
+#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_MASK 0xffff
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD__SHIFT 0x0
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT_MASK 0xf0000
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT__SHIFT 0x10
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN_MASK 0x1
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN__SHIFT 0x0
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT_MASK 0x2
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT__SHIFT 0x1
+#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT_MASK 0x4
+#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT__SHIFT 0x2
+#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE_MASK 0x8
+#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE__SHIFT 0x3
+#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ_MASK 0x1
+#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ__SHIFT 0x0
+#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x1
+#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x0
+#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x1
+#define LCAC_MC0_CNTL__MC0_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC0_CNTL__MC0_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC0_CNTL__MC0_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x0
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x0
+#define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x1
+#define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x0
+#define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x1
+#define LCAC_MC1_CNTL__MC1_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC1_CNTL__MC1_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC1_CNTL__MC1_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC1_CNTL__MC1_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x0
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x0
+#define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x1
+#define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x0
+#define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x1
+#define LCAC_MC2_CNTL__MC2_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC2_CNTL__MC2_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC2_CNTL__MC2_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC2_CNTL__MC2_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x0
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x0
+#define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x1
+#define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x0
+#define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x1
+#define LCAC_MC3_CNTL__MC3_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC3_CNTL__MC3_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC3_CNTL__MC3_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC3_CNTL__MC3_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x0
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x0
+#define LCAC_MC4_CNTL__MC4_ENABLE_MASK 0x1
+#define LCAC_MC4_CNTL__MC4_ENABLE__SHIFT 0x0
+#define LCAC_MC4_CNTL__MC4_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC4_CNTL__MC4_THRESHOLD__SHIFT 0x1
+#define LCAC_MC4_CNTL__MC4_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC4_CNTL__MC4_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC4_CNTL__MC4_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC4_CNTL__MC4_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL__SHIFT 0x0
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL__SHIFT 0x0
+#define LCAC_MC5_CNTL__MC5_ENABLE_MASK 0x1
+#define LCAC_MC5_CNTL__MC5_ENABLE__SHIFT 0x0
+#define LCAC_MC5_CNTL__MC5_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC5_CNTL__MC5_THRESHOLD__SHIFT 0x1
+#define LCAC_MC5_CNTL__MC5_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC5_CNTL__MC5_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC5_CNTL__MC5_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC5_CNTL__MC5_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x0
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x0
+#define LCAC_MC6_CNTL__MC6_ENABLE_MASK 0x1
+#define LCAC_MC6_CNTL__MC6_ENABLE__SHIFT 0x0
+#define LCAC_MC6_CNTL__MC6_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC6_CNTL__MC6_THRESHOLD__SHIFT 0x1
+#define LCAC_MC6_CNTL__MC6_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC6_CNTL__MC6_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC6_CNTL__MC6_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC6_CNTL__MC6_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL__SHIFT 0x0
+#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL__SHIFT 0x0
+#define LCAC_MC7_CNTL__MC7_ENABLE_MASK 0x1
+#define LCAC_MC7_CNTL__MC7_ENABLE__SHIFT 0x0
+#define LCAC_MC7_CNTL__MC7_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC7_CNTL__MC7_THRESHOLD__SHIFT 0x1
+#define LCAC_MC7_CNTL__MC7_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC7_CNTL__MC7_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC7_CNTL__MC7_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC7_CNTL__MC7_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL__SHIFT 0x0
+#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL__SHIFT 0x0
+#define LCAC_CPL_CNTL__CPL_ENABLE_MASK 0x1
+#define LCAC_CPL_CNTL__CPL_ENABLE__SHIFT 0x0
+#define LCAC_CPL_CNTL__CPL_THRESHOLD_MASK 0x1fffe
+#define LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT 0x1
+#define LCAC_CPL_CNTL__CPL_BLOCK_ID_MASK 0x3e0000
+#define LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT 0x11
+#define LCAC_CPL_CNTL__CPL_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT 0x16
+#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL_MASK 0xffffffff
+#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL__SHIFT 0x0
+#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL_MASK 0xffffffff
+#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL__SHIFT 0x0
+#define ROM_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define ROM_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define ROM_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define ROM_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define ROM_CNTL__SCK_OVERWRITE_MASK 0x2
+#define ROM_CNTL__SCK_OVERWRITE__SHIFT 0x1
+#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x4
+#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x2
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME_MASK 0xff00
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME__SHIFT 0x8
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME_MASK 0xff0000
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME__SHIFT 0x10
+#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0xf000000
+#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x18
+#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK_MASK 0xf0000000
+#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK__SHIFT 0x1c
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0xffffff
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x1000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x18
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x2000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0xc000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a
+#define ROM_STATUS__ROM_BUSY_MASK 0x1
+#define ROM_STATUS__ROM_BUSY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0xf
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0xff0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define ROM_INDEX__ROM_INDEX_MASK 0xffffff
+#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
+#define ROM_DATA__ROM_DATA_MASK 0xffffffff
+#define ROM_DATA__ROM_DATA__SHIFT 0x0
+#define ROM_START__ROM_START_MASK 0xffffff
+#define ROM_START__ROM_START__SHIFT 0x0
+#define ROM_SW_CNTL__DATA_SIZE_MASK 0xffff
+#define ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0
+#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x30000
+#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x40000
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x12
+#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x1
+#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0xff
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xffffff00
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8
+#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0_MASK 0xffffffff
+#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0__SHIFT 0x0
+#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32_MASK 0xffffffff
+#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15__SHIFT 0x10
+#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_OVRD_CU__OVRRD_SELECT_MASK 0xffff
+#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000
+#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10
+
+#endif /* SMU_7_1_3_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-bits.h b/drivers/gpu/drm/amd/include/atom-bits.h
index e8fae5c77514..e8fae5c77514 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom-bits.h
+++ b/drivers/gpu/drm/amd/include/atom-bits.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-names.h b/drivers/gpu/drm/amd/include/atom-names.h
index 6f907a5ffa5f..6f907a5ffa5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom-names.h
+++ b/drivers/gpu/drm/amd/include/atom-names.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-types.h b/drivers/gpu/drm/amd/include/atom-types.h
index 1125b866cdb0..1125b866cdb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom-types.h
+++ b/drivers/gpu/drm/amd/include/atom-types.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 44c5d4a4d1bf..44c5d4a4d1bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
new file mode 100644
index 000000000000..992dcd8a5c6a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _CGS_COMMON_H
+#define _CGS_COMMON_H
+
+#include "amd_shared.h"
+
+/**
+ * enum cgs_gpu_mem_type - GPU memory types
+ */
+enum cgs_gpu_mem_type {
+	CGS_GPU_MEM_TYPE__VISIBLE_FB,
+	CGS_GPU_MEM_TYPE__INVISIBLE_FB,
+	CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+	CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB,
+	CGS_GPU_MEM_TYPE__GART_CACHEABLE,
+	CGS_GPU_MEM_TYPE__GART_WRITECOMBINE
+};
+
+/**
+ * enum cgs_ind_reg - Indirect register spaces
+ */
+enum cgs_ind_reg {
+	CGS_IND_REG__MMIO,
+	CGS_IND_REG__PCIE,
+	CGS_IND_REG__SMC,
+	CGS_IND_REG__UVD_CTX,
+	CGS_IND_REG__DIDT,
+	CGS_IND_REG__AUDIO_ENDPT
+};
+
+/**
+ * enum cgs_clock - Clocks controlled by the SMU
+ */
+enum cgs_clock {
+	CGS_CLOCK__SCLK,
+	CGS_CLOCK__MCLK,
+	CGS_CLOCK__VCLK,
+	CGS_CLOCK__DCLK,
+	CGS_CLOCK__ECLK,
+	CGS_CLOCK__ACLK,
+	CGS_CLOCK__ICLK,
+	/* ... */
+};
+
+/**
+ * enum cgs_engine - Engines that can be statically power-gated
+ */
+enum cgs_engine {
+	CGS_ENGINE__UVD,
+	CGS_ENGINE__VCE,
+	CGS_ENGINE__VP8,
+	CGS_ENGINE__ACP_DMA,
+	CGS_ENGINE__ACP_DSP0,
+	CGS_ENGINE__ACP_DSP1,
+	CGS_ENGINE__ISP,
+	/* ... */
+};
+
+/**
+ * enum cgs_voltage_planes - Voltage planes for external camera HW
+ */
+enum cgs_voltage_planes {
+	CGS_VOLTAGE_PLANE__SENSOR0,
+	CGS_VOLTAGE_PLANE__SENSOR1,
+	/* ... */
+};
+
+/**
+ * enum cgs_ucode_id - Firmware types for different IPs
+ */
+enum cgs_ucode_id {
+	CGS_UCODE_ID_SMU = 0,
+	CGS_UCODE_ID_SDMA0,
+	CGS_UCODE_ID_SDMA1,
+	CGS_UCODE_ID_CP_CE,
+	CGS_UCODE_ID_CP_PFP,
+	CGS_UCODE_ID_CP_ME,
+	CGS_UCODE_ID_CP_MEC,
+	CGS_UCODE_ID_CP_MEC_JT1,
+	CGS_UCODE_ID_CP_MEC_JT2,
+	CGS_UCODE_ID_GMCON_RENG,
+	CGS_UCODE_ID_RLC_G,
+	CGS_UCODE_ID_MAXIMUM,
+};
+
+/**
+ * struct cgs_clock_limits - Clock limits
+ *
+ * Clocks are specified in 10KHz units.
+ */
+struct cgs_clock_limits {
+	unsigned min;		/**< Minimum supported frequency */
+	unsigned max;		/**< Maximum supported frequency */
+	unsigned sustainable;	/**< Thermally sustainable frequency */
+};
+
+/**
+ * struct cgs_firmware_info - Firmware information
+ */
+struct cgs_firmware_info {
+	uint16_t		version;
+	uint16_t		feature_version;
+	uint32_t		image_size;
+	uint64_t		mc_addr;
+	void			*kptr;
+};
+
+typedef unsigned long cgs_handle_t;
+
+/**
+ * cgs_gpu_mem_info() - Return information about memory heaps
+ * @cgs_device: opaque device handle
+ * @type:	memory type
+ * @mc_start:	Start MC address of the heap (output)
+ * @mc_size:	MC address space size (output)
+ * @mem_size:	maximum amount of memory available for allocation (output)
+ *
+ * This function returns information about memory heaps. The type
+ * parameter is used to select the memory heap. The mc_start and
+ * mc_size for GART heaps may be bigger than the memory available for
+ * allocation.
+ *
+ * mc_start and mc_size are undefined for non-contiguous FB memory
+ * types, since buffers allocated with these types may or may not be
+ * GART mapped.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+				  uint64_t *mc_start, uint64_t *mc_size,
+				  uint64_t *mem_size);
+
+/**
+ * cgs_gmap_kmem() - map kernel memory to GART aperture
+ * @cgs_device:	opaque device handle
+ * @kmem:	pointer to kernel memory
+ * @size:	size to map
+ * @min_offset: minimum offset from start of GART aperture
+ * @max_offset: maximum offset from start of GART aperture
+ * @kmem_handle: kernel memory handle (output)
+ * @mcaddr:	MC address (output)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
+			       uint64_t min_offset, uint64_t max_offset,
+			       cgs_handle_t *kmem_handle, uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_kmem() - unmap kernel memory
+ * @cgs_device:	opaque device handle
+ * @kmem_handle: kernel memory handle returned by gmap_kmem
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
+
+/**
+ * cgs_alloc_gpu_mem() - Allocate GPU memory
+ * @cgs_device:	opaque device handle
+ * @type:	memory type
+ * @size:	size in bytes
+ * @align:	alignment in bytes
+ * @min_offset: minimum offset from start of heap
+ * @max_offset: maximum offset from start of heap
+ * @handle:	memory handle (output)
+ *
+ * The memory types CGS_GPU_MEM_TYPE__*_CONTIG_FB force contiguous
+ * memory allocation. This guarantees that the MC address returned by
+ * cgs_gmap_gpu_mem is not mapped through the GART. The non-contiguous
+ * FB memory types may be GART mapped depending on memory
+ * fragmentation and memory allocator policies.
+ *
+ * If min/max_offset are non-0, the allocation will be forced to
+ * reside between these offsets in its respective memory heap. The
+ * base address that the offset relates to, depends on the memory
+ * type.
+ *
+ * - CGS_GPU_MEM_TYPE__*_CONTIG_FB: FB MC base address
+ * - CGS_GPU_MEM_TYPE__GART_*:	    GART aperture base address
+ * - others:			    undefined, don't use with max_offset
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+				   uint64_t size, uint64_t align,
+				   uint64_t min_offset, uint64_t max_offset,
+				   cgs_handle_t *handle);
+
+/**
+ * cgs_free_gpu_mem() - Free GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_gmap_gpu_mem() - GPU-map GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ * @mcaddr:	MC address (output)
+ *
+ * Ensures that a buffer is GPU accessible and returns its MC address.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+				  uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_gpu_mem() - GPU-unmap GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ *
+ * Allows the buffer to be migrated while it's not used by the GPU.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_kmap_gpu_mem() - Kernel-map GPU memory
+ *
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ * @map:	Kernel virtual address the memory was mapped to (output)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+				  void **map);
+
+/**
+ * cgs_kunmap_gpu_mem() - Kernel-unmap GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_read_register() - Read an MMIO register
+ * @cgs_device:	opaque device handle
+ * @offset:	register offset
+ *
+ * Return:  register value
+ */
+typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
+
+/**
+ * cgs_write_register() - Write an MMIO register
+ * @cgs_device:	opaque device handle
+ * @offset:	register offset
+ * @value:	register value
+ */
+typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
+				     uint32_t value);
+
+/**
+ * cgs_read_ind_register() - Read an indirect register
+ * @cgs_device:	opaque device handle
+ * @space:	indirect register space
+ * @index:	register index
+ *
+ * Return:  register value
+ */
+typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+					    unsigned index);
+
+/**
+ * cgs_write_ind_register() - Write an indirect register
+ * @cgs_device:	opaque device handle
+ * @space:	indirect register space
+ * @index:	register index
+ * @value:	register value
+ */
+typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+					 unsigned index, uint32_t value);
+
+/**
+ * cgs_read_pci_config_byte() - Read byte from PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address
+ *
+ * Return:  Value read
+ */
+typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_word() - Read word from PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be word-aligned
+ *
+ * Return:  Value read
+ */
+typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_dword() - Read dword from PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be dword-aligned
+ *
+ * Return:  Value read
+ */
+typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
+						unsigned addr);
+
+/**
+ * cgs_write_pci_config_byte() - Write byte to PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address
+ * @value:	value to write
+ */
+typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
+					    uint8_t value);
+
+/**
+ * cgs_write_pci_config_word() - Write word to PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be word-aligned
+ * @value:	value to write
+ */
+typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
+					    uint16_t value);
+
+/**
+ * cgs_write_pci_config_dword() - Write dword to PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be dword-aligned
+ * @value:	value to write
+ */
+typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
+					     uint32_t value);
+
+/**
+ * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
+ * @cgs_device:	opaque device handle
+ * @table:	data table index
+ * @size:	size of the table (output, may be NULL)
+ * @frev:	table format revision (output, may be NULL)
+ * @crev:	table content revision (output, may be NULL)
+ *
+ * Return: Pointer to start of the table, or NULL on failure
+ */
+typedef const void *(*cgs_atom_get_data_table_t)(
+	void *cgs_device, unsigned table,
+	uint16_t *size, uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions
+ * @cgs_device:	opaque device handle
+ * @table:	data table index
+ * @frev:	table format revision (output, may be NULL)
+ * @crev:	table content revision (output, may be NULL)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
+					     uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table
+ * @cgs_device: opaque device handle
+ * @table:	command table index
+ * @args:	arguments
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
+					 unsigned table, void *args);
+
+/**
+ * cgs_create_pm_request() - Create a power management request
+ * @cgs_device:	opaque device handle
+ * @request:	handle of created PM request (output)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
+
+/**
+ * cgs_destroy_pm_request() - Destroy a power management request
+ * @cgs_device:	opaque device handle
+ * @request:	handle of created PM request
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
+
+/**
+ * cgs_set_pm_request() - Activate or deactivate a PM request
+ * @cgs_device:	opaque device handle
+ * @request:	PM request handle
+ * @active:	0 = deactivate, non-0 = activate
+ *
+ * While a PM request is active, its minimum clock requests are taken
+ * into account as the requested engines are powered up. When the
+ * request is inactive, the engines may be powered down and clocks may
+ * be lower, depending on other PM requests by other driver
+ * components.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
+				    int active);
+
+/**
+ * cgs_pm_request_clock() - Request a minimum frequency for a specific clock
+ * @cgs_device:	opaque device handle
+ * @request:	PM request handle
+ * @clock:	which clock?
+ * @freq:	requested min. frequency in 10KHz units (0 to clear request)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
+				      enum cgs_clock clock, unsigned freq);
+
+/**
+ * cgs_pm_request_engine() - Request an engine to be powered up
+ * @cgs_device:	opaque device handle
+ * @request:	PM request handle
+ * @engine:	which engine?
+ * @powered:	0 = powered down, non-0 = powered up
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
+				       enum cgs_engine engine, int powered);
+
+/**
+ * cgs_pm_query_clock_limits() - Query clock frequency limits
+ * @cgs_device:	opaque device handle
+ * @clock:	which clock?
+ * @limits:	clock limits
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
+					   enum cgs_clock clock,
+					   struct cgs_clock_limits *limits);
+
+/**
+ * cgs_set_camera_voltages() - Apply specific voltages to PMIC voltage planes
+ * @cgs_device:	opaque device handle
+ * @mask:	bitmask of voltages to change (1<<CGS_VOLTAGE_PLANE__xyz|...)
+ * @voltages:	pointer to array of voltage values in 1mV units
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
+					 const uint32_t *voltages);
+
+/**
+ * cgs_get_firmware_info() - Get the firmware information from the core driver
+ * @cgs_device: opaque device handle
+ * @type: the firmware type
+ * @info: returned firmware information
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_get_firmware_info)(void *cgs_device,
+				     enum cgs_ucode_id type,
+				     struct cgs_firmware_info *info);
+
+/* Set the powergating state of the given IP block. */
+typedef int (*cgs_set_powergating_state)(void *cgs_device,
+				  enum amd_ip_block_type block_type,
+				  enum amd_powergating_state state);
+
+/* Set the clockgating state of the given IP block. */
+typedef int (*cgs_set_clockgating_state)(void *cgs_device,
+				  enum amd_ip_block_type block_type,
+				  enum amd_clockgating_state state);
+
+struct cgs_ops {
+	/* memory management calls (similar to KFD interface) */
+	cgs_gpu_mem_info_t gpu_mem_info;
+	cgs_gmap_kmem_t gmap_kmem;
+	cgs_gunmap_kmem_t gunmap_kmem;
+	cgs_alloc_gpu_mem_t alloc_gpu_mem;
+	cgs_free_gpu_mem_t free_gpu_mem;
+	cgs_gmap_gpu_mem_t gmap_gpu_mem;
+	cgs_gunmap_gpu_mem_t gunmap_gpu_mem;
+	cgs_kmap_gpu_mem_t kmap_gpu_mem;
+	cgs_kunmap_gpu_mem_t kunmap_gpu_mem;
+	/* MMIO access */
+	cgs_read_register_t read_register;
+	cgs_write_register_t write_register;
+	cgs_read_ind_register_t read_ind_register;
+	cgs_write_ind_register_t write_ind_register;
+	/* PCI configuration space access */
+	cgs_read_pci_config_byte_t read_pci_config_byte;
+	cgs_read_pci_config_word_t read_pci_config_word;
+	cgs_read_pci_config_dword_t read_pci_config_dword;
+	cgs_write_pci_config_byte_t write_pci_config_byte;
+	cgs_write_pci_config_word_t write_pci_config_word;
+	cgs_write_pci_config_dword_t write_pci_config_dword;
+	/* ATOM BIOS */
+	cgs_atom_get_data_table_t atom_get_data_table;
+	cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
+	cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
+	/* Power management */
+	cgs_create_pm_request_t create_pm_request;
+	cgs_destroy_pm_request_t destroy_pm_request;
+	cgs_set_pm_request_t set_pm_request;
+	cgs_pm_request_clock_t pm_request_clock;
+	cgs_pm_request_engine_t pm_request_engine;
+	cgs_pm_query_clock_limits_t pm_query_clock_limits;
+	cgs_set_camera_voltages_t set_camera_voltages;
+	/* Firmware Info */
+	cgs_get_firmware_info get_firmware_info;
+	/* clockgating/powergating interface */
+	cgs_set_powergating_state set_powergating_state;
+	cgs_set_clockgating_state set_clockgating_state;
+	/* ACPI (TODO) */
+};
+
+struct cgs_os_ops; /* To be defined in the OS-specific CGS header */
+
+struct cgs_device {
+	const struct cgs_ops *ops;
+	const struct cgs_os_ops *os_ops;
+	/* to be embedded at the start of driver private structure */
+};
+
+/* Convenience macros that make CGS indirect function calls look like
+ * normal function calls */
+#define CGS_CALL(func,dev,...) \
+	(((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
+#define CGS_OS_CALL(func,dev,...) \
+	(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
+
+#define cgs_gpu_mem_info(dev,type,mc_start,mc_size,mem_size)		\
+	CGS_CALL(gpu_mem_info,dev,type,mc_start,mc_size,mem_size)
+#define cgs_gmap_kmem(dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)	\
+	CGS_CALL(gmap_kmem,dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)
+#define cgs_gunmap_kmem(dev,kmem_handle)	\
+	CGS_CALL(gunmap_kmem,dev,kmem_handle)
+#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle)	\
+	CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
+#define cgs_free_gpu_mem(dev,handle)		\
+	CGS_CALL(free_gpu_mem,dev,handle)
+#define cgs_gmap_gpu_mem(dev,handle,mcaddr)	\
+	CGS_CALL(gmap_gpu_mem,dev,handle,mcaddr)
+#define cgs_gunmap_gpu_mem(dev,handle)		\
+	CGS_CALL(gunmap_gpu_mem,dev,handle)
+#define cgs_kmap_gpu_mem(dev,handle,map)	\
+	CGS_CALL(kmap_gpu_mem,dev,handle,map)
+#define cgs_kunmap_gpu_mem(dev,handle)		\
+	CGS_CALL(kunmap_gpu_mem,dev,handle)
+
+#define cgs_read_register(dev,offset)		\
+	CGS_CALL(read_register,dev,offset)
+#define cgs_write_register(dev,offset,value)		\
+	CGS_CALL(write_register,dev,offset,value)
+#define cgs_read_ind_register(dev,space,index)		\
+	CGS_CALL(read_ind_register,dev,space,index)
+#define cgs_write_ind_register(dev,space,index,value)		\
+	CGS_CALL(write_ind_register,dev,space,index,value)
+
+#define cgs_read_pci_config_byte(dev,addr)	\
+	CGS_CALL(read_pci_config_byte,dev,addr)
+#define cgs_read_pci_config_word(dev,addr)	\
+	CGS_CALL(read_pci_config_word,dev,addr)
+#define cgs_read_pci_config_dword(dev,addr)		\
+	CGS_CALL(read_pci_config_dword,dev,addr)
+#define cgs_write_pci_config_byte(dev,addr,value)	\
+	CGS_CALL(write_pci_config_byte,dev,addr,value)
+#define cgs_write_pci_config_word(dev,addr,value)	\
+	CGS_CALL(write_pci_config_word,dev,addr,value)
+#define cgs_write_pci_config_dword(dev,addr,value)	\
+	CGS_CALL(write_pci_config_dword,dev,addr,value)
+
+#define cgs_atom_get_data_table(dev,table,size,frev,crev)	\
+	CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
+#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev)	\
+	CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev)
+#define cgs_atom_exec_cmd_table(dev,table,args)		\
+	CGS_CALL(atom_exec_cmd_table,dev,table,args)
+
+#define cgs_create_pm_request(dev,request)	\
+	CGS_CALL(create_pm_request,dev,request)
+#define cgs_destroy_pm_request(dev,request)		\
+	CGS_CALL(destroy_pm_request,dev,request)
+#define cgs_set_pm_request(dev,request,active)		\
+	CGS_CALL(set_pm_request,dev,request,active)
+#define cgs_pm_request_clock(dev,request,clock,freq)		\
+	CGS_CALL(pm_request_clock,dev,request,clock,freq)
+#define cgs_pm_request_engine(dev,request,engine,powered)	\
+	CGS_CALL(pm_request_engine,dev,request,engine,powered)
+#define cgs_pm_query_clock_limits(dev,clock,limits)		\
+	CGS_CALL(pm_query_clock_limits,dev,clock,limits)
+#define cgs_set_camera_voltages(dev,mask,voltages)	\
+	CGS_CALL(set_camera_voltages,dev,mask,voltages)
+#define cgs_get_firmware_info(dev, type, info)	\
+	CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_set_powergating_state(dev, block_type, state)	\
+	CGS_CALL(set_powergating_state, dev, block_type, state)
+#define cgs_set_clockgating_state(dev, block_type, state)	\
+	CGS_CALL(set_clockgating_state, dev, block_type, state)
+
+#endif /* _CGS_COMMON_H */
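
For context, a minimal usage sketch of the memory and register helpers declared above. This is not part of the patch: the device handle, the register offset/index values, and the error paths are illustrative assumptions.

#include <linux/string.h>
#include "cgs_common.h"

static int example_cgs_usage(void *cgs_device)
{
	cgs_handle_t handle;
	uint64_t mcaddr;
	void *cpu_ptr;
	int ret;

	/* Allocate one page of GART-mapped, write-combined memory. */
	ret = cgs_alloc_gpu_mem(cgs_device, CGS_GPU_MEM_TYPE__GART_WRITECOMBINE,
				4096, 4096, 0, 0, &handle);
	if (ret)
		return ret;

	/* Make the buffer GPU-accessible and obtain its MC address. */
	ret = cgs_gmap_gpu_mem(cgs_device, handle, &mcaddr);
	if (ret)
		goto out_free;

	/* Map the buffer into the kernel address space and clear it. */
	ret = cgs_kmap_gpu_mem(cgs_device, handle, &cpu_ptr);
	if (ret)
		goto out_gunmap;
	memset(cpu_ptr, 0, 4096);
	cgs_kunmap_gpu_mem(cgs_device, handle);

	/* MMIO and indirect register access go through the same ops table;
	 * the offset and index used here are hypothetical. */
	cgs_write_register(cgs_device, 0x100, 0x1);
	(void)cgs_read_ind_register(cgs_device, CGS_IND_REG__SMC, 0x10);

out_gunmap:
	cgs_gunmap_gpu_mem(cgs_device, handle);
out_free:
	cgs_free_gpu_mem(cgs_device, handle);
	return ret;
}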
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
new file mode 100644
index 000000000000..488642f08267
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _CGS_LINUX_H
+#define _CGS_LINUX_H
+
+#include "cgs_common.h"
+
+/**
+ * cgs_import_gpu_mem() - Import dmabuf handle
+ * @cgs_device:  opaque device handle
+ * @dmabuf_fd:   DMABuf file descriptor
+ * @handle:      memory handle (output)
+ *
+ * Must be called in the process context that dmabuf_fd belongs to.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
+				    cgs_handle_t *handle);
+
+/**
+ * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
+ * @private_data:  private data provided to cgs_add_irq_source
+ * @src_id:        interrupt source ID
+ * @type:          interrupt type
+ * @enabled:       0 = disable source, non-0 = enable source
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_source_set_func_t)(void *private_data,
+					 unsigned src_id, unsigned type,
+					 int enabled);
+
+/**
+ * cgs_irq_handler_func() - Interrupt handler callback
+ * @private_data:  private data provided to cgs_add_irq_source
+ * @src_id:        interrupt source ID
+ * @iv_entry:      pointer to raw ih ring entry
+ *
+ * This callback runs in interrupt context.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_handler_func_t)(void *private_data,
+				      unsigned src_id, const uint32_t *iv_entry);
+
+/**
+ * cgs_add_irq_source() - Add an IRQ source
+ * @cgs_device:    opaque device handle
+ * @src_id:        interrupt source ID
+ * @num_types:     number of interrupt types that can be independently enabled
+ * @set:           callback function to enable/disable an interrupt type
+ * @handler:       interrupt handler callback
+ * @private_data:  private data to pass to callback functions
+ *
+ * The same IRQ source can be added only once. Adding an IRQ source
+ * indicates ownership of that IRQ source and all its IRQ types.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
+				    unsigned num_types,
+				    cgs_irq_source_set_func_t set,
+				    cgs_irq_handler_func_t handler,
+				    void *private_data);
+
+/**
+ * cgs_irq_get() - Request enabling an IRQ source and type
+ * @cgs_device:  opaque device handle
+ * @src_id:      interrupt source ID
+ * @type:        interrupt type
+ *
+ * cgs_irq_get and cgs_irq_put calls must be balanced. They count
+ * "references" to IRQ sources.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
+
+/**
+ * cgs_irq_put() - Indicate IRQ source is no longer needed
+ * @cgs_device:  opaque device handle
+ * @src_id:      interrupt source ID
+ * @type:        interrupt type
+ *
+ * cgs_irq_get and cgs_irq_put calls must be balanced. They count
+ * "references" to IRQ sources. Even after cgs_irq_put is called, the
+ * IRQ handler may still be called if there are more references to
+ * the IRQ source.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
+
+struct cgs_os_ops {
+	cgs_import_gpu_mem_t import_gpu_mem;
+
+	/* IRQ handling */
+	cgs_add_irq_source_t add_irq_source;
+	cgs_irq_get_t irq_get;
+	cgs_irq_put_t irq_put;
+};
+
+#define cgs_import_gpu_mem(dev,dmabuf_fd,handle)		\
+	CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
+#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
+	CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler,	\
+		    private_data)
+#define cgs_irq_get(dev,src_id,type)		\
+	CGS_OS_CALL(irq_get,dev,src_id,type)
+#define cgs_irq_put(dev,src_id,type)		\
+	CGS_OS_CALL(irq_put,dev,src_id,type)
+
+#endif /* _CGS_LINUX_H */
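
For context, a sketch of how a driver component might register and enable an interrupt source through the Linux CGS ops above. The source ID and the empty callback bodies are illustrative assumptions, not part of the patch.

#include "cgs_linux.h"

static int example_irq_set(void *private_data, unsigned src_id,
			   unsigned type, int enabled)
{
	/* Program the hardware to enable or disable this interrupt type. */
	return 0;
}

static int example_irq_handler(void *private_data, unsigned src_id,
			       const uint32_t *iv_entry)
{
	/* Decode the raw IH ring entry; this runs in interrupt context. */
	return 0;
}

static int example_irq_setup(void *cgs_device)
{
	const unsigned src_id = 42;	/* hypothetical source ID */
	int ret;

	/* Claim the source and its single interrupt type. */
	ret = cgs_add_irq_source(cgs_device, src_id, 1, example_irq_set,
				 example_irq_handler, NULL);
	if (ret)
		return ret;

	/* Reference-counted enable; must be balanced by cgs_irq_put(). */
	return cgs_irq_get(cgs_device, src_id, 0);
}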
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9080daa116b6..888250b33ea8 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -52,7 +52,8 @@ enum kgd_engine_type {
 	KGD_ENGINE_MEC1,
 	KGD_ENGINE_MEC2,
 	KGD_ENGINE_RLC,
-	KGD_ENGINE_SDMA,
+	KGD_ENGINE_SDMA1,
+	KGD_ENGINE_SDMA2,
 	KGD_ENGINE_MAX
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
index 0030f726e68c..ee6978b30b77 100644
--- a/drivers/gpu/drm/amd/amdgpu/pptable.h
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -146,6 +146,9 @@ typedef struct _ATOM_PPLIB_EXTENDEDHEADER
 #define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE   0x00200000   // Does the driver supports VR HOT GPIO Configurable.
 #define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION   0x00400000            // Does the driver supports Temp Inversion feature.
 #define ATOM_PP_PLATFORM_CAP_EVV    0x00800000
+#define ATOM_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL    0x01000000
+#define ATOM_PP_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE      0x02000000
+#define ATOM_PP_PLATFORM_CAP_DISABLE_USING_ACTUAL_TEMPERATURE_FOR_POWER_CALC 0x04000000
 
 typedef struct _ATOM_PPLIB_POWERPLAYTABLE
 {
@@ -673,7 +676,8 @@ typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
       UCHAR revid;
       ATOM_PowerTune_Table power_tune_table;
       USHORT usMaximumPowerDeliveryLimit;
-      USHORT usReserve[7];
+      USHORT usTjMax;
+      USHORT usReserve[6];
 } ATOM_PPLIB_POWERTUNE_Table_V1;
 
 #define ATOM_PPM_A_A    1
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
new file mode 100644
index 000000000000..65cfacd7a66c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VI_STRUCTS_H_
+#define VI_STRUCTS_H_
+
+struct vi_sdma_mqd {
+	uint32_t sdmax_rlcx_rb_cntl;
+	uint32_t sdmax_rlcx_rb_base;
+	uint32_t sdmax_rlcx_rb_base_hi;
+	uint32_t sdmax_rlcx_rb_rptr;
+	uint32_t sdmax_rlcx_rb_wptr;
+	uint32_t sdmax_rlcx_rb_wptr_poll_cntl;
+	uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi;
+	uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo;
+	uint32_t sdmax_rlcx_rb_rptr_addr_hi;
+	uint32_t sdmax_rlcx_rb_rptr_addr_lo;
+	uint32_t sdmax_rlcx_ib_cntl;
+	uint32_t sdmax_rlcx_ib_rptr;
+	uint32_t sdmax_rlcx_ib_offset;
+	uint32_t sdmax_rlcx_ib_base_lo;
+	uint32_t sdmax_rlcx_ib_base_hi;
+	uint32_t sdmax_rlcx_ib_size;
+	uint32_t sdmax_rlcx_skip_cntl;
+	uint32_t sdmax_rlcx_context_status;
+	uint32_t sdmax_rlcx_doorbell;
+	uint32_t sdmax_rlcx_virtual_addr;
+	uint32_t sdmax_rlcx_ape1_cntl;
+	uint32_t sdmax_rlcx_doorbell_log;
+	uint32_t reserved_22;
+	uint32_t reserved_23;
+	uint32_t reserved_24;
+	uint32_t reserved_25;
+	uint32_t reserved_26;
+	uint32_t reserved_27;
+	uint32_t reserved_28;
+	uint32_t reserved_29;
+	uint32_t reserved_30;
+	uint32_t reserved_31;
+	uint32_t reserved_32;
+	uint32_t reserved_33;
+	uint32_t reserved_34;
+	uint32_t reserved_35;
+	uint32_t reserved_36;
+	uint32_t reserved_37;
+	uint32_t reserved_38;
+	uint32_t reserved_39;
+	uint32_t reserved_40;
+	uint32_t reserved_41;
+	uint32_t reserved_42;
+	uint32_t reserved_43;
+	uint32_t reserved_44;
+	uint32_t reserved_45;
+	uint32_t reserved_46;
+	uint32_t reserved_47;
+	uint32_t reserved_48;
+	uint32_t reserved_49;
+	uint32_t reserved_50;
+	uint32_t reserved_51;
+	uint32_t reserved_52;
+	uint32_t reserved_53;
+	uint32_t reserved_54;
+	uint32_t reserved_55;
+	uint32_t reserved_56;
+	uint32_t reserved_57;
+	uint32_t reserved_58;
+	uint32_t reserved_59;
+	uint32_t reserved_60;
+	uint32_t reserved_61;
+	uint32_t reserved_62;
+	uint32_t reserved_63;
+	uint32_t reserved_64;
+	uint32_t reserved_65;
+	uint32_t reserved_66;
+	uint32_t reserved_67;
+	uint32_t reserved_68;
+	uint32_t reserved_69;
+	uint32_t reserved_70;
+	uint32_t reserved_71;
+	uint32_t reserved_72;
+	uint32_t reserved_73;
+	uint32_t reserved_74;
+	uint32_t reserved_75;
+	uint32_t reserved_76;
+	uint32_t reserved_77;
+	uint32_t reserved_78;
+	uint32_t reserved_79;
+	uint32_t reserved_80;
+	uint32_t reserved_81;
+	uint32_t reserved_82;
+	uint32_t reserved_83;
+	uint32_t reserved_84;
+	uint32_t reserved_85;
+	uint32_t reserved_86;
+	uint32_t reserved_87;
+	uint32_t reserved_88;
+	uint32_t reserved_89;
+	uint32_t reserved_90;
+	uint32_t reserved_91;
+	uint32_t reserved_92;
+	uint32_t reserved_93;
+	uint32_t reserved_94;
+	uint32_t reserved_95;
+	uint32_t reserved_96;
+	uint32_t reserved_97;
+	uint32_t reserved_98;
+	uint32_t reserved_99;
+	uint32_t reserved_100;
+	uint32_t reserved_101;
+	uint32_t reserved_102;
+	uint32_t reserved_103;
+	uint32_t reserved_104;
+	uint32_t reserved_105;
+	uint32_t reserved_106;
+	uint32_t reserved_107;
+	uint32_t reserved_108;
+	uint32_t reserved_109;
+	uint32_t reserved_110;
+	uint32_t reserved_111;
+	uint32_t reserved_112;
+	uint32_t reserved_113;
+	uint32_t reserved_114;
+	uint32_t reserved_115;
+	uint32_t reserved_116;
+	uint32_t reserved_117;
+	uint32_t reserved_118;
+	uint32_t reserved_119;
+	uint32_t reserved_120;
+	uint32_t reserved_121;
+	uint32_t reserved_122;
+	uint32_t reserved_123;
+	uint32_t reserved_124;
+	uint32_t reserved_125;
+	uint32_t reserved_126;
+	uint32_t reserved_127;
+};
+
+struct vi_mqd {
+	uint32_t header;
+	uint32_t compute_dispatch_initiator;
+	uint32_t compute_dim_x;
+	uint32_t compute_dim_y;
+	uint32_t compute_dim_z;
+	uint32_t compute_start_x;
+	uint32_t compute_start_y;
+	uint32_t compute_start_z;
+	uint32_t compute_num_thread_x;
+	uint32_t compute_num_thread_y;
+	uint32_t compute_num_thread_z;
+	uint32_t compute_pipelinestat_enable;
+	uint32_t compute_perfcount_enable;
+	uint32_t compute_pgm_lo;
+	uint32_t compute_pgm_hi;
+	uint32_t compute_tba_lo;
+	uint32_t compute_tba_hi;
+	uint32_t compute_tma_lo;
+	uint32_t compute_tma_hi;
+	uint32_t compute_pgm_rsrc1;
+	uint32_t compute_pgm_rsrc2;
+	uint32_t compute_vmid;
+	uint32_t compute_resource_limits;
+	uint32_t compute_static_thread_mgmt_se0;
+	uint32_t compute_static_thread_mgmt_se1;
+	uint32_t compute_tmpring_size;
+	uint32_t compute_static_thread_mgmt_se2;
+	uint32_t compute_static_thread_mgmt_se3;
+	uint32_t compute_restart_x;
+	uint32_t compute_restart_y;
+	uint32_t compute_restart_z;
+	uint32_t compute_thread_trace_enable;
+	uint32_t compute_misc_reserved;
+	uint32_t compute_dispatch_id;
+	uint32_t compute_threadgroup_id;
+	uint32_t compute_relaunch;
+	uint32_t compute_wave_restore_addr_lo;
+	uint32_t compute_wave_restore_addr_hi;
+	uint32_t compute_wave_restore_control;
+	uint32_t reserved_39;
+	uint32_t reserved_40;
+	uint32_t reserved_41;
+	uint32_t reserved_42;
+	uint32_t reserved_43;
+	uint32_t reserved_44;
+	uint32_t reserved_45;
+	uint32_t reserved_46;
+	uint32_t reserved_47;
+	uint32_t reserved_48;
+	uint32_t reserved_49;
+	uint32_t reserved_50;
+	uint32_t reserved_51;
+	uint32_t reserved_52;
+	uint32_t reserved_53;
+	uint32_t reserved_54;
+	uint32_t reserved_55;
+	uint32_t reserved_56;
+	uint32_t reserved_57;
+	uint32_t reserved_58;
+	uint32_t reserved_59;
+	uint32_t reserved_60;
+	uint32_t reserved_61;
+	uint32_t reserved_62;
+	uint32_t reserved_63;
+	uint32_t reserved_64;
+	uint32_t compute_user_data_0;
+	uint32_t compute_user_data_1;
+	uint32_t compute_user_data_2;
+	uint32_t compute_user_data_3;
+	uint32_t compute_user_data_4;
+	uint32_t compute_user_data_5;
+	uint32_t compute_user_data_6;
+	uint32_t compute_user_data_7;
+	uint32_t compute_user_data_8;
+	uint32_t compute_user_data_9;
+	uint32_t compute_user_data_10;
+	uint32_t compute_user_data_11;
+	uint32_t compute_user_data_12;
+	uint32_t compute_user_data_13;
+	uint32_t compute_user_data_14;
+	uint32_t compute_user_data_15;
+	uint32_t cp_compute_csinvoc_count_lo;
+	uint32_t cp_compute_csinvoc_count_hi;
+	uint32_t reserved_83;
+	uint32_t reserved_84;
+	uint32_t reserved_85;
+	uint32_t cp_mqd_query_time_lo;
+	uint32_t cp_mqd_query_time_hi;
+	uint32_t cp_mqd_connect_start_time_lo;
+	uint32_t cp_mqd_connect_start_time_hi;
+	uint32_t cp_mqd_connect_end_time_lo;
+	uint32_t cp_mqd_connect_end_time_hi;
+	uint32_t cp_mqd_connect_end_wf_count;
+	uint32_t cp_mqd_connect_end_pq_rptr;
+	uint32_t cp_mqd_connect_end_pq_wptr;
+	uint32_t cp_mqd_connect_end_ib_rptr;
+	uint32_t reserved_96;
+	uint32_t reserved_97;
+	uint32_t cp_mqd_save_start_time_lo;
+	uint32_t cp_mqd_save_start_time_hi;
+	uint32_t cp_mqd_save_end_time_lo;
+	uint32_t cp_mqd_save_end_time_hi;
+	uint32_t cp_mqd_restore_start_time_lo;
+	uint32_t cp_mqd_restore_start_time_hi;
+	uint32_t cp_mqd_restore_end_time_lo;
+	uint32_t cp_mqd_restore_end_time_hi;
+	uint32_t reserved_106;
+	uint32_t reserved_107;
+	uint32_t gds_cs_ctxsw_cnt0;
+	uint32_t gds_cs_ctxsw_cnt1;
+	uint32_t gds_cs_ctxsw_cnt2;
+	uint32_t gds_cs_ctxsw_cnt3;
+	uint32_t reserved_112;
+	uint32_t reserved_113;
+	uint32_t cp_pq_exe_status_lo;
+	uint32_t cp_pq_exe_status_hi;
+	uint32_t cp_packet_id_lo;
+	uint32_t cp_packet_id_hi;
+	uint32_t cp_packet_exe_status_lo;
+	uint32_t cp_packet_exe_status_hi;
+	uint32_t gds_save_base_addr_lo;
+	uint32_t gds_save_base_addr_hi;
+	uint32_t gds_save_mask_lo;
+	uint32_t gds_save_mask_hi;
+	uint32_t ctx_save_base_addr_lo;
+	uint32_t ctx_save_base_addr_hi;
+	uint32_t reserved_126;
+	uint32_t reserved_127;
+	uint32_t cp_mqd_base_addr_lo;
+	uint32_t cp_mqd_base_addr_hi;
+	uint32_t cp_hqd_active;
+	uint32_t cp_hqd_vmid;
+	uint32_t cp_hqd_persistent_state;
+	uint32_t cp_hqd_pipe_priority;
+	uint32_t cp_hqd_queue_priority;
+	uint32_t cp_hqd_quantum;
+	uint32_t cp_hqd_pq_base_lo;
+	uint32_t cp_hqd_pq_base_hi;
+	uint32_t cp_hqd_pq_rptr;
+	uint32_t cp_hqd_pq_rptr_report_addr_lo;
+	uint32_t cp_hqd_pq_rptr_report_addr_hi;
+	uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+	uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+	uint32_t cp_hqd_pq_doorbell_control;
+	uint32_t cp_hqd_pq_wptr;
+	uint32_t cp_hqd_pq_control;
+	uint32_t cp_hqd_ib_base_addr_lo;
+	uint32_t cp_hqd_ib_base_addr_hi;
+	uint32_t cp_hqd_ib_rptr;
+	uint32_t cp_hqd_ib_control;
+	uint32_t cp_hqd_iq_timer;
+	uint32_t cp_hqd_iq_rptr;
+	uint32_t cp_hqd_dequeue_request;
+	uint32_t cp_hqd_dma_offload;
+	uint32_t cp_hqd_sema_cmd;
+	uint32_t cp_hqd_msg_type;
+	uint32_t cp_hqd_atomic0_preop_lo;
+	uint32_t cp_hqd_atomic0_preop_hi;
+	uint32_t cp_hqd_atomic1_preop_lo;
+	uint32_t cp_hqd_atomic1_preop_hi;
+	uint32_t cp_hqd_hq_status0;
+	uint32_t cp_hqd_hq_control0;
+	uint32_t cp_mqd_control;
+	uint32_t cp_hqd_hq_status1;
+	uint32_t cp_hqd_hq_control1;
+	uint32_t cp_hqd_eop_base_addr_lo;
+	uint32_t cp_hqd_eop_base_addr_hi;
+	uint32_t cp_hqd_eop_control;
+	uint32_t cp_hqd_eop_rptr;
+	uint32_t cp_hqd_eop_wptr;
+	uint32_t cp_hqd_eop_done_events;
+	uint32_t cp_hqd_ctx_save_base_addr_lo;
+	uint32_t cp_hqd_ctx_save_base_addr_hi;
+	uint32_t cp_hqd_ctx_save_control;
+	uint32_t cp_hqd_cntl_stack_offset;
+	uint32_t cp_hqd_cntl_stack_size;
+	uint32_t cp_hqd_wg_state_offset;
+	uint32_t cp_hqd_ctx_save_size;
+	uint32_t cp_hqd_gds_resource_state;
+	uint32_t cp_hqd_error;
+	uint32_t cp_hqd_eop_wptr_mem;
+	uint32_t cp_hqd_eop_dones;
+	uint32_t reserved_182;
+	uint32_t reserved_183;
+	uint32_t reserved_184;
+	uint32_t reserved_185;
+	uint32_t reserved_186;
+	uint32_t reserved_187;
+	uint32_t reserved_188;
+	uint32_t reserved_189;
+	uint32_t reserved_190;
+	uint32_t reserved_191;
+	uint32_t iqtimer_pkt_header;
+	uint32_t iqtimer_pkt_dw0;
+	uint32_t iqtimer_pkt_dw1;
+	uint32_t iqtimer_pkt_dw2;
+	uint32_t iqtimer_pkt_dw3;
+	uint32_t iqtimer_pkt_dw4;
+	uint32_t iqtimer_pkt_dw5;
+	uint32_t iqtimer_pkt_dw6;
+	uint32_t iqtimer_pkt_dw7;
+	uint32_t iqtimer_pkt_dw8;
+	uint32_t iqtimer_pkt_dw9;
+	uint32_t iqtimer_pkt_dw10;
+	uint32_t iqtimer_pkt_dw11;
+	uint32_t iqtimer_pkt_dw12;
+	uint32_t iqtimer_pkt_dw13;
+	uint32_t iqtimer_pkt_dw14;
+	uint32_t iqtimer_pkt_dw15;
+	uint32_t iqtimer_pkt_dw16;
+	uint32_t iqtimer_pkt_dw17;
+	uint32_t iqtimer_pkt_dw18;
+	uint32_t iqtimer_pkt_dw19;
+	uint32_t iqtimer_pkt_dw20;
+	uint32_t iqtimer_pkt_dw21;
+	uint32_t iqtimer_pkt_dw22;
+	uint32_t iqtimer_pkt_dw23;
+	uint32_t iqtimer_pkt_dw24;
+	uint32_t iqtimer_pkt_dw25;
+	uint32_t iqtimer_pkt_dw26;
+	uint32_t iqtimer_pkt_dw27;
+	uint32_t iqtimer_pkt_dw28;
+	uint32_t iqtimer_pkt_dw29;
+	uint32_t iqtimer_pkt_dw30;
+	uint32_t iqtimer_pkt_dw31;
+	uint32_t reserved_225;
+	uint32_t reserved_226;
+	uint32_t reserved_227;
+	uint32_t set_resources_header;
+	uint32_t set_resources_dw1;
+	uint32_t set_resources_dw2;
+	uint32_t set_resources_dw3;
+	uint32_t set_resources_dw4;
+	uint32_t set_resources_dw5;
+	uint32_t set_resources_dw6;
+	uint32_t set_resources_dw7;
+	uint32_t reserved_236;
+	uint32_t reserved_237;
+	uint32_t reserved_238;
+	uint32_t reserved_239;
+	uint32_t queue_doorbell_id0;
+	uint32_t queue_doorbell_id1;
+	uint32_t queue_doorbell_id2;
+	uint32_t queue_doorbell_id3;
+	uint32_t queue_doorbell_id4;
+	uint32_t queue_doorbell_id5;
+	uint32_t queue_doorbell_id6;
+	uint32_t queue_doorbell_id7;
+	uint32_t queue_doorbell_id8;
+	uint32_t queue_doorbell_id9;
+	uint32_t queue_doorbell_id10;
+	uint32_t queue_doorbell_id11;
+	uint32_t queue_doorbell_id12;
+	uint32_t queue_doorbell_id13;
+	uint32_t queue_doorbell_id14;
+	uint32_t queue_doorbell_id15;
+};
+
+#endif /* VI_STRUCTS_H_ */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
new file mode 100644
index 000000000000..9259f1b6664c
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+
+/* Initialize a given run queue struct */
+static void amd_sched_rq_init(struct amd_sched_rq *rq)
+{
+	spin_lock_init(&rq->lock);
+	INIT_LIST_HEAD(&rq->entities);
+	rq->current_entity = NULL;
+}
+
+static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
+				    struct amd_sched_entity *entity)
+{
+	spin_lock(&rq->lock);
+	list_add_tail(&entity->list, &rq->entities);
+	spin_unlock(&rq->lock);
+}
+
+static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
+				       struct amd_sched_entity *entity)
+{
+	spin_lock(&rq->lock);
+	list_del_init(&entity->list);
+	if (rq->current_entity == entity)
+		rq->current_entity = NULL;
+	spin_unlock(&rq->lock);
+}
+
+/**
+ * Select next job from a specified run queue with round robin policy.
+ * Return NULL if nothing is available.
+ */
+static struct amd_sched_job *
+amd_sched_rq_select_job(struct amd_sched_rq *rq)
+{
+	struct amd_sched_entity *entity;
+	struct amd_sched_job *job;
+
+	spin_lock(&rq->lock);
+
+	entity = rq->current_entity;
+	if (entity) {
+		list_for_each_entry_continue(entity, &rq->entities, list) {
+			job = amd_sched_entity_pop_job(entity);
+			if (job) {
+				rq->current_entity = entity;
+				spin_unlock(&rq->lock);
+				return job;
+			}
+		}
+	}
+
+	list_for_each_entry(entity, &rq->entities, list) {
+
+		job = amd_sched_entity_pop_job(entity);
+		if (job) {
+			rq->current_entity = entity;
+			spin_unlock(&rq->lock);
+			return job;
+		}
+
+		if (entity == rq->current_entity)
+			break;
+	}
+
+	spin_unlock(&rq->lock);
+
+	return NULL;
+}
+
+/**
+ * Init a context entity used by the scheduler when submitting to a HW ring.
+ *
+ * @sched	The pointer to the scheduler
+ * @entity	The pointer to a valid amd_sched_entity
+ * @rq		The run queue this entity belongs to
+ * @jobs	The max number of jobs in the job queue
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+			  struct amd_sched_entity *entity,
+			  struct amd_sched_rq *rq,
+			  uint32_t jobs)
+{
+	if (!(sched && entity && rq))
+		return -EINVAL;
+
+	memset(entity, 0, sizeof(struct amd_sched_entity));
+	entity->belongto_rq = rq;
+	entity->scheduler = sched;
+	entity->fence_context = fence_context_alloc(1);
+	if (kfifo_alloc(&entity->job_queue,
+		       jobs * sizeof(void *),
+		       GFP_KERNEL))
+		return -EINVAL;
+
+	spin_lock_init(&entity->queue_lock);
+	atomic_set(&entity->fence_seq, 0);
+
+	/* Add the entity to the run queue */
+	amd_sched_rq_add_entity(rq, entity);
+	return 0;
+}
+
+/**
+ * Query if entity is initialized
+ *
+ * @sched       Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if entity is initialized, false otherwise.
+ */
+static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
+					    struct amd_sched_entity *entity)
+{
+	return entity->scheduler == sched &&
+		entity->belongto_rq != NULL;
+}
+
+/**
+ * Check if entity is idle
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if the entity has no unscheduled jobs.
+ */
+static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+{
+	rmb();
+	if (kfifo_is_empty(&entity->job_queue))
+		return true;
+
+	return false;
+}
+
+/**
+ * Destroy a context entity
+ *
+ * @sched       Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Cleanup and free the allocated resources.
+ */
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+			   struct amd_sched_entity *entity)
+{
+	struct amd_sched_rq *rq = entity->belongto_rq;
+
+	if (!amd_sched_entity_is_initialized(sched, entity))
+		return;
+
+	/*
+	 * The client will not queue more IBs during this fini; consume
+	 * all existing queued IBs.
+	 */
+	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+
+	amd_sched_rq_remove_entity(rq, entity);
+	kfifo_free(&entity->job_queue);
+}
+
+static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_entity *entity =
+		container_of(cb, struct amd_sched_entity, cb);
+	entity->dependency = NULL;
+	fence_put(f);
+	amd_sched_wakeup(entity->scheduler);
+}
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+{
+	struct amd_gpu_scheduler *sched = entity->scheduler;
+	struct amd_sched_job *job;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return NULL;
+
+	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+		return NULL;
+
+	while ((entity->dependency = sched->ops->dependency(job))) {
+
+		if (fence_add_callback(entity->dependency, &entity->cb,
+				       amd_sched_entity_wakeup))
+			fence_put(entity->dependency);
+		else
+			return NULL;
+	}
+
+	return job;
+}
+
+/**
+ * Helper to submit a job to the job queue
+ *
+ * @job		The pointer to job required to submit
+ *
+ * Returns true if we could submit the job.
+ */
+static bool amd_sched_entity_in(struct amd_sched_job *job)
+{
+	struct amd_sched_entity *entity = job->s_entity;
+	bool added, first = false;
+
+	spin_lock(&entity->queue_lock);
+	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+
+	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+		first = true;
+
+	spin_unlock(&entity->queue_lock);
+
+	/* first job wakes up scheduler */
+	if (first)
+		amd_sched_wakeup(job->sched);
+
+	return added;
+}
+
+/**
+ * Submit a job to the job queue
+ *
+ * @sched_job	The pointer to the job to submit
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+{
+	struct amd_sched_entity *entity = sched_job->s_entity;
+	struct amd_sched_fence *fence = amd_sched_fence_create(
+		entity, sched_job->owner);
+
+	if (!fence)
+		return -ENOMEM;
+
+	fence_get(&fence->base);
+	sched_job->s_fence = fence;
+
+	wait_event(entity->scheduler->job_scheduled,
+		   amd_sched_entity_in(sched_job));
+
+	return 0;
+}
+
+/**
+ * Return true if we can push more jobs to the hw.
+ */
+static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+{
+	return atomic_read(&sched->hw_rq_count) <
+		sched->hw_submission_limit;
+}
+
+/**
+ * Wake up the scheduler when it is ready
+ */
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+{
+	if (amd_sched_ready(sched))
+		wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * Select the next job to run.
+ */
+static struct amd_sched_job *
+amd_sched_select_job(struct amd_gpu_scheduler *sched)
+{
+	struct amd_sched_job *job;
+
+	if (!amd_sched_ready(sched))
+		return NULL;
+
+	/* Kernel run queue has higher priority than normal run queue */
+	job = amd_sched_rq_select_job(&sched->kernel_rq);
+	if (job == NULL)
+		job = amd_sched_rq_select_job(&sched->sched_rq);
+
+	return job;
+}
+
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_job *sched_job =
+		container_of(cb, struct amd_sched_job, cb);
+	struct amd_gpu_scheduler *sched;
+
+	sched = sched_job->sched;
+	amd_sched_fence_signal(sched_job->s_fence);
+	atomic_dec(&sched->hw_rq_count);
+	fence_put(&sched_job->s_fence->base);
+	sched->ops->process_job(sched_job);
+	wake_up_interruptible(&sched->wake_up_worker);
+}
+
+static int amd_sched_main(void *param)
+{
+	struct sched_param sparam = {.sched_priority = 1};
+	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+	int r, count;
+
+	sched_setscheduler(current, SCHED_FIFO, &sparam);
+
+	while (!kthread_should_stop()) {
+		struct amd_sched_entity *entity;
+		struct amd_sched_job *job;
+		struct fence *fence;
+
+		wait_event_interruptible(sched->wake_up_worker,
+			kthread_should_stop() ||
+			(job = amd_sched_select_job(sched)));
+
+		if (!job)
+			continue;
+
+		entity = job->s_entity;
+		atomic_inc(&sched->hw_rq_count);
+		fence = sched->ops->run_job(job);
+		if (fence) {
+			r = fence_add_callback(fence, &job->cb,
+					       amd_sched_process_job);
+			if (r == -ENOENT)
+				amd_sched_process_job(fence, &job->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n", r);
+			fence_put(fence);
+		}
+
+		count = kfifo_out(&entity->job_queue, &job, sizeof(job));
+		WARN_ON(count != sizeof(job));
+		wake_up(&sched->job_scheduled);
+	}
+	return 0;
+}
+
+/**
+ * Create a gpu scheduler
+ *
+ * @ops			The backend operations for this scheduler.
+ * @ring		The ring id for the scheduler.
+ * @hw_submission	Max number of jobs in flight on the hardware.
+ *
+ * Returns the scheduler pointer on success, NULL otherwise.
+ */
+struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
+					   unsigned ring, unsigned hw_submission,
+					   void *priv)
+{
+	struct amd_gpu_scheduler *sched;
+
+	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
+	if (!sched)
+		return NULL;
+
+	sched->ops = ops;
+	sched->ring_id = ring;
+	sched->hw_submission_limit = hw_submission;
+	sched->priv = priv;
+	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
+	amd_sched_rq_init(&sched->sched_rq);
+	amd_sched_rq_init(&sched->kernel_rq);
+
+	init_waitqueue_head(&sched->wake_up_worker);
+	init_waitqueue_head(&sched->job_scheduled);
+	atomic_set(&sched->hw_rq_count, 0);
+	/* Each scheduler will run on a separate kernel thread */
+	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+	if (IS_ERR(sched->thread)) {
+		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
+		kfree(sched);
+		return NULL;
+	}
+
+	return sched;
+}
+
+/**
+ * Destroy a gpu scheduler
+ *
+ * @sched	The pointer to the scheduler
+ *
+ * Returns 0 on success.
+ */
+int amd_sched_destroy(struct amd_gpu_scheduler *sched)
+{
+	kthread_stop(sched->thread);
+	kfree(sched);
+	return 0;
+}
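
For orientation, here is a minimal sketch of how a ring driver might drive this API end to end. It assumes a backend ops table my_ops, a job allocator my_job_alloc() and a ring id MY_RING_ID, all hypothetical stand-ins:

/*
 * Minimal usage sketch (hypothetical my_ops, my_job_alloc, MY_RING_ID):
 * create one scheduler per ring, one entity per client, then push jobs.
 */
static struct amd_gpu_scheduler *sched;
static struct amd_sched_entity entity;

static int my_ring_init(void)
{
	int r;

	sched = amd_sched_create(&my_ops, MY_RING_ID,
				 16 /* hw submission limit */, NULL);
	if (!sched)
		return -ENOMEM;

	/* 32-slot job kfifo, scheduled on the normal run queue */
	r = amd_sched_entity_init(sched, &entity, &sched->sched_rq, 32);
	if (r)
		amd_sched_destroy(sched);
	return r;
}

static int my_submit(void *owner)
{
	struct amd_sched_job *job = my_job_alloc();	/* hypothetical */

	job->sched = sched;
	job->s_entity = &entity;
	job->owner = owner;

	/* blocks on job_scheduled until the entity's kfifo has room */
	return amd_sched_entity_push_job(job);
}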
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
new file mode 100644
index 000000000000..2af0e4d4d817
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _GPU_SCHEDULER_H_
+#define _GPU_SCHEDULER_H_
+
+#include <linux/kfifo.h>
+#include <linux/fence.h>
+
+struct amd_gpu_scheduler;
+struct amd_sched_rq;
+
+/**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+ * job queues to the corresponding hardware ring, based on the
+ * scheduling policy.
+ */
+struct amd_sched_entity {
+	struct list_head		list;
+	struct amd_sched_rq		*belongto_rq;
+	atomic_t			fence_seq;
+	/* the job_queue maintains the jobs submitted by clients */
+	struct kfifo                    job_queue;
+	spinlock_t			queue_lock;
+	struct amd_gpu_scheduler	*scheduler;
+	uint64_t                        fence_context;
+	struct fence			*dependency;
+	struct fence_cb			cb;
+};
+
+/**
+ * Run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+ */
+struct amd_sched_rq {
+	spinlock_t		lock;
+	struct list_head	entities;
+	struct amd_sched_entity	*current_entity;
+};
+
+struct amd_sched_fence {
+	struct fence                    base;
+	struct amd_gpu_scheduler	*scheduler;
+	spinlock_t			lock;
+	void                            *owner;
+};
+
+struct amd_sched_job {
+	struct fence_cb                 cb;
+	struct amd_gpu_scheduler        *sched;
+	struct amd_sched_entity         *s_entity;
+	struct amd_sched_fence          *s_fence;
+	void		                *owner;
+};
+
+extern const struct fence_ops amd_sched_fence_ops;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+{
+	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+
+	if (__f->base.ops == &amd_sched_fence_ops)
+		return __f;
+
+	return NULL;
+}
+
+/**
+ * The backend operations called by the scheduler; these functions
+ * must be implemented on the driver side.
+ */
+struct amd_sched_backend_ops {
+	struct fence *(*dependency)(struct amd_sched_job *job);
+	struct fence *(*run_job)(struct amd_sched_job *job);
+	void (*process_job)(struct amd_sched_job *job);
+};
+
+/**
+ * One scheduler is implemented for each hardware ring
+ */
+struct amd_gpu_scheduler {
+	struct task_struct		*thread;
+	struct amd_sched_rq		sched_rq;
+	struct amd_sched_rq		kernel_rq;
+	atomic_t			hw_rq_count;
+	struct amd_sched_backend_ops	*ops;
+	uint32_t			ring_id;
+	wait_queue_head_t		wake_up_worker;
+	wait_queue_head_t		job_scheduled;
+	uint32_t                        hw_submission_limit;
+	char                            name[20];
+	void                            *priv;
+};
+
+struct amd_gpu_scheduler *
+amd_sched_create(struct amd_sched_backend_ops *ops,
+		 uint32_t ring, uint32_t hw_submission, void *priv);
+int amd_sched_destroy(struct amd_gpu_scheduler *sched);
+
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+			  struct amd_sched_entity *entity,
+			  struct amd_sched_rq *rq,
+			  uint32_t jobs);
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+			   struct amd_sched_entity *entity);
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+
+struct amd_sched_fence *amd_sched_fence_create(
+	struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_signal(struct amd_sched_fence *fence);
+
+
+#endif
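
The three callbacks in amd_sched_backend_ops are the entire driver-facing contract. A hedged sketch of a backend implementation, with the ring-specific helper my_ring_emit() as a hypothetical stand-in:

/* Hedged backend sketch; my_ring_emit() is a hypothetical helper. */
static struct fence *my_dependency(struct amd_sched_job *job)
{
	/* fence the job must wait for before it can run, or NULL */
	return NULL;
}

static struct fence *my_run_job(struct amd_sched_job *job)
{
	/*
	 * Emit the job to the hardware ring.  The returned fence is
	 * what amd_sched_main() attaches its completion callback to.
	 */
	return my_ring_emit(job);
}

static void my_process_job(struct amd_sched_job *job)
{
	/* completion side: the job's resources can be released here */
	kfree(job);
}

static struct amd_sched_backend_ops my_ops = {
	.dependency	= my_dependency,
	.run_job	= my_run_job,
	.process_job	= my_process_job,
};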
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
new file mode 100644
index 000000000000..e62c37920e11
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+{
+	struct amd_sched_fence *fence = NULL;
+	unsigned seq;
+
+	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+	if (fence == NULL)
+		return NULL;
+	fence->owner = owner;
+	fence->scheduler = s_entity->scheduler;
+	spin_lock_init(&fence->lock);
+
+	seq = atomic_inc_return(&s_entity->fence_seq);
+	fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
+		   s_entity->fence_context, seq);
+
+	return fence;
+}
+
+void amd_sched_fence_signal(struct amd_sched_fence *fence)
+{
+	int ret = fence_signal(&fence->base);
+	if (!ret)
+		FENCE_TRACE(&fence->base, "signaled from irq context\n");
+	else
+		FENCE_TRACE(&fence->base, "was already signaled\n");
+}
+
+static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+{
+	return "amd_sched";
+}
+
+static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+	return (const char *)fence->scheduler->name;
+}
+
+static bool amd_sched_fence_enable_signaling(struct fence *f)
+{
+	return true;
+}
+
+const struct fence_ops amd_sched_fence_ops = {
+	.get_driver_name = amd_sched_fence_get_driver_name,
+	.get_timeline_name = amd_sched_fence_get_timeline_name,
+	.enable_signaling = amd_sched_fence_enable_signaling,
+	.signaled = NULL,
+	.wait = fence_default_wait,
+	.release = NULL,
+};
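
Since amd_sched_fence embeds a plain struct fence and wires .wait to fence_default_wait, any other context can block on job completion through the generic fence API. A minimal sketch, assuming the caller holds a job with an attached s_fence:

/* Hedged sketch: block on a pushed job via the generic fence API. */
static long my_wait_for_job(struct amd_sched_job *job)
{
	struct fence *f = fence_get(&job->s_fence->base);
	long r;

	/* interruptible wait, 1s timeout; backed by fence_default_wait */
	r = fence_wait_timeout(f, true, msecs_to_jiffies(1000));
	fence_put(f);
	return r;
}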
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 7838e731b0de..7d03c51abcb9 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -22,9 +22,9 @@ static /*const*/ struct fb_ops armada_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
-	.fb_fillrect	= cfb_fillrect,
-	.fb_copyarea	= cfb_copyarea,
-	.fb_imageblit	= cfb_imageblit,
+	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
+	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
+	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
 	.fb_pan_display	= drm_fb_helper_pan_display,
 	.fb_blank	= drm_fb_helper_blank,
 	.fb_setcmap	= drm_fb_helper_setcmap,
@@ -80,18 +80,12 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
 	if (IS_ERR(dfb))
 		return PTR_ERR(dfb);
 
-	info = framebuffer_alloc(0, dev->dev);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(fbh);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto err_fballoc;
 	}
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto err_fbcmap;
-	}
-
 	strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
 	info->par = fbh;
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
@@ -101,7 +95,7 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
 	info->screen_size = obj->obj.size;
 	info->screen_base = ptr;
 	fbh->fb = &dfb->fb;
-	fbh->fbdev = info;
+
 	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
 	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
 
@@ -111,8 +105,6 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
 
 	return 0;
 
- err_fbcmap:
-	framebuffer_release(info);
  err_fballoc:
 	dfb->fb.funcs->destroy(&dfb->fb);
 	return ret;
@@ -171,6 +163,7 @@ int armada_fbdev_init(struct drm_device *dev)
 
 	return 0;
  err_fb_setup:
+	drm_fb_helper_release_fbi(fbh);
 	drm_fb_helper_fini(fbh);
  err_fb_helper:
 	priv->fbdev = NULL;
@@ -191,14 +184,8 @@ void armada_fbdev_fini(struct drm_device *dev)
 	struct drm_fb_helper *fbh = priv->fbdev;
 
 	if (fbh) {
-		struct fb_info *info = fbh->fbdev;
-
-		if (info) {
-			unregister_framebuffer(info);
-			if (info->cmap.len)
-				fb_dealloc_cmap(&info->cmap);
-			framebuffer_release(info);
-		}
+		drm_fb_helper_unregister_fbi(fbh);
+		drm_fb_helper_release_fbi(fbh);
 
 		drm_fb_helper_fini(fbh);
 
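
The armada conversion above is the first of several identical ones in this series (ast, bochs and cirrus follow the same shape below). Reduced to its essentials, the new pattern looks roughly like this; my_fb_create and the surrounding driver structure are hypothetical:

/* Hedged sketch of the shared fbdev-helper conversion pattern. */
static int my_fb_create(struct drm_fb_helper *helper,
			struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;

	/*
	 * Replaces framebuffer_alloc() + fb_alloc_cmap() +
	 * alloc_apertures() and sets helper->fbdev internally.
	 */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* ... driver-specific framebuffer and fb_info setup ... */
	return 0;
}

static void my_fbdev_destroy(struct drm_fb_helper *helper)
{
	/* teardown mirrors the two-step alloc/register */
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);
	drm_fb_helper_fini(helper);
}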
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index ff68eefae273..f31db28a684b 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -125,7 +125,7 @@ static void ast_fillrect(struct fb_info *info,
 			 const struct fb_fillrect *rect)
 {
 	struct ast_fbdev *afbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -134,7 +134,7 @@ static void ast_copyarea(struct fb_info *info,
 			 const struct fb_copyarea *area)
 {
 	struct ast_fbdev *afbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	ast_dirty_update(afbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -143,7 +143,7 @@ static void ast_imageblit(struct fb_info *info,
 			  const struct fb_image *image)
 {
 	struct ast_fbdev *afbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	ast_dirty_update(afbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -193,7 +193,6 @@ static int astfb_create(struct drm_fb_helper *helper,
 	struct drm_framebuffer *fb;
 	struct fb_info *info;
 	int size, ret;
-	struct device *device = &dev->pdev->dev;
 	void *sysram;
 	struct drm_gem_object *gobj = NULL;
 	struct ast_bo *bo = NULL;
@@ -217,40 +216,28 @@ static int astfb_create(struct drm_fb_helper *helper,
 	if (!sysram)
 		return -ENOMEM;
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
-		goto out;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
+		goto err_free_vram;
 	}
 	info->par = afbdev;
 
 	ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
 	if (ret)
-		goto out;
+		goto err_release_fbi;
 
 	afbdev->sysram = sysram;
 	afbdev->size = size;
 
 	fb = &afbdev->afb.base;
 	afbdev->helper.fb = fb;
-	afbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "astdrmfb");
 
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &astfb_ops;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out;
-	}
 	info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
 	info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
 
@@ -266,7 +253,11 @@ static int astfb_create(struct drm_fb_helper *helper,
 		      fb->width, fb->height);
 
 	return 0;
-out:
+
+err_release_fbi:
+	drm_fb_helper_release_fbi(helper);
+err_free_vram:
+	vfree(afbdev->sysram);
 	return ret;
 }
 
@@ -297,15 +288,10 @@ static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
 static void ast_fbdev_destroy(struct drm_device *dev,
 			      struct ast_fbdev *afbdev)
 {
-	struct fb_info *info;
 	struct ast_framebuffer *afb = &afbdev->afb;
-	if (afbdev->helper.fbdev) {
-		info = afbdev->helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+
+	drm_fb_helper_unregister_fbi(&afbdev->helper);
+	drm_fb_helper_release_fbi(&afbdev->helper);
 
 	if (afb->obj) {
 		drm_gem_object_unreference_unlocked(afb->obj);
@@ -377,5 +363,5 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state)
 	if (!ast->fbdev)
 		return;
 
-	fb_set_suspend(ast->fbdev->helper.fbdev, state);
+	drm_fb_helper_set_suspend(&ast->fbdev->helper, state);
 }
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 035dacc93382..838217f8ce7d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -571,24 +571,18 @@ ast_dumb_mmap_offset(struct drm_file *file,
 		     uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct ast_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_ast_bo(obj);
 	*offset = ast_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	drm_gem_object_unreference_unlocked(obj);
+
+	return 0;
 
 }
 
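The ast change above, and the bochs and cirrus changes below, all reduce dumb_mmap_offset to the same lock-free shape. A generic sketch of that shape, assuming a GEM object whose mmap offset lives in its vma_node:

/* Hedged generic sketch; assumes the offset lives in obj->vma_node. */
static int my_dumb_mmap_offset(struct drm_file *file,
			       struct drm_device *dev,
			       uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL)
		return -ENOENT;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	/* the _unlocked unref is what makes dropping struct_mutex safe */
	drm_gem_object_unreference_unlocked(obj);
	return 0;
}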
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 5ae5c6923128..9f6e234e7029 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -239,7 +239,8 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
 	return atmel_hlcdc_plane_prepare_disc_area(s);
 }
 
-static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
+static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
+					  struct drm_crtc_state *old_s)
 {
 	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
 
@@ -253,7 +254,8 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
 	}
 }
 
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
+					  struct drm_crtc_state *old_s)
 {
 	/* TODO: write common plane control register if available */
 }
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index ef6182bc8e5e..8bc62ec407f9 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -29,6 +29,115 @@
 
 #define ATMEL_HLCDC_LAYER_IRQS_OFFSET		8
 
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = {
+	{
+		.name = "base",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x40,
+		.id = 0,
+		.type = ATMEL_HLCDC_BASE_LAYER,
+		.nconfigs = 5,
+		.layout = {
+			.xstride = { 2 },
+			.default_color = 3,
+			.general_config = 4,
+		},
+	},
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = {
+	.min_width = 0,
+	.min_height = 0,
+	.max_width = 1280,
+	.max_height = 860,
+	.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers),
+	.layers = atmel_hlcdc_at91sam9n12_layers,
+};
+
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
+	{
+		.name = "base",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x40,
+		.id = 0,
+		.type = ATMEL_HLCDC_BASE_LAYER,
+		.nconfigs = 5,
+		.layout = {
+			.xstride = { 2 },
+			.default_color = 3,
+			.general_config = 4,
+			.disc_pos = 5,
+			.disc_size = 6,
+		},
+	},
+	{
+		.name = "overlay1",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x100,
+		.id = 1,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 10,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.pstride = { 5 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+	{
+		.name = "high-end-overlay",
+		.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
+		.regs_offset = 0x280,
+		.id = 2,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 17,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.memsize = 4,
+			.xstride = { 5, 7 },
+			.pstride = { 6, 8 },
+			.default_color = 9,
+			.chroma_key = 10,
+			.chroma_key_mask = 11,
+			.general_config = 12,
+			.csc = 14,
+		},
+	},
+	{
+		.name = "cursor",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x340,
+		.id = 3,
+		.type = ATMEL_HLCDC_CURSOR_LAYER,
+		.nconfigs = 10,
+		.max_width = 128,
+		.max_height = 128,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = {
+	.min_width = 0,
+	.min_height = 0,
+	.max_width = 800,
+	.max_height = 600,
+	.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers),
+	.layers = atmel_hlcdc_at91sam9x5_layers,
+};
+
 static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
 	{
 		.name = "base",
@@ -132,11 +241,105 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = {
 	.layers = atmel_hlcdc_sama5d3_layers,
 };
 
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
+	{
+		.name = "base",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x40,
+		.id = 0,
+		.type = ATMEL_HLCDC_BASE_LAYER,
+		.nconfigs = 7,
+		.layout = {
+			.xstride = { 2 },
+			.default_color = 3,
+			.general_config = 4,
+			.disc_pos = 5,
+			.disc_size = 6,
+		},
+	},
+	{
+		.name = "overlay1",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x140,
+		.id = 1,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 10,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.pstride = { 5 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+	{
+		.name = "overlay2",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x240,
+		.id = 2,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 10,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.pstride = { 5 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+	{
+		.name = "high-end-overlay",
+		.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
+		.regs_offset = 0x340,
+		.id = 3,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 42,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.memsize = 4,
+			.xstride = { 5, 7 },
+			.pstride = { 6, 8 },
+			.default_color = 9,
+			.chroma_key = 10,
+			.chroma_key_mask = 11,
+			.general_config = 12,
+			.csc = 14,
+		},
+	},
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = {
+	.min_width = 0,
+	.min_height = 0,
+	.max_width = 2048,
+	.max_height = 2048,
+	.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers),
+	.layers = atmel_hlcdc_sama5d4_layers,
+};
 static const struct of_device_id atmel_hlcdc_of_match[] = {
 	{
+		.compatible = "atmel,at91sam9n12-hlcdc",
+		.data = &atmel_hlcdc_dc_at91sam9n12,
+	},
+	{
+		.compatible = "atmel,at91sam9x5-hlcdc",
+		.data = &atmel_hlcdc_dc_at91sam9x5,
+	},
+	{
 		.compatible = "atmel,sama5d3-hlcdc",
 		.data = &atmel_hlcdc_dc_sama5d3,
 	},
+	{
+		.compatible = "atmel,sama5d4-hlcdc",
+		.data = &atmel_hlcdc_dc_sama5d4,
+	},
 	{ /* sentinel */ },
 };
 
@@ -485,7 +688,9 @@ static const struct file_operations fops = {
 };
 
 static struct drm_driver atmel_hlcdc_dc_driver = {
-	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+			   DRIVER_MODESET | DRIVER_PRIME |
+			   DRIVER_ATOMIC,
 	.preclose = atmel_hlcdc_dc_preclose,
 	.lastclose = atmel_hlcdc_dc_lastclose,
 	.irq_handler = atmel_hlcdc_dc_irq_handler,
@@ -497,6 +702,15 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
 	.disable_vblank = atmel_hlcdc_dc_disable_vblank,
 	.gem_free_object = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap = drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap = drm_gem_cma_prime_mmap,
 	.dumb_create = drm_gem_cma_dumb_create,
 	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
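
Each new compatible string carries its atmel_hlcdc_dc_desc in the .data field; the probe path would typically recover the per-SoC descriptor along these lines (a hedged sketch, not this file's actual probe code):

/* Hedged sketch: fetching the matched descriptor (not the real probe). */
static int my_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct atmel_hlcdc_dc_desc *desc;

	match = of_match_node(atmel_hlcdc_of_match, pdev->dev.of_node);
	if (!match)
		return -ENODEV;

	desc = match->data;
	dev_info(&pdev->dev, "max mode %dx%d\n",
		 desc->max_width, desc->max_height);
	return 0;
}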
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 9c4513005310..067e4c144bd6 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -126,12 +126,16 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
 
 	if (info->num_bus_formats) {
 		switch (info->bus_formats[0]) {
+		case MEDIA_BUS_FMT_RGB565_1X16:
+			cfg |= ATMEL_HLCDC_CONNECTOR_RGB565 << 8;
+			break;
 		case MEDIA_BUS_FMT_RGB666_1X18:
 			cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8;
 			break;
 		case MEDIA_BUS_FMT_RGB888_1X24:
 			cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8;
 			break;
+		case MEDIA_BUS_FMT_RGB444_1X12:
 		default:
 			break;
 		}
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 98837bde2d25..7f1a3604b19f 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -109,7 +109,7 @@ static int bochs_pm_suspend(struct device *dev)
 
 	if (bochs->fb.initialized) {
 		console_lock();
-		fb_set_suspend(bochs->fb.helper.fbdev, 1);
+		drm_fb_helper_set_suspend(&bochs->fb.helper, 1);
 		console_unlock();
 	}
 
@@ -126,7 +126,7 @@ static int bochs_pm_resume(struct device *dev)
 
 	if (bochs->fb.initialized) {
 		console_lock();
-		fb_set_suspend(bochs->fb.helper.fbdev, 0);
+		drm_fb_helper_set_suspend(&bochs->fb.helper, 0);
 		console_unlock();
 	}
 
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 976d9798dc99..09a0637aab3e 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -24,9 +24,9 @@ static struct fb_ops bochsfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -56,11 +56,9 @@ static int bochsfb_create(struct drm_fb_helper *helper,
 {
 	struct bochs_device *bochs =
 		container_of(helper, struct bochs_device, fb.helper);
-	struct drm_device *dev = bochs->dev;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct device *device = &dev->pdev->dev;
 	struct drm_gem_object *gobj = NULL;
 	struct bochs_bo *bo = NULL;
 	int size, ret;
@@ -106,22 +104,23 @@ static int bochsfb_create(struct drm_fb_helper *helper,
 	ttm_bo_unreserve(&bo->bo);
 
 	/* init fb device */
-	info = framebuffer_alloc(0, device);
-	if (info == NULL)
-		return -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
 
 	info->par = &bochs->fb.helper;
 
 	ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
-	if (ret)
+	if (ret) {
+		drm_fb_helper_release_fbi(helper);
 		return ret;
+	}
 
 	bochs->fb.size = size;
 
 	/* setup helper */
 	fb = &bochs->fb.gfb.base;
 	bochs->fb.helper.fb = fb;
-	bochs->fb.helper.fbdev = info;
 
 	strcpy(info->fix.id, "bochsdrmfb");
 
@@ -139,30 +138,17 @@ static int bochsfb_create(struct drm_fb_helper *helper,
 	info->fix.smem_start = 0;
 	info->fix.smem_len = size;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
 static int bochs_fbdev_destroy(struct bochs_device *bochs)
 {
 	struct bochs_framebuffer *gfb = &bochs->fb.gfb;
-	struct fb_info *info;
 
 	DRM_DEBUG_DRIVER("\n");
 
-	if (bochs->fb.helper.fbdev) {
-		info = bochs->fb.helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&bochs->fb.helper);
+	drm_fb_helper_release_fbi(&bochs->fb.helper);
 
 	if (gfb->obj) {
 		drm_gem_object_unreference_unlocked(gfb->obj);
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 66286ff518d4..f69e6bf9bb0e 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -454,25 +454,17 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
 			   uint32_t handle, uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct bochs_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_bochs_bo(obj);
 	*offset = bochs_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-
+	drm_gem_object_unreference_unlocked(obj);
+	return 0;
 }
 
 /* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index acef3223772c..2de52a53a803 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -1,24 +1,32 @@
+config DRM_BRIDGE
+	def_bool y
+	depends on DRM
+	help
+	  Bridge registration and lookup framework.
+
+menu "Display Interface Bridges"
+	depends on DRM && DRM_BRIDGE
+
 config DRM_DW_HDMI
 	tristate
-	depends on DRM
 	select DRM_KMS_HELPER
 
-config DRM_PTN3460
-	tristate "PTN3460 DP/LVDS bridge"
-	depends on DRM
+config DRM_NXP_PTN3460
+	tristate "NXP PTN3460 DP/LVDS bridge"
 	depends on OF
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	---help---
-	  ptn3460 eDP-LVDS bridge chip driver.
+	  NXP PTN3460 eDP-LVDS bridge chip driver.
 
-config DRM_PS8622
+config DRM_PARADE_PS8622
 	tristate "Parade eDP/LVDS bridge"
-	depends on DRM
 	depends on OF
 	select DRM_PANEL
 	select DRM_KMS_HELPER
 	select BACKLIGHT_LCD_SUPPORT
 	select BACKLIGHT_CLASS_DEVICE
 	---help---
-	  parade eDP-LVDS bridge chip driver.
+	  Parade eDP-LVDS bridge chip driver.
+
+endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 8dfebd984370..e2eef1c2f4c3 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,5 @@
 ccflags-y := -Iinclude/drm
 
-obj-$(CONFIG_DRM_PS8622) += ps8622.o
-obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
 obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
+obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
+obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c
index 816d104ca4da..0083d4e7e7e2 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw_hdmi.c
@@ -18,6 +18,7 @@
 #include <linux/hdmi.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
+#include <linux/spinlock.h>
 
 #include <drm/drm_of.h>
 #include <drm/drmP.h>
@@ -81,10 +82,6 @@ static const u16 csc_coeff_rgb_in_eitu709[3][4] = {
 };
 
 struct hdmi_vmode {
-	bool mdvi;
-	bool mhsyncpolarity;
-	bool mvsyncpolarity;
-	bool minterlaced;
 	bool mdataenablepolarity;
 
 	unsigned int mpixelclock;
@@ -123,12 +120,20 @@ struct dw_hdmi {
 	bool phy_enabled;
 	struct drm_display_mode previous_mode;
 
-	struct regmap *regmap;
 	struct i2c_adapter *ddc;
 	void __iomem *regs;
+	bool sink_is_hdmi;
+	bool sink_has_audio;
 
+	struct mutex mutex;		/* for state below and previous_mode */
+	bool disabled;			/* DRM has disabled our bridge */
+
+	spinlock_t audio_lock;
 	struct mutex audio_mutex;
 	unsigned int sample_rate;
+	unsigned int audio_cts;
+	unsigned int audio_n;
+	bool audio_enable;
 	int ratio;
 
 	void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
@@ -335,42 +340,76 @@ static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
 }
 
 static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
-				     unsigned long pixel_clk)
+	unsigned long pixel_clk, unsigned int sample_rate, unsigned int ratio)
 {
-	unsigned int clk_n, clk_cts;
-
-	clk_n = hdmi_compute_n(hdmi->sample_rate, pixel_clk,
-			       hdmi->ratio);
-	clk_cts = hdmi_compute_cts(hdmi->sample_rate, pixel_clk,
-				   hdmi->ratio);
+	unsigned int n, cts;
 
-	if (!clk_cts) {
-		dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
-			__func__, pixel_clk);
-		return;
+	n = hdmi_compute_n(sample_rate, pixel_clk, ratio);
+	cts = hdmi_compute_cts(sample_rate, pixel_clk, ratio);
+	if (!cts) {
+		dev_err(hdmi->dev,
+			"%s: pixel clock/sample rate not supported: %luMHz / %ukHz\n",
+			__func__, pixel_clk, sample_rate);
 	}
 
-	dev_dbg(hdmi->dev, "%s: samplerate=%d  ratio=%d  pixelclk=%lu  N=%d cts=%d\n",
-		__func__, hdmi->sample_rate, hdmi->ratio,
-		pixel_clk, clk_n, clk_cts);
+	dev_dbg(hdmi->dev, "%s: samplerate=%ukHz ratio=%d pixelclk=%luMHz N=%d cts=%d\n",
+		__func__, sample_rate, ratio, pixel_clk, n, cts);
 
-	hdmi_set_cts_n(hdmi, clk_cts, clk_n);
+	spin_lock_irq(&hdmi->audio_lock);
+	hdmi->audio_n = n;
+	hdmi->audio_cts = cts;
+	hdmi_set_cts_n(hdmi, cts, hdmi->audio_enable ? n : 0);
+	spin_unlock_irq(&hdmi->audio_lock);
 }
 
 static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
 {
 	mutex_lock(&hdmi->audio_mutex);
-	hdmi_set_clk_regenerator(hdmi, 74250000);
+	hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate,
+				 hdmi->ratio);
 	mutex_unlock(&hdmi->audio_mutex);
 }
 
 static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
 {
 	mutex_lock(&hdmi->audio_mutex);
-	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
+	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
+				 hdmi->sample_rate, hdmi->ratio);
 	mutex_unlock(&hdmi->audio_mutex);
 }
 
+void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
+{
+	mutex_lock(&hdmi->audio_mutex);
+	hdmi->sample_rate = rate;
+	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
+				 hdmi->sample_rate, hdmi->ratio);
+	mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
+
+void dw_hdmi_audio_enable(struct dw_hdmi *hdmi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->audio_lock, flags);
+	hdmi->audio_enable = true;
+	hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
+	spin_unlock_irqrestore(&hdmi->audio_lock, flags);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_audio_enable);
+
+void dw_hdmi_audio_disable(struct dw_hdmi *hdmi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->audio_lock, flags);
+	hdmi->audio_enable = false;
+	hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0);
+	spin_unlock_irqrestore(&hdmi->audio_lock, flags);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_audio_disable);
+
 /*
  * this submodule is responsible for the video data synchronization.
  * for example, for RGB 4:4:4 input, the data map is defined as
@@ -701,9 +740,9 @@ static int hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
 	return 0;
 }
 
-static void dw_hdmi_phy_enable_power(struct dw_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_enable_powerdown(struct dw_hdmi *hdmi, bool enable)
 {
-	hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+	hdmi_mask_writeb(hdmi, !enable, HDMI_PHY_CONF0,
 			 HDMI_PHY_CONF0_PDZ_OFFSET,
 			 HDMI_PHY_CONF0_PDZ_MASK);
 }
@@ -753,12 +792,12 @@ static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable)
 static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
 			      unsigned char res, int cscon)
 {
-	unsigned res_idx, i;
+	unsigned res_idx;
 	u8 val, msec;
-	const struct dw_hdmi_plat_data *plat_data = hdmi->plat_data;
-	const struct dw_hdmi_mpll_config *mpll_config = plat_data->mpll_cfg;
-	const struct dw_hdmi_curr_ctrl *curr_ctrl = plat_data->cur_ctr;
-	const struct dw_hdmi_phy_config *phy_config = plat_data->phy_config;
+	const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;
+	const struct dw_hdmi_mpll_config *mpll_config = pdata->mpll_cfg;
+	const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr;
+	const struct dw_hdmi_phy_config *phy_config = pdata->phy_config;
 
 	if (prep)
 		return -EINVAL;
@@ -778,6 +817,30 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
 		return -EINVAL;
 	}
 
+	/* PLL/MPLL Cfg - always match on final entry */
+	for (; mpll_config->mpixelclock != ~0UL; mpll_config++)
+		if (hdmi->hdmi_data.video_mode.mpixelclock <=
+		    mpll_config->mpixelclock)
+			break;
+
+	for (; curr_ctrl->mpixelclock != ~0UL; curr_ctrl++)
+		if (hdmi->hdmi_data.video_mode.mpixelclock <=
+		    curr_ctrl->mpixelclock)
+			break;
+
+	for (; phy_config->mpixelclock != ~0UL; phy_config++)
+		if (hdmi->hdmi_data.video_mode.mpixelclock <=
+		    phy_config->mpixelclock)
+			break;
+
+	if (mpll_config->mpixelclock == ~0UL ||
+	    curr_ctrl->mpixelclock == ~0UL ||
+	    phy_config->mpixelclock == ~0UL) {
+		dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n",
+			hdmi->hdmi_data.video_mode.mpixelclock);
+		return -EINVAL;
+	}
+
 	/* Enable csc path */
 	if (cscon)
 		val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH;
@@ -803,48 +866,23 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
 		    HDMI_PHY_I2CM_SLAVE_ADDR);
 	hdmi_phy_test_clear(hdmi, 0);
 
-	/* PLL/MPLL Cfg - always match on final entry */
-	for (i = 0; mpll_config[i].mpixelclock != (~0UL); i++)
-		if (hdmi->hdmi_data.video_mode.mpixelclock <=
-		    mpll_config[i].mpixelclock)
-			break;
-
-	hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].cpce, 0x06);
-	hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].gmp, 0x15);
-
-	for (i = 0; curr_ctrl[i].mpixelclock != (~0UL); i++)
-		if (hdmi->hdmi_data.video_mode.mpixelclock <=
-		    curr_ctrl[i].mpixelclock)
-			break;
-
-	if (curr_ctrl[i].mpixelclock == (~0UL)) {
-		dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n",
-			hdmi->hdmi_data.video_mode.mpixelclock);
-		return -EINVAL;
-	}
+	hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].cpce, 0x06);
+	hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].gmp, 0x15);
 
 	/* CURRCTRL */
-	hdmi_phy_i2c_write(hdmi, curr_ctrl[i].curr[res_idx], 0x10);
+	hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[res_idx], 0x10);
 
 	hdmi_phy_i2c_write(hdmi, 0x0000, 0x13);  /* PLLPHBYCTRL */
 	hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
 
-	for (i = 0; phy_config[i].mpixelclock != (~0UL); i++)
-		if (hdmi->hdmi_data.video_mode.mpixelclock <=
-		    phy_config[i].mpixelclock)
-			break;
-
-	/* RESISTANCE TERM 133Ohm Cfg */
-	hdmi_phy_i2c_write(hdmi, phy_config[i].term, 0x19);  /* TXTERM */
-	/* PREEMP Cgf 0.00 */
-	hdmi_phy_i2c_write(hdmi, phy_config[i].sym_ctr, 0x09); /* CKSYMTXCTRL */
-	/* TX/CK LVL 10 */
-	hdmi_phy_i2c_write(hdmi, phy_config[i].vlev_ctr, 0x0E); /* VLEVCTRL */
+	hdmi_phy_i2c_write(hdmi, phy_config->term, 0x19);  /* TXTERM */
+	hdmi_phy_i2c_write(hdmi, phy_config->sym_ctr, 0x09); /* CKSYMTXCTRL */
+	hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr, 0x0E); /* VLEVCTRL */
 
 	/* REMOVE CLK TERM */
 	hdmi_phy_i2c_write(hdmi, 0x8000, 0x05);  /* CKCALCTRL */
 
-	dw_hdmi_phy_enable_power(hdmi, 1);
+	dw_hdmi_phy_enable_powerdown(hdmi, false);
 
 	/* toggle TMDS enable */
 	dw_hdmi_phy_enable_tmds(hdmi, 0);
@@ -879,18 +917,17 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
 static int dw_hdmi_phy_init(struct dw_hdmi *hdmi)
 {
 	int i, ret;
-	bool cscon = false;
+	bool cscon;
 
 	/* check whether csc needs to be activated in HDMI mode */
-	cscon = (is_color_space_conversion(hdmi) &&
-			!hdmi->hdmi_data.video_mode.mdvi);
+	cscon = hdmi->sink_is_hdmi && is_color_space_conversion(hdmi);
 
 	/* HDMI Phy spec says to do the phy initialization sequence twice */
 	for (i = 0; i < 2; i++) {
 		dw_hdmi_phy_sel_data_en_pol(hdmi, 1);
 		dw_hdmi_phy_sel_interface_control(hdmi, 0);
 		dw_hdmi_phy_enable_tmds(hdmi, 0);
-		dw_hdmi_phy_enable_power(hdmi, 0);
+		dw_hdmi_phy_enable_powerdown(hdmi, true);
 
 		/* Enable CSC */
 		ret = hdmi_phy_configure(hdmi, 0, 8, cscon);
@@ -921,74 +958,76 @@ static void hdmi_tx_hdcp_config(struct dw_hdmi *hdmi)
 		  HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1);
 }
 
-static void hdmi_config_AVI(struct dw_hdmi *hdmi)
+static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 {
-	u8 val, pix_fmt, under_scan;
-	u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
-	bool aspect_16_9;
+	struct hdmi_avi_infoframe frame;
+	u8 val;
 
-	aspect_16_9 = false; /* FIXME */
+	/* Initialise info frame from DRM mode */
+	drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
 
-	/* AVI Data Byte 1 */
 	if (hdmi->hdmi_data.enc_out_format == YCBCR444)
-		pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444;
+		frame.colorspace = HDMI_COLORSPACE_YUV444;
 	else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
-		pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422;
+		frame.colorspace = HDMI_COLORSPACE_YUV422;
 	else
-		pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB;
-
-		under_scan =  HDMI_FC_AVICONF0_SCAN_INFO_NODATA;
-
-	/*
-	 * Active format identification data is present in the AVI InfoFrame.
-	 * Under scan info, no bar data
-	 */
-	val = pix_fmt | under_scan |
-		HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT |
-		HDMI_FC_AVICONF0_BAR_DATA_NO_DATA;
-
-	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0);
-
-	/* AVI Data Byte 2 -Set the Aspect Ratio */
-	if (aspect_16_9) {
-		act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9;
-		coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9;
-	} else {
-		act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3;
-		coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3;
-	}
+		frame.colorspace = HDMI_COLORSPACE_RGB;
 
 	/* Set up colorimetry */
 	if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
-		colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
+		frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
 		if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
-			ext_colorimetry =
-				HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+			frame.extended_colorimetry =
+				HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
 		else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
-			ext_colorimetry =
-				HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
+			frame.extended_colorimetry =
+				HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
 	} else if (hdmi->hdmi_data.enc_out_format != RGB) {
-		if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
-			colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
-		else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
-			colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
-		ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+		frame.colorimetry = hdmi->hdmi_data.colorimetry;
+		frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
 	} else { /* Carries no data */
-		colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA;
-		ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+		frame.colorimetry = HDMI_COLORIMETRY_NONE;
+		frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
 	}
 
-	val = colorimetry | coded_ratio | act_ratio;
+	frame.scan_mode = HDMI_SCAN_MODE_NONE;
+
+	/*
+	 * The Designware IP uses a different byte format from standard
+	 * AVI info frames, though generally the bits are in the correct
+	 * bytes.
+	 */
+
+	/*
+	 * AVI data byte 1 differences: Colorspace in bits 4,5 rather than 5,6,
+	 * active aspect present in bit 6 rather than 4.
+	 */
+	val = (frame.colorspace & 3) << 4 | (frame.scan_mode & 0x3);
+	if (frame.active_aspect & 15)
+		val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT;
+	if (frame.top_bar || frame.bottom_bar)
+		val |= HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR;
+	if (frame.left_bar || frame.right_bar)
+		val |= HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR;
+	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0);
+
+	/* AVI data byte 2 differences: none */
+	val = ((frame.colorimetry & 0x3) << 6) |
+	      ((frame.picture_aspect & 0x3) << 4) |
+	      (frame.active_aspect & 0xf);
 	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF1);
 
-	/* AVI Data Byte 3 */
-	val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry |
-		HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT |
-		HDMI_FC_AVICONF2_SCALING_NONE;
+	/* AVI data byte 3 differences: none */
+	val = ((frame.extended_colorimetry & 0x7) << 4) |
+	      ((frame.quantization_range & 0x3) << 2) |
+	      (frame.nups & 0x3);
+	if (frame.itc)
+		val |= HDMI_FC_AVICONF2_IT_CONTENT_VALID;
 	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF2);
 
-	/* AVI Data Byte 4 */
-	hdmi_writeb(hdmi, hdmi->vic, HDMI_FC_AVIVID);
+	/* AVI data byte 4 differences: none */
+	val = frame.video_code & 0x7f;
+	hdmi_writeb(hdmi, val, HDMI_FC_AVIVID);
 
 	/* AVI Data Byte 5- set up input and output pixel repetition */
 	val = (((hdmi->hdmi_data.video_mode.mpixelrepetitioninput + 1) <<
@@ -999,20 +1038,23 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi)
 		HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK);
 	hdmi_writeb(hdmi, val, HDMI_FC_PRCONF);
 
-	/* IT Content and quantization range = don't care */
-	val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS |
-		HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED;
+	/*
+	 * AVI data byte 5 differences: content type in 0,1 rather than 4,5,
+	 * ycc range in bits 2,3 rather than 6,7
+	 */
+	val = ((frame.ycc_quantization_range & 0x3) << 2) |
+	      (frame.content_type & 0x3);
 	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF3);
 
 	/* AVI Data Bytes 6-13 */
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB1);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB1);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB1);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB1);
+	hdmi_writeb(hdmi, frame.top_bar & 0xff, HDMI_FC_AVIETB0);
+	hdmi_writeb(hdmi, (frame.top_bar >> 8) & 0xff, HDMI_FC_AVIETB1);
+	hdmi_writeb(hdmi, frame.bottom_bar & 0xff, HDMI_FC_AVISBB0);
+	hdmi_writeb(hdmi, (frame.bottom_bar >> 8) & 0xff, HDMI_FC_AVISBB1);
+	hdmi_writeb(hdmi, frame.left_bar & 0xff, HDMI_FC_AVIELB0);
+	hdmi_writeb(hdmi, (frame.left_bar >> 8) & 0xff, HDMI_FC_AVIELB1);
+	hdmi_writeb(hdmi, frame.right_bar & 0xff, HDMI_FC_AVISRB0);
+	hdmi_writeb(hdmi, (frame.right_bar >> 8) & 0xff, HDMI_FC_AVISRB1);
 }
 
 static void hdmi_av_composer(struct dw_hdmi *hdmi,
@@ -1022,9 +1064,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 	struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
 	int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
 
-	vmode->mhsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PHSYNC);
-	vmode->mvsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PVSYNC);
-	vmode->minterlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
 	vmode->mpixelclock = mode->clock * 1000;
 
 	dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
@@ -1034,13 +1073,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 		HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
 		HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
 
-	inv_val |= (vmode->mvsyncpolarity ?
+	inv_val |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
 		HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
-		HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW);
+		HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW;
 
-	inv_val |= (vmode->mhsyncpolarity ?
+	inv_val |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
 		HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH :
-		HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW);
+		HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW;
 
 	inv_val |= (vmode->mdataenablepolarity ?
 		HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH :
@@ -1049,17 +1088,17 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 	if (hdmi->vic == 39)
 		inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH;
 	else
-		inv_val |= (vmode->minterlaced ?
+		inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
 			HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH :
-			HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW);
+			HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW;
 
-	inv_val |= (vmode->minterlaced ?
+	inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
 		HDMI_FC_INVIDCONF_IN_I_P_INTERLACED :
-		HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE);
+		HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE;
 
-	inv_val |= (vmode->mdvi ?
-		HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE :
-		HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE);
+	inv_val |= hdmi->sink_is_hdmi ?
+		HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE :
+		HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE;
 
 	hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF);
 
@@ -1105,7 +1144,7 @@ static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi)
 		return;
 
 	dw_hdmi_phy_enable_tmds(hdmi, 0);
-	dw_hdmi_phy_enable_power(hdmi, 0);
+	dw_hdmi_phy_enable_powerdown(hdmi, true);
 
 	hdmi->phy_enabled = false;
 }
@@ -1186,10 +1225,8 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 
 	if (!hdmi->vic) {
 		dev_dbg(hdmi->dev, "Non-CEA mode used in HDMI\n");
-		hdmi->hdmi_data.video_mode.mdvi = true;
 	} else {
 		dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic);
-		hdmi->hdmi_data.video_mode.mdvi = false;
 	}
 
 	if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
@@ -1200,18 +1237,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 	else
 		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
 
-	if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
-	    (hdmi->vic == 12) || (hdmi->vic == 13) ||
-	    (hdmi->vic == 14) || (hdmi->vic == 15) ||
-	    (hdmi->vic == 25) || (hdmi->vic == 26) ||
-	    (hdmi->vic == 27) || (hdmi->vic == 28) ||
-	    (hdmi->vic == 29) || (hdmi->vic == 30) ||
-	    (hdmi->vic == 35) || (hdmi->vic == 36) ||
-	    (hdmi->vic == 37) || (hdmi->vic == 38))
-		hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1;
-	else
-		hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
-
+	hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
 	hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
 
 	/* TODO: Get input format from IPU (via FB driver interface) */
@@ -1235,18 +1261,22 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 	/* HDMI Initialization Step B.3 */
 	dw_hdmi_enable_video_path(hdmi);
 
-	/* not for DVI mode */
-	if (hdmi->hdmi_data.video_mode.mdvi) {
-		dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
-	} else {
-		dev_dbg(hdmi->dev, "%s CEA mode\n", __func__);
+	if (hdmi->sink_has_audio) {
+		dev_dbg(hdmi->dev, "sink has audio support\n");
 
 		/* HDMI Initialization Step E - Configure audio */
 		hdmi_clk_regenerator_update_pixel_clock(hdmi);
 		hdmi_enable_audio_clk(hdmi);
+	}
+
+	/* not for DVI mode */
+	if (hdmi->sink_is_hdmi) {
+		dev_dbg(hdmi->dev, "%s HDMI mode\n", __func__);
 
 		/* HDMI Initialization Step F - Configure AVI InfoFrame */
-		hdmi_config_AVI(hdmi);
+		hdmi_config_AVI(hdmi, mode);
+	} else {
+		dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
 	}
 
 	hdmi_video_packetize(hdmi);
@@ -1255,7 +1285,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 	hdmi_tx_hdcp_config(hdmi);
 
 	dw_hdmi_clear_overflow(hdmi);
-	if (hdmi->cable_plugin && !hdmi->hdmi_data.video_mode.mdvi)
+	if (hdmi->cable_plugin && hdmi->sink_is_hdmi)
 		hdmi_enable_overflow_interrupts(hdmi);
 
 	return 0;
@@ -1348,10 +1378,12 @@ static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
-	dw_hdmi_setup(hdmi, mode);
+	mutex_lock(&hdmi->mutex);
 
 	/* Store the display mode for plugin/DPMS poweron events */
 	memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
+
+	mutex_unlock(&hdmi->mutex);
 }
 
 static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -1365,14 +1397,20 @@ static void dw_hdmi_bridge_disable(struct drm_bridge *bridge)
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
+	mutex_lock(&hdmi->mutex);
+	hdmi->disabled = true;
 	dw_hdmi_poweroff(hdmi);
+	mutex_unlock(&hdmi->mutex);
 }
 
 static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
+	mutex_lock(&hdmi->mutex);
 	dw_hdmi_poweron(hdmi);
+	hdmi->disabled = false;
+	mutex_unlock(&hdmi->mutex);
 }
 
 static void dw_hdmi_bridge_nop(struct drm_bridge *bridge)
@@ -1405,6 +1443,8 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
 		dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
 			edid->width_cm, edid->height_cm);
 
+		hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+		hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
 		kfree(edid);
@@ -1423,6 +1463,10 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector,
 					   struct dw_hdmi, connector);
 	enum drm_mode_status mode_status = MODE_OK;
 
+	/* We don't support double-clocked modes */
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_BAD;
+
 	if (hdmi->plat_data->mode_valid)
 		mode_status = hdmi->plat_data->mode_valid(connector, mode);
 
@@ -1489,21 +1533,21 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
 	phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
 
 	if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
+		hdmi_modb(hdmi, ~phy_int_pol, HDMI_PHY_HPD, HDMI_PHY_POL0);
+		mutex_lock(&hdmi->mutex);
 		if (phy_int_pol & HDMI_PHY_HPD) {
 			dev_dbg(hdmi->dev, "EVENT=plugin\n");
 
-			hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
-
-			dw_hdmi_poweron(hdmi);
+			if (!hdmi->disabled)
+				dw_hdmi_poweron(hdmi);
 		} else {
 			dev_dbg(hdmi->dev, "EVENT=plugout\n");
 
-			hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
-				  HDMI_PHY_POL0);
-
-			dw_hdmi_poweroff(hdmi);
+			if (!hdmi->disabled)
+				dw_hdmi_poweroff(hdmi);
 		}
-		drm_helper_hpd_irq_event(hdmi->connector.dev);
+		mutex_unlock(&hdmi->mutex);
+		drm_helper_hpd_irq_event(hdmi->bridge->dev);
 	}
 
 	hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
@@ -1570,8 +1614,11 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
 	hdmi->sample_rate = 48000;
 	hdmi->ratio = 100;
 	hdmi->encoder = encoder;
+	hdmi->disabled = true;
 
+	mutex_init(&hdmi->mutex);
 	mutex_init(&hdmi->audio_mutex);
+	spin_lock_init(&hdmi->audio_lock);
 
 	of_property_read_u32(np, "reg-io-width", &val);
 
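dw_hdmi_set_sample_rate(), dw_hdmi_audio_enable() and dw_hdmi_audio_disable() are exported precisely so an audio-side consumer can drive the CTS/N machinery added above. A hedged sketch of how such a client might sequence them; the my_audio_* callbacks are hypothetical:

/* Hedged sketch of an audio-side client; my_audio_* are hypothetical. */
static int my_audio_hw_params(struct dw_hdmi *hdmi, unsigned int rate)
{
	/* takes audio_mutex internally and recomputes N/CTS */
	dw_hdmi_set_sample_rate(hdmi, rate);
	return 0;
}

static int my_audio_startup(struct dw_hdmi *hdmi)
{
	/* takes audio_lock internally; reprograms CTS/N with N enabled */
	dw_hdmi_audio_enable(hdmi);
	return 0;
}

static void my_audio_shutdown(struct dw_hdmi *hdmi)
{
	/* writes N = 0, which mutes the audio stream */
	dw_hdmi_audio_disable(hdmi);
}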
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.h b/drivers/gpu/drm/bridge/dw_hdmi.h
index 175dbc89a824..ee7f7ed2ab12 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.h
+++ b/drivers/gpu/drm/bridge/dw_hdmi.h
@@ -7,8 +7,8 @@
  * (at your option) any later version.
  */
 
-#ifndef __IMX_HDMI_H__
-#define __IMX_HDMI_H__
+#ifndef __DW_HDMI_H__
+#define __DW_HDMI_H__
 
 /* Identification Registers */
 #define HDMI_DESIGN_ID                          0x0000
@@ -525,7 +525,7 @@
 
 /* I2C Master Registers (E-DDC) */
 #define HDMI_I2CM_SLAVE                         0x7E00
-#define HDMI_I2CMESS                            0x7E01
+#define HDMI_I2CM_ADDRESS                       0x7E01
 #define HDMI_I2CM_DATAO                         0x7E02
 #define HDMI_I2CM_DATAI                         0x7E03
 #define HDMI_I2CM_OPERATION                     0x7E04
@@ -1031,4 +1031,4 @@ enum {
 	HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
 };
 
-#endif /* __IMX_HDMI_H__ */
+#endif /* __DW_HDMI_H__ */
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 1b1bf2384815..1b1bf2384815 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 1a6607beb29f..1a6607beb29f 100644
--- a/drivers/gpu/drm/bridge/ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index b9140032962d..b1619e29a564 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -92,7 +92,7 @@ static int cirrus_pm_suspend(struct device *dev)
 
 	if (cdev->mode_info.gfbdev) {
 		console_lock();
-		fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
+		drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1);
 		console_unlock();
 	}
 
@@ -109,7 +109,7 @@ static int cirrus_pm_resume(struct device *dev)
 
 	if (cdev->mode_info.gfbdev) {
 		console_lock();
-		fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
+		drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0);
 		console_unlock();
 	}
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 13ddf1c4bb8e..589103bcc06c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -98,7 +98,7 @@ static void cirrus_fillrect(struct fb_info *info,
 			 const struct fb_fillrect *rect)
 {
 	struct cirrus_fbdev *afbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -107,7 +107,7 @@ static void cirrus_copyarea(struct fb_info *info,
 			 const struct fb_copyarea *area)
 {
 	struct cirrus_fbdev *afbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -116,7 +116,7 @@ static void cirrus_imageblit(struct fb_info *info,
 			  const struct fb_image *image)
 {
 	struct cirrus_fbdev *afbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -165,12 +165,10 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
 {
 	struct cirrus_fbdev *gfbdev =
 		container_of(helper, struct cirrus_fbdev, helper);
-	struct drm_device *dev = gfbdev->helper.dev;
 	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct device *device = &dev->pdev->dev;
 	void *sysram;
 	struct drm_gem_object *gobj = NULL;
 	struct cirrus_bo *bo = NULL;
@@ -195,9 +193,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
 	if (!sysram)
 		return -ENOMEM;
 
-	info = framebuffer_alloc(0, device);
-	if (info == NULL)
-		return -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
 
 	info->par = gfbdev;
 
@@ -216,11 +214,9 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
 
 	/* setup helper */
 	gfbdev->helper.fb = fb;
-	gfbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "cirrusdrmfb");
 
-
 	info->flags = FBINFO_DEFAULT;
 	info->fbops = &cirrusfb_ops;
 
@@ -229,11 +225,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
 			       sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_iounmap;
-	}
 	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = cdev->mc.vram_size;
 
@@ -246,13 +237,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
 	info->fix.mmio_start = 0;
 	info->fix.mmio_len = 0;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
-		ret = -ENOMEM;
-		goto out_iounmap;
-	}
-
 	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
 	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
 	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
@@ -260,24 +244,15 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
 	return 0;
-out_iounmap:
-	return ret;
 }
 
 static int cirrus_fbdev_destroy(struct drm_device *dev,
 				struct cirrus_fbdev *gfbdev)
 {
-	struct fb_info *info;
 	struct cirrus_framebuffer *gfb = &gfbdev->gfb;
 
-	if (gfbdev->helper.fbdev) {
-		info = gfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&gfbdev->helper);
+	drm_fb_helper_release_fbi(&gfbdev->helper);
 
 	if (gfb->obj) {
 		drm_gem_object_unreference_unlocked(gfb->obj);
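The conversion above hands fb_info lifetime management to the core: drm_fb_helper_alloc_fbi() allocates the info together with its cmap and apertures and links it to helper->fbdev, while the unregister/release pair at teardown replaces the open-coded unregister_framebuffer()/fb_dealloc_cmap()/framebuffer_release() sequence. A skeleton of the resulting driver code, with foo_* names assumed:

static int foo_fbdev_create(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	struct fb_info *info;

	/* Allocates fb_info, cmap and apertures; sets helper->fbdev. */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->par = helper;
	info->fbops = &foo_fb_ops;
	/* ... create the backing drm_framebuffer, fill fix/var ... */

	return 0;
}

static void foo_fbdev_destroy(struct drm_fb_helper *helper)
{
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);
}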
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index e4b976658087..055fd86ba717 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -293,25 +293,18 @@ cirrus_dumb_mmap_offset(struct drm_file *file,
 		     uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct cirrus_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_cirrus_bo(obj);
 	*offset = cirrus_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	drm_gem_object_unreference_unlocked(obj);
 
+	return 0;
 }
 
 bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
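drm_gem_object_lookup() takes its own reference on the object and never needed dev->struct_mutex, so the lock/goto dance collapses into straight-line code; the only subtlety is that the final unreference must use the _unlocked variant because no lock is held at that point. The same shape in a hypothetical driver:

static int foo_dumb_mmap_offset(struct drm_file *file,
				struct drm_device *dev,
				uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL)
		return -ENOENT;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_unreference_unlocked(obj);
	return 0;
}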
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f6f2fb58eb37..434915448ea0 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -153,9 +153,15 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
 		if (!connector)
 			continue;
 
-		WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
-		connector->funcs->atomic_destroy_state(connector,
+		/*
+		 * FIXME: Async commits can race with connector unplugging and
+		 * there's currently nothing that prevents cleaning up state for
+		 * deleted connectors. As long as the callback doesn't look at
+		 * the connector we'll be fine though, so make sure that's the
+		 * case by setting all connector pointers to NULL.
+		 */
+		state->connector_states[i]->connector = NULL;
+		connector->funcs->atomic_destroy_state(NULL,
 						       state->connector_states[i]);
 		state->connectors[i] = NULL;
 		state->connector_states[i] = NULL;
@@ -1063,7 +1069,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
 	 * Changed connectors are already in @state, so only need to look at the
 	 * current configuration.
 	 */
-	list_for_each_entry(connector, &config->connector_list, head) {
+	drm_for_each_connector(connector, state->dev) {
 		if (connector->state->crtc != crtc)
 			continue;
 
@@ -1463,24 +1469,18 @@ retry:
 
 		if (get_user(obj_id, objs_ptr + copied_objs)) {
 			ret = -EFAULT;
-			goto fail;
+			goto out;
 		}
 
 		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
 		if (!obj || !obj->properties) {
 			ret = -ENOENT;
-			goto fail;
-		}
-
-		if (obj->type == DRM_MODE_OBJECT_PLANE) {
-			plane = obj_to_plane(obj);
-			plane_mask |= (1 << drm_plane_index(plane));
-			plane->old_fb = plane->fb;
+			goto out;
 		}
 
 		if (get_user(count_props, count_props_ptr + copied_objs)) {
 			ret = -EFAULT;
-			goto fail;
+			goto out;
 		}
 
 		copied_objs++;
@@ -1492,28 +1492,34 @@ retry:
 
 			if (get_user(prop_id, props_ptr + copied_props)) {
 				ret = -EFAULT;
-				goto fail;
+				goto out;
 			}
 
 			prop = drm_property_find(dev, prop_id);
 			if (!prop) {
 				ret = -ENOENT;
-				goto fail;
+				goto out;
 			}
 
 			if (copy_from_user(&prop_value,
 					   prop_values_ptr + copied_props,
 					   sizeof(prop_value))) {
 				ret = -EFAULT;
-				goto fail;
+				goto out;
 			}
 
 			ret = atomic_set_prop(state, obj, prop, prop_value);
 			if (ret)
-				goto fail;
+				goto out;
 
 			copied_props++;
 		}
+
+		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props) {
+			plane = obj_to_plane(obj);
+			plane_mask |= (1 << drm_plane_index(plane));
+			plane->old_fb = plane->fb;
+		}
 	}
 
 	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -1523,7 +1529,7 @@ retry:
 			e = create_vblank_event(dev, file_priv, arg->user_data);
 			if (!e) {
 				ret = -ENOMEM;
-				goto fail;
+				goto out;
 			}
 
 			crtc_state->event = e;
@@ -1533,13 +1539,15 @@ retry:
 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
 		ret = drm_atomic_check_only(state);
 		/* _check_only() does not free state, unlike _commit() */
-		drm_atomic_state_free(state);
+		if (!ret)
+			drm_atomic_state_free(state);
 	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
 		ret = drm_atomic_async_commit(state);
 	} else {
 		ret = drm_atomic_commit(state);
 	}
 
+out:
 	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
 	 * locks (ie. while it is still safe to deref plane->state).  We
 	 * need to do this here because the driver entry points cannot
@@ -1552,41 +1560,35 @@ retry:
 				drm_framebuffer_reference(new_fb);
 			plane->fb = new_fb;
 			plane->crtc = plane->state->crtc;
-		} else {
-			plane->old_fb = NULL;
-		}
-		if (plane->old_fb) {
-			drm_framebuffer_unreference(plane->old_fb);
-			plane->old_fb = NULL;
+
+			if (plane->old_fb)
+				drm_framebuffer_unreference(plane->old_fb);
 		}
+		plane->old_fb = NULL;
 	}
 
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-
-	return ret;
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
 
-fail:
-	if (ret == -EDEADLK)
-		goto backoff;
+	if (ret) {
+		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+			for_each_crtc_in_state(state, crtc, crtc_state, i) {
+				if (!crtc_state->event)
+					continue;
 
-	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			destroy_vblank_event(dev, file_priv, crtc_state->event);
-			crtc_state->event = NULL;
+				destroy_vblank_event(dev, file_priv,
+						     crtc_state->event);
+			}
 		}
-	}
 
-	drm_atomic_state_free(state);
+		drm_atomic_state_free(state);
+	}
 
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 
 	return ret;
-
-backoff:
-	drm_atomic_state_clear(state);
-	drm_modeset_backoff(&ctx);
-
-	goto retry;
 }
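After the restructuring the ioctl has a single exit path: every failure jumps to out, the legacy plane->fb/crtc pointers are fixed up while the locks are still held, and the ww-mutex backoff sits inline at the end instead of behind separate fail/backoff labels. The core retry idiom, reduced to a self-contained sketch (foo_commit is a made-up wrapper, not part of the patch):

static int foo_commit(struct drm_device *dev,
		      struct drm_atomic_state *state)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
	state->acquire_ctx = &ctx;
retry:
	ret = drm_atomic_commit(state);	/* takes ownership on success */
	if (ret == -EDEADLK) {
		/* Someone holds a lock we need: unwind and try again. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (ret)
		drm_atomic_state_free(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}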
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 3e65daa3436f..aecb5d69bc2d 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -89,7 +89,7 @@ get_current_crtc_for_encoder(struct drm_device *dev,
 
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-	list_for_each_entry(connector, &config->connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		if (connector->state->best_encoder != encoder)
 			continue;
 
@@ -124,7 +124,7 @@ steal_encoder(struct drm_atomic_state *state,
 	if (IS_ERR(crtc_state))
 		return PTR_ERR(crtc_state);
 
-	crtc_state->mode_changed = true;
+	crtc_state->connectors_changed = true;
 
 	list_for_each_entry(connector, &config->connector_list, head) {
 		if (connector->state->best_encoder != encoder)
@@ -174,14 +174,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 			idx = drm_crtc_index(connector->state->crtc);
 
 			crtc_state = state->crtc_states[idx];
-			crtc_state->mode_changed = true;
+			crtc_state->connectors_changed = true;
 		}
 
 		if (connector_state->crtc) {
 			idx = drm_crtc_index(connector_state->crtc);
 
 			crtc_state = state->crtc_states[idx];
-			crtc_state->mode_changed = true;
+			crtc_state->connectors_changed = true;
 		}
 	}
 
@@ -241,7 +241,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 	idx = drm_crtc_index(connector_state->crtc);
 
 	crtc_state = state->crtc_states[idx];
-	crtc_state->mode_changed = true;
+	crtc_state->connectors_changed = true;
 
 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
 			 connector->base.id,
@@ -264,7 +264,8 @@ mode_fixup(struct drm_atomic_state *state)
 	bool ret;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!crtc_state->mode_changed)
+		if (!crtc_state->mode_changed &&
+		    !crtc_state->connectors_changed)
 			continue;
 
 		drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
@@ -306,7 +307,7 @@ mode_fixup(struct drm_atomic_state *state)
 						 encoder->base.id, encoder->name);
 				return ret;
 			}
-		} else {
+		} else if (funcs->mode_fixup) {
 			ret = funcs->mode_fixup(encoder, &crtc_state->mode,
 						&crtc_state->adjusted_mode);
 			if (!ret) {
@@ -320,7 +321,8 @@ mode_fixup(struct drm_atomic_state *state)
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		const struct drm_crtc_helper_funcs *funcs;
 
-		if (!crtc_state->mode_changed)
+		if (!crtc_state->mode_changed &&
+		    !crtc_state->connectors_changed)
 			continue;
 
 		funcs = crtc->helper_private;
@@ -346,9 +348,14 @@ mode_fixup(struct drm_atomic_state *state)
  *
  * Check the state object to see if the requested state is physically possible.
  * This does all the crtc and connector related computations for an atomic
- * update. It computes and updates crtc_state->mode_changed, adds any additional
- * connectors needed for full modesets and calls down into ->mode_fixup
- * functions of the driver backend.
+ * update and adds any additional connectors needed for full modesets and calls
+ * down into ->mode_fixup functions of the driver backend.
+ *
+ * crtc_state->mode_changed is set when the input mode is changed.
+ * crtc_state->connectors_changed is set when a connector is added or
+ * removed from the crtc.
+ * crtc_state->active_changed is set when crtc_state->active changes,
+ * which is used for dpms.
  *
  * IMPORTANT:
  *
@@ -381,7 +388,17 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 		if (crtc->state->enable != crtc_state->enable) {
 			DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
 					 crtc->base.id);
+
+			/*
+			 * For clarity this assignment is done here, but
+			 * enable == 0 is only true when there are no
+			 * connectors and a NULL mode.
+			 *
+			 * The other way around is true as well. enable != 0
+			 * iff connectors are attached and a mode is set.
+			 */
 			crtc_state->mode_changed = true;
+			crtc_state->connectors_changed = true;
 		}
 	}
 
@@ -456,6 +473,9 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
  * This does all the plane update related checks by calling into the
  * ->atomic_check hooks provided by the driver.
  *
+ * It also sets crtc_state->planes_changed to indicate that a crtc has
+ * updated planes.
+ *
  * RETURNS
  * Zero for success or -errno
  */
@@ -648,15 +668,29 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
 	struct drm_crtc_state *old_crtc_state;
 	int i;
 
-	/* clear out existing links */
+	/* clear out existing links and update dpms */
 	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
-		if (!connector->encoder)
-			continue;
+		if (connector->encoder) {
+			WARN_ON(!connector->encoder->crtc);
 
-		WARN_ON(!connector->encoder->crtc);
+			connector->encoder->crtc = NULL;
+			connector->encoder = NULL;
+		}
 
-		connector->encoder->crtc = NULL;
-		connector->encoder = NULL;
+		crtc = connector->state->crtc;
+		if ((!crtc && old_conn_state->crtc) ||
+		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
+			struct drm_property *dpms_prop =
+				dev->mode_config.dpms_property;
+			int mode = DRM_MODE_DPMS_OFF;
+
+			if (crtc && crtc->state->active)
+				mode = DRM_MODE_DPMS_ON;
+
+			connector->dpms = mode;
+			drm_object_property_set_value(&connector->base,
+						      dpms_prop, mode);
+		}
 	}
 
 	/* set new links */
@@ -673,10 +707,16 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
 
 	/* set legacy state in the crtc structure */
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		struct drm_plane *primary = crtc->primary;
+
 		crtc->mode = crtc->state->mode;
 		crtc->enabled = crtc->state->enable;
-		crtc->x = crtc->primary->state->src_x >> 16;
-		crtc->y = crtc->primary->state->src_y >> 16;
+
+		if (drm_atomic_get_existing_plane_state(old_state, primary) &&
+		    primary->state->crtc == crtc) {
+			crtc->x = primary->state->src_x >> 16;
+			crtc->y = primary->state->src_y >> 16;
+		}
 
 		if (crtc->state->enable)
 			drm_calc_timestamping_constants(crtc,
@@ -926,7 +966,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
 			continue;
 
 		old_crtc_state->enable = true;
-		old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
+		old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc);
 	}
 
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -935,7 +975,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
 
 		ret = wait_event_timeout(dev->vblank[i].queue,
 				old_crtc_state->last_vblank_count !=
-					drm_vblank_count(dev, i),
+					drm_crtc_vblank_count(crtc),
 				msecs_to_jiffies(50));
 
 		drm_crtc_vblank_put(crtc);
@@ -1146,7 +1186,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 		if (!funcs || !funcs->atomic_begin)
 			continue;
 
-		funcs->atomic_begin(crtc);
+		funcs->atomic_begin(crtc, old_crtc_state);
 	}
 
 	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
@@ -1176,7 +1216,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 		if (!funcs || !funcs->atomic_flush)
 			continue;
 
-		funcs->atomic_flush(crtc);
+		funcs->atomic_flush(crtc, old_crtc_state);
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@@ -1212,7 +1252,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
 
 	crtc_funcs = crtc->helper_private;
 	if (crtc_funcs && crtc_funcs->atomic_begin)
-		crtc_funcs->atomic_begin(crtc);
+		crtc_funcs->atomic_begin(crtc, old_crtc_state);
 
 	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
 		struct drm_plane_state *old_plane_state =
@@ -1235,7 +1275,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
 	}
 
 	if (crtc_funcs && crtc_funcs->atomic_flush)
-		crtc_funcs->atomic_flush(crtc);
+		crtc_funcs->atomic_flush(crtc, old_crtc_state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
 
@@ -1923,10 +1963,6 @@ retry:
 	if (ret != 0)
 		goto fail;
 
-	/* TODO: ->page_flip is the only driver callback where the core
-	 * doesn't update plane->fb. For now patch it up here. */
-	plane->fb = plane->state->fb;
-
 	/* Driver takes ownership of state on successful async commit. */
 	return 0;
 fail:
@@ -1960,9 +1996,12 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
  * implementing the legacy DPMS connector interface. It computes the new desired
  * ->active state for the corresponding CRTC (if the connector is enabled) and
  *  updates it.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
  */
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
-				      int mode)
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+				     int mode)
 {
 	struct drm_mode_config *config = &connector->dev->mode_config;
 	struct drm_atomic_state *state;
@@ -1971,6 +2010,7 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
 	struct drm_connector *tmp_connector;
 	int ret;
 	bool active = false;
+	int old_mode = connector->dpms;
 
 	if (mode != DRM_MODE_DPMS_ON)
 		mode = DRM_MODE_DPMS_OFF;
@@ -1979,22 +2019,23 @@ void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
 	crtc = connector->state->crtc;
 
 	if (!crtc)
-		return;
+		return 0;
 
-	/* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */
 	state = drm_atomic_state_alloc(connector->dev);
 	if (!state)
-		return;
+		return -ENOMEM;
 
 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 retry:
 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
-	if (IS_ERR(crtc_state))
-		return;
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
 
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-	list_for_each_entry(tmp_connector, &config->connector_list, head) {
+	drm_for_each_connector(tmp_connector, connector->dev) {
 		if (tmp_connector->state->crtc != crtc)
 			continue;
 
@@ -2009,17 +2050,16 @@ retry:
 	if (ret != 0)
 		goto fail;
 
-	/* Driver takes ownership of state on successful async commit. */
-	return;
+	/* Driver takes ownership of state on successful commit. */
+	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
+	connector->dpms = old_mode;
 	drm_atomic_state_free(state);
 
-	WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret);
-
-	return;
+	return ret;
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -2080,6 +2120,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
 	state->mode_changed = false;
 	state->active_changed = false;
 	state->planes_changed = false;
+	state->connectors_changed = false;
 	state->event = NULL;
 }
 EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
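Two interface changes run through this file: the crtc ->atomic_begin/->atomic_flush hooks now receive the old crtc state, and drm_atomic_helper_connector_dpms() returns an errno instead of void. A driver built on the helpers follows suit; the foo_* names below are assumed for illustration:

static void foo_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	/* e.g. latch double-buffered registers before the plane updates */
}

static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	/* e.g. arm the vblank event stashed in crtc->state->event */
}

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.atomic_begin	= foo_crtc_atomic_begin,
	.atomic_flush	= foo_crtc_atomic_flush,
};

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms	= drm_atomic_helper_connector_dpms,	/* now returns int */
	/* ... */
};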
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 9b23525c0ed0..192a5f9eeb74 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -53,6 +53,10 @@ struct drm_ctx_list {
  */
 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	mutex_lock(&dev->struct_mutex);
 	idr_remove(&dev->ctx_idr, ctx_handle);
 	mutex_unlock(&dev->struct_mutex);
@@ -85,10 +89,13 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
  *
  * Initialise the drm_device::ctx_idr
  */
-int drm_legacy_ctxbitmap_init(struct drm_device * dev)
+void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 {
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	idr_init(&dev->ctx_idr);
-	return 0;
 }
 
 /**
@@ -101,6 +108,10 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev)
  */
 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
 {
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	mutex_lock(&dev->struct_mutex);
 	idr_destroy(&dev->ctx_idr);
 	mutex_unlock(&dev->struct_mutex);
@@ -119,6 +130,10 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_ctx_list *pos, *tmp;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	mutex_lock(&dev->ctxlist_mutex);
 
 	list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
@@ -161,6 +176,10 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
 	struct drm_local_map *map;
 	struct drm_map_list *_entry;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 
 	map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -205,6 +224,10 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
 	struct drm_local_map *map = NULL;
 	struct drm_map_list *r_list = NULL;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map
@@ -305,6 +328,10 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
 	struct drm_ctx ctx;
 	int i;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (res->count >= DRM_RESERVED_CONTEXTS) {
 		memset(&ctx, 0, sizeof(ctx));
 		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -335,6 +362,10 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
 	struct drm_ctx_list *ctx_entry;
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	ctx->handle = drm_legacy_ctxbitmap_next(dev);
 	if (ctx->handle == DRM_KERNEL_CONTEXT) {
 		/* Skip kernel's context and get a new one. */
@@ -378,6 +409,10 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	/* This is 0, because we don't handle any context flags */
 	ctx->flags = 0;
 
@@ -400,6 +435,10 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
@@ -420,6 +459,10 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	drm_context_switch_complete(dev, file_priv, ctx->handle);
 
@@ -442,6 +485,10 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	if (ctx->handle != DRM_KERNEL_CONTEXT) {
 		if (dev->driver->context_dtor)
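Every legacy context entry point now opens with the same two-feature test: the ioctl is refused on modesetting drivers unless they explicitly opt back in with DRIVER_KMS_LEGACY_CONTEXT. Factored into a predicate, the check would read (hypothetical helper, not part of the patch):

static bool foo_legacy_ctx_allowed(struct drm_device *dev)
{
	/* UMS drivers keep contexts; KMS drivers must opt in explicitly. */
	return drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) ||
	       !drm_core_check_feature(dev, DRIVER_MODESET);
}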
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fed748311b92..33d877c65ced 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -615,7 +615,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 	if (atomic_read(&fb->refcount.refcount) > 1) {
 		drm_modeset_lock_all(dev);
 		/* remove from any CRTC */
-		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		drm_for_each_crtc(crtc, dev) {
 			if (crtc->primary->fb == fb) {
 				/* should turn off the crtc */
 				memset(&set, 0, sizeof(struct drm_mode_set));
@@ -627,7 +627,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 			}
 		}
 
-		list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		drm_for_each_plane(plane, dev) {
 			if (plane->fb == fb)
 				drm_plane_force_disable(plane);
 		}
@@ -736,7 +736,7 @@ unsigned int drm_crtc_index(struct drm_crtc *crtc)
 	unsigned int index = 0;
 	struct drm_crtc *tmp;
 
-	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(tmp, crtc->dev) {
 		if (tmp == crtc)
 			return index;
 
@@ -988,7 +988,7 @@ unsigned int drm_connector_index(struct drm_connector *connector)
 
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-	list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
+	drm_for_each_connector(tmp, connector->dev) {
 		if (tmp == connector)
 			return index;
 
@@ -1054,7 +1054,7 @@ void drm_connector_unplug_all(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 
-	/* taking the mode config mutex ends up in a clash with sysfs */
+	/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
 		drm_connector_unregister(connector);
 
@@ -1151,7 +1151,7 @@ EXPORT_SYMBOL(drm_encoder_cleanup);
 int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
 			     unsigned long possible_crtcs,
 			     const struct drm_plane_funcs *funcs,
-			     const uint32_t *formats, uint32_t format_count,
+			     const uint32_t *formats, unsigned int format_count,
 			     enum drm_plane_type type)
 {
 	struct drm_mode_config *config = &dev->mode_config;
@@ -1225,7 +1225,7 @@ EXPORT_SYMBOL(drm_universal_plane_init);
 int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
 		   unsigned long possible_crtcs,
 		   const struct drm_plane_funcs *funcs,
-		   const uint32_t *formats, uint32_t format_count,
+		   const uint32_t *formats, unsigned int format_count,
 		   bool is_primary)
 {
 	enum drm_plane_type type;
@@ -1280,7 +1280,7 @@ unsigned int drm_plane_index(struct drm_plane *plane)
 	unsigned int index = 0;
 	struct drm_plane *tmp;
 
-	list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
+	drm_for_each_plane(tmp, plane->dev) {
 		if (tmp == plane)
 			return index;
 
@@ -1305,7 +1305,7 @@ drm_plane_from_index(struct drm_device *dev, int idx)
 	struct drm_plane *plane;
 	unsigned int i = 0;
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+	drm_for_each_plane(plane, dev) {
 		if (i == idx)
 			return plane;
 		i++;
@@ -1679,70 +1679,6 @@ int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
 
-static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
-{
-	uint32_t total_objects = 0;
-
-	total_objects += dev->mode_config.num_crtc;
-	total_objects += dev->mode_config.num_connector;
-	total_objects += dev->mode_config.num_encoder;
-
-	group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL);
-	if (!group->id_list)
-		return -ENOMEM;
-
-	group->num_crtcs = 0;
-	group->num_connectors = 0;
-	group->num_encoders = 0;
-	return 0;
-}
-
-void drm_mode_group_destroy(struct drm_mode_group *group)
-{
-	kfree(group->id_list);
-	group->id_list = NULL;
-}
-
-/*
- * NOTE: Driver's shouldn't ever call drm_mode_group_init_legacy_group - it is
- * the drm core's responsibility to set up mode control groups.
- */
-int drm_mode_group_init_legacy_group(struct drm_device *dev,
-				     struct drm_mode_group *group)
-{
-	struct drm_crtc *crtc;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	int ret;
-
-	ret = drm_mode_group_init(dev, group);
-	if (ret)
-		return ret;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		group->id_list[group->num_crtcs++] = crtc->base.id;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		group->id_list[group->num_crtcs + group->num_encoders++] =
-		encoder->base.id;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-		group->id_list[group->num_crtcs + group->num_encoders +
-			       group->num_connectors++] = connector->base.id;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
-
-void drm_reinit_primary_mode_group(struct drm_device *dev)
-{
-	drm_modeset_lock_all(dev);
-	drm_mode_group_destroy(&dev->primary->mode_group);
-	drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
-	drm_modeset_unlock_all(dev);
-}
-EXPORT_SYMBOL(drm_reinit_primary_mode_group);
-
 /**
  * drm_mode_getresources - get graphics configuration
  * @dev: drm device for the ioctl
@@ -1771,12 +1707,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	int crtc_count = 0;
 	int fb_count = 0;
 	int encoder_count = 0;
-	int copied = 0, i;
+	int copied = 0;
 	uint32_t __user *fb_id;
 	uint32_t __user *crtc_id;
 	uint32_t __user *connector_id;
 	uint32_t __user *encoder_id;
-	struct drm_mode_group *mode_group;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -1809,24 +1744,14 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	/* mode_config.mutex protects the connector list against e.g. DP MST
 	 * connector hot-adding. CRTC/Plane lists are invariant. */
 	mutex_lock(&dev->mode_config.mutex);
-	if (!drm_is_primary_client(file_priv)) {
+	drm_for_each_crtc(crtc, dev)
+		crtc_count++;
 
-		mode_group = NULL;
-		list_for_each(lh, &dev->mode_config.crtc_list)
-			crtc_count++;
+	drm_for_each_connector(connector, dev)
+		connector_count++;
 
-		list_for_each(lh, &dev->mode_config.connector_list)
-			connector_count++;
-
-		list_for_each(lh, &dev->mode_config.encoder_list)
-			encoder_count++;
-	} else {
-
-		mode_group = &file_priv->master->minor->mode_group;
-		crtc_count = mode_group->num_crtcs;
-		connector_count = mode_group->num_connectors;
-		encoder_count = mode_group->num_encoders;
-	}
+	drm_for_each_encoder(encoder, dev)
+		encoder_count++;
 
 	card_res->max_height = dev->mode_config.max_height;
 	card_res->min_height = dev->mode_config.min_height;
@@ -1837,25 +1762,13 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	if (card_res->count_crtcs >= crtc_count) {
 		copied = 0;
 		crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
-		if (!mode_group) {
-			list_for_each_entry(crtc, &dev->mode_config.crtc_list,
-					    head) {
-				DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
-				if (put_user(crtc->base.id, crtc_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
-		} else {
-			for (i = 0; i < mode_group->num_crtcs; i++) {
-				if (put_user(mode_group->id_list[i],
-					     crtc_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
+		drm_for_each_crtc(crtc, dev) {
+			DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+			if (put_user(crtc->base.id, crtc_id + copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
+			copied++;
 		}
 	}
 	card_res->count_crtcs = crtc_count;
@@ -1864,29 +1777,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	if (card_res->count_encoders >= encoder_count) {
 		copied = 0;
 		encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
-		if (!mode_group) {
-			list_for_each_entry(encoder,
-					    &dev->mode_config.encoder_list,
-					    head) {
-				DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
-						encoder->name);
-				if (put_user(encoder->base.id, encoder_id +
-					     copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
-		} else {
-			for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
-				if (put_user(mode_group->id_list[i],
-					     encoder_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
+		drm_for_each_encoder(encoder, dev) {
+			DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+					encoder->name);
+			if (put_user(encoder->base.id, encoder_id +
+				     copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
-
+			copied++;
 		}
 	}
 	card_res->count_encoders = encoder_count;
@@ -1895,31 +1794,16 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
 	if (card_res->count_connectors >= connector_count) {
 		copied = 0;
 		connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
-		if (!mode_group) {
-			list_for_each_entry(connector,
-					    &dev->mode_config.connector_list,
-					    head) {
-				DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-					connector->base.id,
-					connector->name);
-				if (put_user(connector->base.id,
-					     connector_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
-		} else {
-			int start = mode_group->num_crtcs +
-				mode_group->num_encoders;
-			for (i = start; i < start + mode_group->num_connectors; i++) {
-				if (put_user(mode_group->id_list[i],
-					     connector_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
+		drm_for_each_connector(connector, dev) {
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+				connector->base.id,
+				connector->name);
+			if (put_user(connector->base.id,
+				     connector_id + copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
+			copied++;
 		}
 	}
 	card_res->count_connectors = connector_count;
@@ -2187,7 +2071,7 @@ static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
 
 	/* For atomic drivers only state objects are synchronously updated and
 	 * protected by modeset locks, so check those first. */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		if (!connector->state)
 			continue;
 
@@ -2291,7 +2175,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
 		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
 		/* Plane lists are invariant, no locking needed. */
-		list_for_each_entry(plane, &config->plane_list, head) {
+		drm_for_each_plane(plane, dev) {
 			/*
 			 * Unless userspace set the 'universal planes'
 			 * capability bit, only advertise overlays.
@@ -2596,7 +2480,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 	 * connectors from it), hence we need to refcount the fbs across all
 	 * crtcs. Atomic modeset will have saner semantics ...
 	 */
-	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+	drm_for_each_crtc(tmp, crtc->dev)
 		tmp->primary->old_fb = tmp->primary->fb;
 
 	fb = set->fb;
@@ -2607,7 +2491,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 		crtc->primary->fb = fb;
 	}
 
-	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(tmp, crtc->dev) {
 		if (tmp->primary->fb)
 			drm_framebuffer_reference(tmp->primary->fb);
 		if (tmp->primary->old_fb)
@@ -4301,7 +4185,6 @@ void drm_property_unreference_blob(struct drm_property_blob *blob)
 		mutex_unlock(&dev->mode_config.blob_lock);
 	else
 		might_lock(&dev->mode_config.blob_lock);
-
 }
 EXPORT_SYMBOL(drm_property_unreference_blob);
 
@@ -4472,9 +4355,7 @@ static int drm_property_replace_global_blob(struct drm_device *dev,
 			goto err_created;
 	}
 
-	if (old_blob)
-		drm_property_unreference_blob(old_blob);
-
+	drm_property_unreference_blob(old_blob);
 	*replace = new_blob;
 
 	return 0;
@@ -4872,9 +4753,9 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
 
 	/* Do DPMS ourselves */
 	if (property == connector->dev->mode_config.dpms_property) {
-		if (connector->funcs->dpms)
-			(*connector->funcs->dpms)(connector, (int)value);
 		ret = 0;
+		if (connector->funcs->dpms)
+			ret = (*connector->funcs->dpms)(connector, (int)value);
 	} else if (connector->funcs->set_property)
 		ret = connector->funcs->set_property(connector, property, value);
 
@@ -5349,13 +5230,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 		/* Keep the old fb, don't unref it. */
 		crtc->primary->old_fb = NULL;
 	} else {
-		/*
-		 * Warn if the driver hasn't properly updated the crtc->fb
-		 * field to reflect that the new framebuffer is now used.
-		 * Failing to do so will screw with the reference counting
-		 * on framebuffers.
-		 */
-		WARN_ON(crtc->primary->fb != fb);
+		crtc->primary->fb = fb;
 		/* Unref only the old framebuffer. */
 		fb = NULL;
 	}
@@ -5386,21 +5261,23 @@ void drm_mode_config_reset(struct drm_device *dev)
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+	drm_for_each_plane(plane, dev)
 		if (plane->funcs->reset)
 			plane->funcs->reset(plane);
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+	drm_for_each_crtc(crtc, dev)
 		if (crtc->funcs->reset)
 			crtc->funcs->reset(crtc);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+	drm_for_each_encoder(encoder, dev)
 		if (encoder->funcs->reset)
 			encoder->funcs->reset(encoder);
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, dev)
 		if (connector->funcs->reset)
 			connector->funcs->reset(connector);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_reset);
 
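The open-coded list_for_each_entry() walks over mode objects give way to the new drm_for_each_*() iterators, which name the intent and give the core a single place to hang locking assertions; note that drm_mode_config_reset() now takes mode_config.mutex around the connector walk for exactly that reason. Typical use, guarding against DP MST connector hot-add (locals assumed):

	struct drm_connector *connector;

	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev)
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.id, connector->name);
	mutex_unlock(&dev->mode_config.mutex);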
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 393114df88a3..ef534758a02c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -121,7 +121,7 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
 		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 	}
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder == encoder)
 			return true;
 	return false;
@@ -151,7 +151,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
 	if (!oops_in_progress)
 		WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+	drm_for_each_encoder(encoder, dev)
 		if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
 			return true;
 	return false;
@@ -180,7 +180,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
 
 	drm_warn_on_modeset_not_all_locked(dev);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		if (!drm_helper_encoder_in_use(encoder)) {
 			drm_encoder_disable(encoder);
 			/* disconnect encoder from any connector */
@@ -188,7 +188,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
 		}
 	}
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 		const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 		crtc->enabled = drm_helper_crtc_in_use(crtc);
 		if (!crtc->enabled) {
@@ -230,7 +230,7 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
 	const struct drm_encoder_helper_funcs *encoder_funcs;
 	struct drm_encoder *encoder;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		encoder_funcs = encoder->helper_private;
 		/* Disable unused encoders */
 		if (encoder->crtc == NULL)
@@ -305,7 +305,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	 * adjust it according to limitations or connector properties, and also
 	 * a chance to reject the mode entirely.
 	 */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -334,7 +334,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	crtc->hwmode = *adjusted_mode;
 
 	/* Prepare the encoders and CRTCs before setting the mode. */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -359,7 +359,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	if (!ret)
 	    goto done;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -376,7 +376,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	crtc_funcs->commit(crtc);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -418,11 +418,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 	struct drm_encoder *encoder;
 
 	/* Decouple all encoders and their attached connectors from this crtc */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		if (encoder->crtc != crtc)
 			continue;
 
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_for_each_connector(connector, dev) {
 			if (connector->encoder != encoder)
 				continue;
 
@@ -519,12 +519,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	 * restored, not the drivers personal bookkeeping.
 	 */
 	count = 0;
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		save_encoders[count++] = *encoder;
 	}
 
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		save_connectors[count++] = *connector;
 	}
 
@@ -562,7 +562,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 
 	/* a) traverse passed in connector list and get encoders for them */
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		const struct drm_connector_helper_funcs *connector_funcs =
 			connector->helper_private;
 		new_encoder = connector->encoder;
@@ -602,7 +602,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	}
 
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		if (!connector->encoder)
 			continue;
 
@@ -685,12 +685,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 fail:
 	/* Restore all previous data. */
 	count = 0;
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		*encoder = save_encoders[count++];
 	}
 
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		*connector = save_connectors[count++];
 	}
 
@@ -712,7 +712,7 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder == encoder)
 			if (connector->dpms < dpms)
 				dpms = connector->dpms;
@@ -746,7 +746,7 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
 	struct drm_connector *connector;
 	struct drm_device *dev = crtc->dev;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder && connector->encoder->crtc == crtc)
 			if (connector->dpms < dpms)
 				dpms = connector->dpms;
@@ -762,15 +762,18 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
  * implementing the DPMS connector attribute. It computes the new desired DPMS
  * state for all encoders and crtcs in the output mesh and calls the ->dpms()
  * callback provided by the driver appropriately.
+ *
+ * Returns:
+ * Always returns 0.
  */
-void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 {
 	struct drm_encoder *encoder = connector->encoder;
 	struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
 	int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
 
 	if (mode == connector->dpms)
-		return;
+		return 0;
 
 	old_dpms = connector->dpms;
 	connector->dpms = mode;
@@ -802,7 +805,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 		}
 	}
 
-	return;
+	return 0;
 }
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
@@ -862,7 +865,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
 	bool ret;
 
 	drm_modeset_lock_all(dev);
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 
 		if (!crtc->enabled)
 			continue;
@@ -876,7 +879,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
 
 		/* Turn off outputs that were already powered off */
 		if (drm_helper_choose_crtc_dpms(crtc)) {
-			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			drm_for_each_encoder(encoder, dev) {
 
 				if(encoder->crtc != crtc)
 					continue;
@@ -928,15 +931,15 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
 	if (crtc->funcs->atomic_duplicate_state)
 		crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
 	else {
-		crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-		if (!crtc_state)
-			return -ENOMEM;
-		if (crtc->state)
-			__drm_atomic_helper_crtc_duplicate_state(crtc, crtc_state);
-		else
-			crtc_state->crtc = crtc;
+		if (!crtc->state)
+			drm_atomic_helper_crtc_reset(crtc);
+
+		crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
 	}
 
+	if (!crtc_state)
+		return -ENOMEM;
+
 	crtc_state->planes_changed = true;
 	crtc_state->mode_changed = true;
 	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
@@ -957,11 +960,11 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
 	ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
 
 out:
-	if (crtc->funcs->atomic_destroy_state)
-		crtc->funcs->atomic_destroy_state(crtc, crtc_state);
-	else {
-		__drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
-		kfree(crtc_state);
+	if (crtc_state) {
+		if (crtc->funcs->atomic_destroy_state)
+			crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+		else
+			drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index eb603f1defc2..e23df5fd3836 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2632,6 +2632,16 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 			seq_printf(m, "%02x ", buf[i]);
 		seq_printf(m, "\n");
 
+		/* dump the standard OUI branch header */
+		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
+		seq_printf(m, "branch oui: ");
+		for (i = 0; i < 0x3; i++)
+			seq_printf(m, "%02x", buf[i]);
+		seq_printf(m, " devid: ");
+		for (i = 0x3; i < 0x8; i++)
+			seq_printf(m, "%c", buf[i]);
+		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
+		seq_printf(m, "\n");
 		bret = dump_dp_payload_table(mgr, buf);
 		if (bret == true) {
 			seq_printf(m, "payload table: ");
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b7bf4ce8c012..53d09a19f7e1 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -285,7 +285,6 @@ static void drm_minor_free(struct drm_device *dev, unsigned int type)
 	if (!minor)
 		return;
 
-	drm_mode_group_destroy(&minor->mode_group);
 	put_device(minor->kdev);
 
 	spin_lock_irqsave(&drm_minor_lock, flags);
@@ -582,11 +581,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 	if (drm_ht_create(&dev->map_hash, 12))
 		goto err_minors;
 
-	ret = drm_legacy_ctxbitmap_init(dev);
-	if (ret) {
-		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-		goto err_ht;
-	}
+	drm_legacy_ctxbitmap_init(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
 		ret = drm_gem_init(dev);
@@ -600,7 +595,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
 
 err_ctxbitmap:
 	drm_legacy_ctxbitmap_cleanup(dev);
-err_ht:
 	drm_ht_remove(&dev->map_hash);
 err_minors:
 	drm_minor_free(dev, DRM_MINOR_LEGACY);
@@ -705,20 +699,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 			goto err_minors;
 	}
 
-	/* setup grouping for legacy outputs */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_mode_group_init_legacy_group(dev,
-				&dev->primary->mode_group);
-		if (ret)
-			goto err_unload;
-	}
-
 	ret = 0;
 	goto out_unlock;
 
-err_unload:
-	if (dev->driver->unload)
-		dev->driver->unload(dev);
 err_minors:
 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7087da37dae0..05bb7311ac5d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3413,7 +3413,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder == encoder && connector->eld[0])
 			return connector;
 
@@ -3802,7 +3802,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
 	struct drm_display_mode *mode;
 	struct drm_device *dev = connector->dev;
 
-	count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+	count = ARRAY_SIZE(drm_dmt_modes);
 	if (hdisplay < 0)
 		hdisplay = 0;
 	if (vdisplay < 0)
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 5c1aca443e54..c19a62561183 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -209,23 +209,11 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_framebuffer *fb;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
-	if (ret)
-		return ret;
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret) {
-		mutex_unlock(&dev->mode_config.mutex);
-		return ret;
-	}
-
-	list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+	mutex_lock(&dev->mode_config.fb_lock);
+	drm_for_each_fb(fb, dev)
 		drm_fb_cma_describe(fb, m);
-
-	mutex_unlock(&dev->struct_mutex);
-	mutex_unlock(&dev->mode_config.mutex);
+	mutex_unlock(&dev->mode_config.fb_lock);
 
 	return 0;
 }
@@ -234,9 +222,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
 
 static struct fb_ops drm_fbdev_cma_ops = {
 	.owner		= THIS_MODULE,
-	.fb_fillrect	= sys_fillrect,
-	.fb_copyarea	= sys_copyarea,
-	.fb_imageblit	= sys_imageblit,
+	.fb_fillrect	= drm_fb_helper_sys_fillrect,
+	.fb_copyarea	= drm_fb_helper_sys_copyarea,
+	.fb_imageblit	= drm_fb_helper_sys_imageblit,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
 	.fb_blank	= drm_fb_helper_blank,
@@ -275,10 +263,9 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
 	if (IS_ERR(obj))
 		return -ENOMEM;
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
-		dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
-		ret = -ENOMEM;
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
+		ret = PTR_ERR(fbi);
 		goto err_drm_gem_cma_free_object;
 	}
 
@@ -286,23 +273,16 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
 	if (IS_ERR(fbdev_cma->fb)) {
 		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
 		ret = PTR_ERR(fbdev_cma->fb);
-		goto err_framebuffer_release;
+		goto err_fb_info_destroy;
 	}
 
 	fb = &fbdev_cma->fb->fb;
 	helper->fb = fb;
-	helper->fbdev = fbi;
 
 	fbi->par = helper;
 	fbi->flags = FBINFO_FLAG_DEFAULT;
 	fbi->fbops = &drm_fbdev_cma_ops;
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		dev_err(dev->dev, "Failed to allocate color map.\n");
-		goto err_drm_fb_cma_destroy;
-	}
-
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
@@ -317,11 +297,8 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
 
 	return 0;
 
-err_drm_fb_cma_destroy:
-	drm_framebuffer_unregister_private(fb);
-	drm_fb_cma_destroy(fb);
-err_framebuffer_release:
-	framebuffer_release(fbi);
+err_fb_info_destroy:
+	drm_fb_helper_release_fbi(helper);
 err_drm_gem_cma_free_object:
 	drm_gem_cma_free_object(&obj->base);
 	return ret;
@@ -397,20 +374,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
  */
 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
 {
-	if (fbdev_cma->fb_helper.fbdev) {
-		struct fb_info *info;
-		int ret;
-
-		info = fbdev_cma->fb_helper.fbdev;
-		ret = unregister_framebuffer(info);
-		if (ret < 0)
-			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
+	drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
 
 	if (fbdev_cma->fb) {
 		drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index cac422916c7a..418d299f3b12 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -56,8 +56,8 @@ static LIST_HEAD(kernel_fb_helper_list);
  * Teardown is done with drm_fb_helper_fini().
  *
  * At runtime drivers should restore the fbdev console by calling
- * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
- * should also notify the fb helper code from updates to the output
+ * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback.
+ * They should also notify the fb helper code from updates to the output
  * configuration by calling drm_fb_helper_hotplug_event(). For easier
  * integration with the output polling code in drm_crtc_helper.c the modeset
  * code provides a ->output_poll_changed callback.
@@ -89,8 +89,9 @@ static LIST_HEAD(kernel_fb_helper_list);
  * connectors to the fbdev, e.g. if some are reserved for special purposes or
  * not adequate to be used for the fbcon.
  *
- * Since this is part of the initial setup before the fbdev is published, no
- * locking is required.
+ * This function is protected against concurrent connector hotadds/removals
+ * using drm_fb_helper_add_one_connector() and
+ * drm_fb_helper_remove_one_connector().
  */
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 {
@@ -98,7 +99,8 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 	struct drm_connector *connector;
 	int i;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, dev) {
 		struct drm_fb_helper_connector *fb_helper_connector;
 
 		fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
@@ -108,6 +110,7 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 		fb_helper_connector->connector = connector;
 		fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
 	}
+	mutex_unlock(&dev->mode_config.mutex);
 	return 0;
 fail:
 	for (i = 0; i < fb_helper->connector_count; i++) {
@@ -115,6 +118,8 @@ fail:
 		fb_helper->connector_info[i] = NULL;
 	}
 	fb_helper->connector_count = 0;
+	mutex_unlock(&dev->mode_config.mutex);
+
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
@@ -163,11 +168,14 @@ static void remove_from_modeset(struct drm_mode_set *set,
 	}
 	set->num_connectors--;
 
-	/* because i915 is pissy about this..
+	/*
 	 * TODO maybe need to make sure we set it back to !=NULL somewhere?
 	 */
-	if (set->num_connectors == 0)
+	if (set->num_connectors == 0) {
 		set->fb = NULL;
+		drm_mode_destroy(connector->dev, set->mode);
+		set->mode = NULL;
+	}
 }
 
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
@@ -269,7 +277,7 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_crtc *c;
 
-	list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(c, dev) {
 		if (crtc->base.id == c->base.id)
 			return c->primary->fb;
 	}
@@ -321,7 +329,7 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 
 	drm_warn_on_modeset_not_all_locked(dev);
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+	drm_for_each_plane(plane, dev) {
 		if (plane->type != DRM_PLANE_TYPE_PRIMARY)
 			drm_plane_force_disable(plane);
 
@@ -349,21 +357,6 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 	}
 	return error;
 }
-/**
- * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
- * @fb_helper: fbcon to restore
- *
- * This should be called from driver's drm ->lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * Use this variant if you need to bypass locking (panic), or already
- * hold all modeset locks.  Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked()
- */
-static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
-{
-	return restore_fbdev_mode(fb_helper);
-}
 
 /**
  * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
@@ -393,6 +386,31 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
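With the locked variant gone, the unlocked restore helper is the only public entry point, and a driver's ->lastclose hook reduces to a single call. A minimal sketch, assuming a hypothetical foo driver whose private structure embeds the fb helper (the foo_* names are illustrative, not from this patch):

	static void foo_lastclose(struct drm_device *dev)
	{
		struct foo_drm_private *priv = dev->dev_private;

		/* Put the fbdev console back when the last userspace
		 * client (e.g. X) closes the device node. */
		drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fb_helper);
	}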
 
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_crtc *crtc;
+	int bound = 0, crtcs_bound = 0;
+
+	/* Sometimes user space wants everything disabled, so don't steal the
+	 * display if there's a master. */
+	if (dev->primary->master)
+		return false;
+
+	drm_for_each_crtc(crtc, dev) {
+		if (crtc->primary->fb)
+			crtcs_bound++;
+		if (crtc->primary->fb == fb_helper->fb)
+			bound++;
+	}
+
+	if (bound < crtcs_bound)
+		return false;
+
+	return true;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
 /*
 * restore fbcon display for all kms drivers using this helper, used for sysrq
  * and panic handling.
@@ -411,67 +429,15 @@ static bool drm_fb_helper_force_kernel_mode(void)
 		if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 			continue;
 
-		/*
-		 * NOTE: Use trylock mode to avoid deadlocks and sleeping in
-		 * panic context.
-		 */
-		if (__drm_modeset_lock_all(dev, true) != 0) {
-			error = true;
-			continue;
-		}
-
-		ret = drm_fb_helper_restore_fbdev_mode(helper);
+		drm_modeset_lock_all(dev);
+		ret = restore_fbdev_mode(helper);
 		if (ret)
 			error = true;
-
 		drm_modeset_unlock_all(dev);
 	}
 	return error;
 }
 
-static int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
-			void *panic_str)
-{
-	/*
-	 * It's a waste of time and effort to switch back to text console
-	 * if the kernel should reboot before panic messages can be seen.
-	 */
-	if (panic_timeout < 0)
-		return 0;
-
-	pr_err("panic occurred, switching back to text console\n");
-	return drm_fb_helper_force_kernel_mode();
-}
-
-static struct notifier_block paniced = {
-	.notifier_call = drm_fb_helper_panic,
-};
-
-static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
-{
-	struct drm_device *dev = fb_helper->dev;
-	struct drm_crtc *crtc;
-	int bound = 0, crtcs_bound = 0;
-
-	/* Sometimes user space wants everything disabled, so don't steal the
-	 * display if there's a master. */
-	if (dev->primary->master)
-		return false;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb)
-			crtcs_bound++;
-		if (crtc->primary->fb == fb_helper->fb)
-			bound++;
-	}
-
-	if (bound < crtcs_bound)
-		return false;
-
-	return true;
-}
-
-#ifdef CONFIG_MAGIC_SYSRQ
 static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 {
 	bool ret;
@@ -504,14 +470,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
 	int i, j;
 
 	/*
-	 * fbdev->blank can be called from irq context in case of a panic.
-	 * Since we already have our own special panic handler which will
-	 * restore the fbdev console mode completely, just bail out early.
-	 */
-	if (oops_in_progress)
-		return;
-
-	/*
 	 * For each CRTC in this fb, turn the connectors on/off.
 	 */
 	drm_modeset_lock_all(dev);
@@ -544,6 +502,9 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
  */
 int drm_fb_helper_blank(int blank, struct fb_info *info)
 {
+	if (oops_in_progress)
+		return -EBUSY;
+
 	switch (blank) {
 	/* Display: On; HSync: On, VSync: On */
 	case FB_BLANK_UNBLANK:
@@ -655,7 +616,7 @@ int drm_fb_helper_init(struct drm_device *dev,
 	}
 
 	i = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 		fb_helper->crtc_info[i].mode_set.crtc = crtc;
 		i++;
 	}
@@ -667,14 +628,91 @@ out_free:
 }
 EXPORT_SYMBOL(drm_fb_helper_init);
 
+/**
+ * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to alloc fb_info and the members cmap and apertures. Called
+ * by the driver within the fb_probe fb_helper callback function.
+ *
+ * RETURNS:
+ * fb_info pointer if things went okay, pointer containing error code
+ * otherwise
+ */
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+	struct device *dev = fb_helper->dev->dev;
+	struct fb_info *info;
+	int ret;
+
+	info = framebuffer_alloc(0, dev);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret)
+		goto err_release;
+
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto err_free_cmap;
+	}
+
+	fb_helper->fbdev = info;
+
+	return info;
+
+err_free_cmap:
+	fb_dealloc_cmap(&info->cmap);
+err_release:
+	framebuffer_release(info);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);
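Since drm_fb_helper_alloc_fbi() reports failure through ERR_PTR() rather than NULL, callers must test with IS_ERR(). A sketch of the intended call site in a driver's ->fb_probe hook, with the hardware-specific setup elided (foo_* is hypothetical):

	static int foo_fb_probe(struct drm_fb_helper *helper,
				struct drm_fb_helper_surface_size *sizes)
	{
		struct fb_info *info;

		info = drm_fb_helper_alloc_fbi(helper);
		if (IS_ERR(info))
			return PTR_ERR(info);

		/* ... allocate backing storage, fill info->fix and
		 * info->var, set info->fbops, point helper->fb at the
		 * driver's drm_framebuffer ... */

		return 0;
	}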
+
+/**
+ * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A wrapper around unregister_framebuffer, to release the fb_info
+ * framebuffer device
+ */
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper && fb_helper->fbdev)
+		unregister_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
+
+/**
+ * drm_fb_helper_release_fbi - dealloc fb_info and its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to free memory taken by fb_info and the members cmap and
+ * apertures
+ */
+void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper) {
+		struct fb_info *info = fb_helper->fbdev;
+
+		if (info) {
+			if (info->cmap.len)
+				fb_dealloc_cmap(&info->cmap);
+			framebuffer_release(info);
+		}
+
+		fb_helper->fbdev = NULL;
+	}
+}
+EXPORT_SYMBOL(drm_fb_helper_release_fbi);
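The two teardown helpers mirror the unregister_framebuffer()/framebuffer_release() split in the fbdev core: one removes the device node, the other frees the memory. A plausible unload order in a driver (sketch only; foo_* is assumed):

	static void foo_fbdev_fini(struct foo_drm_private *priv)
	{
		struct drm_fb_helper *helper = &priv->fb_helper;

		drm_fb_helper_unregister_fbi(helper);	/* remove /dev/fbN   */
		drm_fb_helper_release_fbi(helper);	/* free cmap/fb_info */
		drm_fb_helper_fini(helper);		/* drop helper state */
	}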
+
 void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 {
 	if (!list_empty(&fb_helper->kernel_fb_list)) {
 		list_del(&fb_helper->kernel_fb_list);
 		if (list_empty(&kernel_fb_helper_list)) {
-			pr_info("drm: unregistered panic notifier\n");
-			atomic_notifier_chain_unregister(&panic_notifier_list,
-							 &paniced);
 			unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 		}
 	}
@@ -684,6 +722,149 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_fini);
 
+/**
+ * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A wrapper around unlink_framebuffer implemented by fbdev core
+ */
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper && fb_helper->fbdev)
+		unlink_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
+
+/**
+ * drm_fb_helper_sys_read - wrapper around fb_sys_read
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to read from framebuffer memory
+ * @count: number of bytes to read from framebuffer memory
+ * @ppos: read offset within framebuffer memory
+ *
+ * A wrapper around fb_sys_read implemented by fbdev core
+ */
+ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	return fb_sys_read(info, buf, count, ppos);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_read);
+
+/**
+ * drm_fb_helper_sys_write - wrapper around fb_sys_write
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to write to framebuffer memory
+ * @count: number of bytes to write to framebuffer memory
+ * @ppos: write offset within framebuffer memory
+ *
+ * A wrapper around fb_sys_write implemented by fbdev core
+ */
+ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	return fb_sys_write(info, buf, count, ppos);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_write);
+
+/**
+ * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around sys_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_sys_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect)
+{
+	sys_fillrect(info, rect);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
+
+/**
+ * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around sys_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_sys_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area)
+{
+	sys_copyarea(info, area);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
+
+/**
+ * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around sys_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_sys_imageblit(struct fb_info *info,
+				 const struct fb_image *image)
+{
+	sys_imageblit(info, image);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
+
+/**
+ * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around cfb_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect)
+{
+	cfb_fillrect(info, rect);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
+
+/**
+ * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around cfb_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area)
+{
+	cfb_copyarea(info, area);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
+
+/**
+ * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around cfb_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+				 const struct fb_image *image)
+{
+	cfb_imageblit(info, image);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
+
+/**
+ * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
+ * @fb_helper: driver-allocated fbdev helper
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * A wrapper around fb_set_suspend implemented by fbdev core
+ */
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
+{
+	if (fb_helper && fb_helper->fbdev)
+		fb_set_suspend(fb_helper->fbdev, state);
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend);
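fb_set_suspend() must be called with the console lock held, which the wrapper does not take itself. In a driver's PM hooks the pairing would look roughly like this (a sketch assuming the usual dev_get_drvdata() plumbing; foo_* is hypothetical):

	static int foo_pm_suspend(struct device *dev)
	{
		struct foo_drm_private *priv = dev_get_drvdata(dev);

		console_lock();
		drm_fb_helper_set_suspend(&priv->fb_helper, 1);
		console_unlock();
		return 0;
	}

	static int foo_pm_resume(struct device *dev)
	{
		struct foo_drm_private *priv = dev_get_drvdata(dev);

		console_lock();
		drm_fb_helper_set_suspend(&priv->fb_helper, 0);
		console_unlock();
		return 0;
	}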
+
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -771,9 +952,10 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 	int i, j, rc = 0;
 	int start;
 
-	if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+	if (oops_in_progress)
 		return -EBUSY;
-	}
+
+	drm_modeset_lock_all(dev);
 	if (!drm_fb_helper_is_bound(fb_helper)) {
 		drm_modeset_unlock_all(dev);
 		return -EBUSY;
@@ -922,6 +1104,9 @@ int drm_fb_helper_set_par(struct fb_info *info)
 	struct drm_fb_helper *fb_helper = info->par;
 	struct fb_var_screeninfo *var = &info->var;
 
+	if (oops_in_progress)
+		return -EBUSY;
+
 	if (var->pixclock != 0) {
 		DRM_ERROR("PIXEL CLOCK SET\n");
 		return -EINVAL;
@@ -947,9 +1132,10 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 	int ret = 0;
 	int i;
 
-	if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+	if (oops_in_progress)
 		return -EBUSY;
-	}
+
+	drm_modeset_lock_all(dev);
 	if (!drm_fb_helper_is_bound(fb_helper)) {
 		drm_modeset_unlock_all(dev);
 		return -EBUSY;
@@ -1109,12 +1295,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 	dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
 			info->node, info->fix.id);
 
-	/* Switch back to kernel console on panic */
-	/* multi card linked list maybe */
 	if (list_empty(&kernel_fb_helper_list)) {
-		dev_info(fb_helper->dev->dev, "registered panic notifier\n");
-		atomic_notifier_chain_register(&panic_notifier_list,
-					       &paniced);
 		register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 	}
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 16a164770713..3c2d4abd71c5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -766,7 +766,7 @@ drm_gem_object_free(struct kref *kref)
 	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
 	struct drm_device *dev = obj->dev;
 
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	if (dev->driver->gem_free_object != NULL)
 		dev->driver->gem_free_object(obj);
@@ -778,22 +778,14 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
 	struct drm_gem_object *obj = vma->vm_private_data;
 
 	drm_gem_object_reference(obj);
-
-	mutex_lock(&obj->dev->struct_mutex);
-	drm_vm_open_locked(obj->dev, vma);
-	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
 
-	mutex_lock(&dev->struct_mutex);
-	drm_vm_close_locked(obj->dev, vma);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
@@ -850,7 +842,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 	 */
 	drm_gem_object_reference(obj);
 
-	drm_vm_open_locked(dev, vma);
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_mmap_obj);
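The vm_open/vm_close hunks work because the two unreference flavours have different locking contracts: drm_gem_object_unreference() requires the caller to hold struct_mutex (hence the WARN_ON in drm_gem_object_free() above), while the _unlocked variant uses kref_put_mutex() internally and only takes struct_mutex when the final reference dies and ->gem_free_object must run. That makes it safe from any sleepable context:

	/* Sketch: dropping a reference without holding struct_mutex.
	 * The helper locks internally only on the final put. */
	static void foo_bo_put(struct drm_gem_object *obj)
	{
		drm_gem_object_unreference_unlocked(obj);
	}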
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index bd75f303da63..86cc793cdf79 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -289,20 +289,15 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
 {
 	struct drm_gem_object *gem_obj;
 
-	mutex_lock(&drm->struct_mutex);
-
 	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
 	if (!gem_obj) {
 		dev_err(drm->dev, "failed to lookup GEM object\n");
-		mutex_unlock(&drm->struct_mutex);
 		return -EINVAL;
 	}
 
 	*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 
-	drm_gem_object_unreference(gem_obj);
-
-	mutex_unlock(&drm->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem_obj);
 
 	return 0;
 }
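End to end, the offset filled in by this helper is what userspace feeds to mmap(2). A userspace sketch (assuming the libdrm uapi headers are on the include path; error handling trimmed):

	#include <stdint.h>
	#include <stddef.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/drm.h>
	#include <drm/drm_mode.h>

	/* Resolve a dumb-buffer handle to a fake mmap offset via
	 * DRM_IOCTL_MODE_MAP_DUMB (which lands in the helper above for
	 * CMA drivers), then map the buffer. */
	static void *map_dumb(int drm_fd, uint32_t handle, size_t size)
	{
		struct drm_mode_map_dumb map = { .handle = handle };
		void *ptr;

		if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
			return NULL;

		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, drm_fd, map.offset);
		return ptr == MAP_FAILED ? NULL : ptr;
	}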
@@ -381,11 +376,8 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
 			  struct seq_file *m)
 {
 	struct drm_gem_object *obj = &cma_obj->base;
-	struct drm_device *dev = obj->dev;
 	uint64_t off;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	off = drm_vma_node_start(&obj->vma_node);
 
 	seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 9cfcd0aef0df..ddfa6014c2c2 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -95,7 +95,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	version = compat_alloc_user_space(sizeof(*version));
-	if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
+	if (!version)
 		return -EFAULT;
 	if (__put_user(v32.name_len, &version->name_len)
 	    || __put_user((void __user *)(unsigned long)v32.name,
@@ -142,7 +142,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	u = compat_alloc_user_space(sizeof(*u));
-	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+	if (!u)
 		return -EFAULT;
 	if (__put_user(uq32.unique_len, &u->unique_len)
 	    || __put_user((void __user *)(unsigned long)uq32.unique,
@@ -170,7 +170,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	u = compat_alloc_user_space(sizeof(*u));
-	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+	if (!u)
 		return -EFAULT;
 	if (__put_user(uq32.unique_len, &u->unique_len)
 	    || __put_user((void __user *)(unsigned long)uq32.unique,
@@ -202,7 +202,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	map = compat_alloc_user_space(sizeof(*map));
-	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+	if (!map)
 		return -EFAULT;
 	if (__put_user(idx, &map->offset))
 		return -EFAULT;
@@ -239,7 +239,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	map = compat_alloc_user_space(sizeof(*map));
-	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+	if (!map)
 		return -EFAULT;
 	if (__put_user(m32.offset, &map->offset)
 	    || __put_user(m32.size, &map->size)
@@ -279,7 +279,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	map = compat_alloc_user_space(sizeof(*map));
-	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+	if (!map)
 		return -EFAULT;
 	if (__put_user((void *)(unsigned long)handle, &map->handle))
 		return -EFAULT;
@@ -308,7 +308,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	client = compat_alloc_user_space(sizeof(*client));
-	if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
+	if (!client)
 		return -EFAULT;
 	if (__put_user(idx, &client->idx))
 		return -EFAULT;
@@ -347,7 +347,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
 	int i, err;
 
 	stats = compat_alloc_user_space(sizeof(*stats));
-	if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+	if (!stats)
 		return -EFAULT;
 
 	err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
@@ -384,8 +384,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
 	unsigned long agp_start;
 
 	buf = compat_alloc_user_space(sizeof(*buf));
-	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
-	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+	if (!buf || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
 		return -EFAULT;
 
 	if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
@@ -416,7 +415,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	buf = compat_alloc_user_space(sizeof(*buf));
-	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+	if (!buf)
 		return -EFAULT;
 
 	if (__put_user(b32.size, &buf->size)
@@ -457,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
 
 	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
 	request = compat_alloc_user_space(nbytes);
-	if (!access_ok(VERIFY_WRITE, request, nbytes))
+	if (!request)
 		return -EFAULT;
 	list = (struct drm_buf_desc *) (request + 1);
 
@@ -518,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
 		return -EINVAL;
 	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
 	request = compat_alloc_user_space(nbytes);
-	if (!access_ok(VERIFY_WRITE, request, nbytes))
+	if (!request)
 		return -EFAULT;
 	list = (struct drm_buf_pub *) (request + 1);
 
@@ -565,7 +564,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+	if (!request)
 		return -EFAULT;
 	if (__put_user(req32.count, &request->count)
 	    || __put_user((int __user *)(unsigned long)req32.list,
@@ -591,7 +590,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+	if (!request)
 		return -EFAULT;
 	if (__put_user(req32.ctx_id, &request->ctx_id)
 	    || __put_user((void *)(unsigned long)req32.handle,
@@ -615,7 +614,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+	if (!request)
 		return -EFAULT;
 	if (__put_user(ctx_id, &request->ctx_id))
 		return -EFAULT;
@@ -648,7 +647,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	res = compat_alloc_user_space(sizeof(*res));
-	if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+	if (!res)
 		return -EFAULT;
 	if (__put_user(res32.count, &res->count)
 	    || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
@@ -691,7 +690,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	d = compat_alloc_user_space(sizeof(*d));
-	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+	if (!d)
 		return -EFAULT;
 
 	if (__put_user(d32.context, &d->context)
@@ -766,7 +765,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
 	int err;
 
 	info = compat_alloc_user_space(sizeof(*info));
-	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+	if (!info)
 		return -EFAULT;
 
 	err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
@@ -809,7 +808,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || __put_user(req32.size, &request->size)
 	    || __put_user(req32.type, &request->type))
 		return -EFAULT;
@@ -836,7 +835,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
 	u32 handle;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || get_user(handle, &argp->handle)
 	    || __put_user(handle, &request->handle))
 		return -EFAULT;
@@ -860,7 +859,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || __put_user(req32.handle, &request->handle)
 	    || __put_user(req32.offset, &request->offset))
 		return -EFAULT;
@@ -876,7 +875,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
 	u32 handle;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || get_user(handle, &argp->handle)
 	    || __put_user(handle, &request->handle))
 		return -EFAULT;
@@ -899,8 +898,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
 	unsigned long x;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
 	    || __get_user(x, &argp->size)
 	    || __put_user(x, &request->size))
 		return -EFAULT;
@@ -925,8 +923,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
 	unsigned long x;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
 	    || __get_user(x, &argp->handle)
 	    || __put_user(x << PAGE_SHIFT, &request->handle))
 		return -EFAULT;
@@ -954,7 +951,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+	if (!request ||
 	    __put_user(update32.handle, &request->handle) ||
 	    __put_user(update32.type, &request->type) ||
 	    __put_user(update32.num, &request->num) ||
@@ -996,7 +993,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || __put_user(req32.request.type, &request->request.type)
 	    || __put_user(req32.request.sequence, &request->request.sequence)
 	    || __put_user(req32.request.signal, &request->request.signal))
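Every drm_ioc32.c hunk above is the same mechanical substitution. The premise (stated here as context, not part of this diff) is that compat_alloc_user_space() already runs access_ok() on the range it carves out of the user stack and returns NULL on failure, so the caller-side check collapses to a NULL test:

	/* before: allocate, then re-validate the pointer */
	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
		return -EFAULT;

	/* after: the helper has already validated the range */
	request = compat_alloc_user_space(sizeof(*request));
	if (!request)
		return -EFAULT;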
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b1d303fa2327..9a860ca1e9d7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -480,7 +480,7 @@ static int drm_version(struct drm_device *dev, void *data,
  * indicated permissions. If so, returns zero. Otherwise returns an
  * error code suitable for ioctl return.
  */
-static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 {
 	/* ROOT_ONLY is only for CAP_SYS_ADMIN */
 	if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@@ -508,6 +508,7 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_ioctl_permit);
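Exporting drm_ioctl_permit() lets a driver enforce the core's permission flags on ioctls it dispatches itself. A hypothetical call site (sketch; foo_* is not from this patch):

	static int foo_priv_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
	{
		int ret;

		/* Apply DRM_AUTH | DRM_MASTER semantics by hand. */
		ret = drm_ioctl_permit(DRM_AUTH | DRM_MASTER, file_priv);
		if (ret)
			return ret;

		/* ... privileged work ... */
		return 0;
	}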
 
 #define DRM_IOCTL_DEF(ioctl, _func, _flags)	\
 	[DRM_IOCTL_NR(ioctl)] = {		\
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b50fa0afd907..22d207e211e7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,8 +43,8 @@
 #include <linux/export.h>
 
 /* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) \
-	((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
+#define vblanktimestamp(dev, pipe, count) \
+	((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
 
 /* Retry timestamp calculation up to 3 times to satisfy
  * drm_timestamp_precision before giving up.
@@ -57,7 +57,7 @@
 #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
 
 static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
 			  struct timeval *tvblank, unsigned flags);
 
 static unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
@@ -107,7 +107,7 @@ static void store_vblank(struct drm_device *dev, int crtc,
 /**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
- * @crtc: counter to update
+ * @pipe: counter to update
  *
  * Call back into the driver to update the appropriate vblank counter
- * (specified by @crtc).  Deal with wraparound, if it occurred, and
+ * (specified by @pipe).  Deal with wraparound, if it occurred, and
@@ -120,9 +120,9 @@ static void store_vblank(struct drm_device *dev, int crtc,
  * Note: caller must hold dev->vbl_lock since this reads & writes
  * device vblank fields.
  */
-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 cur_vblank, diff;
 	bool rc;
 	struct timeval t_vblank;
@@ -140,21 +140,21 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	 * corresponding vblank timestamp.
 	 */
 	do {
-		cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
-		rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
-	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+		cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
+		rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0);
+	} while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe));
 
 	/* Deal with counter wrap */
 	diff = cur_vblank - vblank->last;
 	if (cur_vblank < vblank->last) {
 		diff += dev->max_vblank_count + 1;
 
-		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
-			  crtc, vblank->last, cur_vblank, diff);
+		DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+			  pipe, vblank->last, cur_vblank, diff);
 	}
 
-	DRM_DEBUG("updating vblank count on crtc %d, missed %d\n",
-		  crtc, diff);
+	DRM_DEBUG("updating vblank count on crtc %u, missed %d\n",
+		  pipe, diff);
 
 	if (diff == 0)
 		return;
@@ -167,7 +167,7 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	if (!rc)
 		t_vblank = (struct timeval) {0, 0};
 
-	store_vblank(dev, crtc, diff, &t_vblank);
+	store_vblank(dev, pipe, diff, &t_vblank);
 }
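The unsigned subtraction plus conditional correction handles hardware frame counters narrower than 32 bits. A standalone arithmetic check (userspace C, illustrative values only):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t max_vblank_count = 0xffffff;	/* 24-bit counter */
		uint32_t last = 0xfffffe, cur = 0x000001;
		uint32_t diff = cur - last;	/* 0xff000003: wrapped */

		if (cur < last)
			diff += max_vblank_count + 1;

		printf("missed %u vblanks\n", diff);	/* prints 3 */
		return 0;
	}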
 
 /*
@@ -176,9 +176,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
  * are preserved, even if there are any spurious vblank irq's after
  * disable.
  */
-static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 	u32 vblcount;
 	s64 diff_ns;
@@ -206,8 +206,8 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * vblank interrupt is disabled.
 	 */
 	if (!vblank->enabled &&
-	    drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) {
-		drm_update_vblank_count(dev, crtc);
+	    drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) {
+		drm_update_vblank_count(dev, pipe);
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 		return;
 	}
@@ -218,7 +218,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * hardware potentially runtime suspended.
 	 */
 	if (vblank->enabled) {
-		dev->driver->disable_vblank(dev, crtc);
+		dev->driver->disable_vblank(dev, pipe);
 		vblank->enabled = false;
 	}
 
@@ -235,9 +235,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * delayed gpu counter increment.
 	 */
 	do {
-		vblank->last = dev->driver->get_vblank_counter(dev, crtc);
-		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
-	} while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+		vblank->last = dev->driver->get_vblank_counter(dev, pipe);
+		vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0);
+	} while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc);
 
 	if (!count)
 		vblrc = 0;
@@ -247,7 +247,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 */
 	vblcount = vblank->count;
 	diff_ns = timeval_to_ns(&tvblank) -
-		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+		  timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
 
 	/* If there is at least 1 msec difference between the last stored
 	 * timestamp and tvblank, then we are currently executing our
@@ -262,7 +262,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * hope for the best.
 	 */
 	if (vblrc && (abs64(diff_ns) > 1000000))
-		store_vblank(dev, crtc, 1, &tvblank);
+		store_vblank(dev, pipe, 1, &tvblank);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
@@ -271,16 +271,16 @@ static void vblank_disable_fn(unsigned long arg)
 {
 	struct drm_vblank_crtc *vblank = (void *)arg;
 	struct drm_device *dev = vblank->dev;
+	unsigned int pipe = vblank->pipe;
 	unsigned long irqflags;
-	int crtc = vblank->crtc;
 
 	if (!dev->vblank_disable_allowed)
 		return;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
-		DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
-		vblank_disable_and_save(dev, crtc);
+		DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
+		vblank_disable_and_save(dev, pipe);
 	}
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
@@ -293,14 +293,14 @@ static void vblank_disable_fn(unsigned long arg)
  */
 void drm_vblank_cleanup(struct drm_device *dev)
 {
-	int crtc;
+	unsigned int pipe;
 
 	/* Bail if the driver didn't call drm_vblank_init() */
 	if (dev->num_crtcs == 0)
 		return;
 
-	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
-		struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
+		struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
 		WARN_ON(vblank->enabled &&
 			drm_core_check_feature(dev, DRIVER_MODESET));
@@ -316,17 +316,18 @@ EXPORT_SYMBOL(drm_vblank_cleanup);
 
 /**
  * drm_vblank_init - initialize vblank support
- * @dev: drm_device
- * @num_crtcs: number of crtcs supported by @dev
+ * @dev: DRM device
+ * @num_crtcs: number of CRTCs supported by @dev
  *
  * This function initializes vblank support for @num_crtcs display pipelines.
  *
  * Returns:
  * Zero on success or a negative error code on failure.
  */
-int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
 {
-	int i, ret = -ENOMEM;
+	int ret = -ENOMEM;
+	unsigned int i;
 
 	spin_lock_init(&dev->vbl_lock);
 	spin_lock_init(&dev->vblank_time_lock);
@@ -341,7 +342,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 		struct drm_vblank_crtc *vblank = &dev->vblank[i];
 
 		vblank->dev = dev;
-		vblank->crtc = i;
+		vblank->pipe = i;
 		init_waitqueue_head(&vblank->queue);
 		setup_timer(&vblank->disable_timer, vblank_disable_fn,
 			    (unsigned long)vblank);
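For reference, drm_vblank_init() is called once at driver load, before the vblank interrupt can fire. A sketch of the call sequence (foo_* and the pipe count are assumptions):

	static int foo_load(struct drm_device *dev, unsigned long flags)
	{
		int ret;

		ret = drm_vblank_init(dev, 2);	/* two display pipes */
		if (ret)
			return ret;

		/* ... install the interrupt handler that will call
		 * drm_handle_vblank(dev, pipe) for each pipe ... */
		return 0;
	}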
@@ -624,17 +625,17 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 			framedur_ns /= 2;
 	} else
-		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+		DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
 			  crtc->base.id);
 
 	crtc->pixeldur_ns = pixeldur_ns;
 	crtc->linedur_ns  = linedur_ns;
 	crtc->framedur_ns = framedur_ns;
 
-	DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+	DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
 		  crtc->base.id, mode->crtc_htotal,
 		  mode->crtc_vtotal, mode->crtc_vdisplay);
-	DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+	DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
 		  crtc->base.id, dotclock, framedur_ns,
 		  linedur_ns, pixeldur_ns);
 }
@@ -643,7 +644,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
 /**
  * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
  * @dev: DRM device
- * @crtc: Which CRTC's vblank timestamp to retrieve
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
  * @max_error: Desired maximum allowable error in timestamps (nanosecs)
  *             On return contains true maximum error of timestamp
  * @vblank_time: Pointer to struct timeval which should receive the timestamp
@@ -686,7 +687,8 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
  * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
  *
  */
-int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+					  unsigned int pipe,
 					  int *max_error,
 					  struct timeval *vblank_time,
 					  unsigned flags,
@@ -700,8 +702,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 	int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
 	bool invbl;
 
-	if (crtc < 0 || crtc >= dev->num_crtcs) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %u\n", pipe);
 		return -EINVAL;
 	}
 
@@ -720,7 +722,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 	 * Happens during initial modesetting of a crtc.
 	 */
 	if (framedur_ns == 0) {
-		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+		DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
 		return -EAGAIN;
 	}
 
@@ -736,13 +738,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 		 * Get vertical and horizontal scanout position vpos, hpos,
 		 * and bounding timestamps stime, etime, pre/post query.
 		 */
-		vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
+		vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos,
 							       &hpos, &stime, &etime);
 
 		/* Return as no-op if scanout query unsupported or failed. */
 		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
-			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
-				  crtc, vbl_status);
+			DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n",
+				  pipe, vbl_status);
 			return -EIO;
 		}
 
@@ -756,8 +758,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 
 	/* Noisy system timing? */
 	if (i == DRM_TIMESTAMP_MAXRETRIES) {
-		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
-			  crtc, duration_ns/1000, *max_error/1000, i);
+		DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
+			  pipe, duration_ns/1000, *max_error/1000, i);
 	}
 
 	/* Return upper bound of timestamp precision error. */
@@ -790,8 +792,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 		etime = ktime_sub_ns(etime, delta_ns);
 	*vblank_time = ktime_to_timeval(etime);
 
-	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
-		  crtc, (int)vbl_status, hpos, vpos,
+	DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+		  pipe, (int)vbl_status, hpos, vpos,
 		  (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
 		  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
 		  duration_ns/1000, i);
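The correction subtracted via ktime_sub_ns() above is just the scanout position times the scanline/pixel durations. Back-of-envelope numbers (illustrative, not from this patch):

	#include <stdio.h>

	int main(void)
	{
		long frame_ns    = 16666667;		/* ~60 Hz            */
		long linedur_ns  = frame_ns / 1125;	/* 14814 ns per line */
		long pixeldur_ns = linedur_ns / 2200;	/* 6 ns per pixel    */
		int vpos = 100, hpos = 500;		/* sampled position  */

		/* Scanout has been running this long since the vblank
		 * moment, so that much is subtracted from the sampled
		 * system time. */
		long delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;

		printf("delta = %ld ns (~%.2f ms)\n",
		       delta_ns, delta_ns / 1e6);	/* ~1.48 ms */
		return 0;
	}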
@@ -816,7 +818,7 @@ static struct timeval get_drm_timestamp(void)
  * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
  *                             vblank interval
  * @dev: DRM device
- * @crtc: which CRTC's vblank timestamp to retrieve
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
  * @tvblank: Pointer to target struct timeval which should receive the timestamp
  * @flags: Flags to pass to driver:
  *         0 = Default,
@@ -833,7 +835,7 @@ static struct timeval get_drm_timestamp(void)
  * True if timestamp is considered to be very precise, false otherwise.
  */
 static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
 			  struct timeval *tvblank, unsigned flags)
 {
 	int ret;
@@ -843,7 +845,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
 
 	/* Query driver if possible and precision timestamping enabled. */
 	if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
-		ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+		ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
 							tvblank, flags);
 		if (ret > 0)
 			return true;
@@ -860,7 +862,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
 /**
  * drm_vblank_count - retrieve "cooked" vblank counter value
  * @dev: DRM device
- * @crtc: which counter to retrieve
+ * @pipe: index of CRTC for which to retrieve the counter
  *
  * Fetches the "cooked" vblank count value that represents the number of
  * vblank events since the system was booted, including lost events due to
@@ -871,12 +873,13 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
  * Returns:
  * The software vblank counter.
  */
-u32 drm_vblank_count(struct drm_device *dev, int crtc)
+u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return 0;
+
 	return vblank->count;
 }
 EXPORT_SYMBOL(drm_vblank_count);
@@ -901,11 +904,10 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
 EXPORT_SYMBOL(drm_crtc_vblank_count);
 
 /**
- * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
- * and the system timestamp corresponding to that vblank counter value.
- *
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
+ *     system timestamp corresponding to that vblank counter value.
  * @dev: DRM device
- * @crtc: which counter to retrieve
+ * @pipe: index of CRTC whose counter to retrieve
  * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
  *
  * Fetches the "cooked" vblank count value that represents the number of
@@ -913,13 +915,13 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
  * modesetting activity. Returns corresponding system timestamp of the time
  * of the vblank interval that corresponds to the current vblank counter value.
  */
-u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
 			      struct timeval *vblanktime)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 cur_vblank;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return 0;
 
 	/*
@@ -930,7 +932,7 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 	do {
 		cur_vblank = vblank->count;
 		smp_rmb();
-		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+		*vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
 		smp_rmb();
 	} while (cur_vblank != vblank->count);
 
@@ -957,7 +959,7 @@ static void send_vblank_event(struct drm_device *dev,
 /**
  * drm_send_vblank_event - helper to send vblank event after pageflip
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  * @e: the event to send
  *
  * Updates sequence # and timestamp on event, and sends it to userspace.
@@ -965,20 +967,20 @@ static void send_vblank_event(struct drm_device *dev,
  *
  * This is the legacy version of drm_crtc_send_vblank_event().
  */
-void drm_send_vblank_event(struct drm_device *dev, int crtc,
-		struct drm_pending_vblank_event *e)
+void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+			   struct drm_pending_vblank_event *e)
 {
 	struct timeval now;
 	unsigned int seq;
 
-	if (crtc >= 0) {
-		seq = drm_vblank_count_and_time(dev, crtc, &now);
+	if (dev->num_crtcs > 0) {
+		seq = drm_vblank_count_and_time(dev, pipe, &now);
 	} else {
 		seq = 0;
 
 		now = get_drm_timestamp();
 	}
-	e->pipe = crtc;
+	e->pipe = pipe;
 	send_vblank_event(dev, e, seq, &now);
 }
 EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1003,11 +1005,14 @@ EXPORT_SYMBOL(drm_crtc_send_vblank_event);
 /**
  * drm_vblank_enable - enable the vblank interrupt on a CRTC
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
  */
-static int drm_vblank_enable(struct drm_device *dev, int crtc)
+static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	int ret = 0;
 
 	assert_spin_locked(&dev->vbl_lock);
@@ -1022,13 +1027,13 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
 		 * timestamps. Filtercode in drm_handle_vblank() will
 		 * prevent double-accounting of same vblank interval.
 		 */
-		ret = dev->driver->enable_vblank(dev, crtc);
-		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+		ret = dev->driver->enable_vblank(dev, pipe);
+		DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret);
 		if (ret)
 			atomic_dec(&vblank->refcount);
 		else {
 			vblank->enabled = true;
-			drm_update_vblank_count(dev, crtc);
+			drm_update_vblank_count(dev, pipe);
 		}
 	}
 
@@ -1040,7 +1045,7 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
 /**
  * drm_vblank_get - get a reference count on vblank events
  * @dev: DRM device
- * @crtc: which CRTC to own
+ * @pipe: index of CRTC to own
  *
  * Acquire a reference count on vblank events to avoid having them disabled
  * while in use.
@@ -1048,24 +1053,24 @@ static int drm_vblank_enable(struct drm_device *dev, int crtc)
  * This is the legacy version of drm_crtc_vblank_get().
  *
  * Returns:
- * Zero on success, nonzero on failure.
+ * Zero on success or a negative error code on failure.
  */
-int drm_vblank_get(struct drm_device *dev, int crtc)
+int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 	int ret = 0;
 
 	if (!dev->num_crtcs)
 		return -EINVAL;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return -EINVAL;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
 	if (atomic_add_return(1, &vblank->refcount) == 1) {
-		ret = drm_vblank_enable(dev, crtc);
+		ret = drm_vblank_enable(dev, pipe);
 	} else {
 		if (!vblank->enabled) {
 			atomic_dec(&vblank->refcount);
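The refcount's 0->1 transition is what actually enables the interrupt, so users must hold a reference across any window where they depend on vblanks being delivered. Typical pairing in a page-flip path (sketch; foo_arm_flip() is hypothetical):

	static int foo_page_flip(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb)
	{
		struct drm_device *dev = crtc->dev;
		unsigned int pipe = drm_crtc_index(crtc);
		int ret;

		/* Keep the vblank irq enabled until the flip completes. */
		ret = drm_vblank_get(dev, pipe);
		if (ret)
			return ret;

		foo_arm_flip(crtc, fb);	/* hypothetical hw programming */

		/* The matching drm_vblank_put() runs from the vblank
		 * interrupt handler once the flip has landed. */
		return 0;
	}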
@@ -1088,7 +1093,7 @@ EXPORT_SYMBOL(drm_vblank_get);
  * This is the native kms version of drm_vblank_get().
  *
  * Returns:
- * Zero on success, nonzero on failure.
+ * Zero on success or a negative error code on failure.
  */
 int drm_crtc_vblank_get(struct drm_crtc *crtc)
 {
@@ -1097,23 +1102,23 @@ int drm_crtc_vblank_get(struct drm_crtc *crtc)
 EXPORT_SYMBOL(drm_crtc_vblank_get);
 
 /**
- * drm_vblank_put - give up ownership of vblank events
+ * drm_vblank_put - release ownership of vblank events
  * @dev: DRM device
- * @crtc: which counter to give up
+ * @pipe: index of CRTC to release
  *
  * Release ownership of a given vblank counter, turning off interrupts
  * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
  *
  * This is the legacy version of drm_crtc_vblank_put().
  */
-void drm_vblank_put(struct drm_device *dev, int crtc)
+void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
-	if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(atomic_read(&vblank->refcount) == 0))
 		return;
 
 	/* Last user schedules interrupt disable */
@@ -1147,30 +1152,34 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
 /**
  * drm_wait_one_vblank - wait for one vblank
  * @dev: DRM device
- * @crtc: crtc index
+ * @pipe: CRTC index
  *
- * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
- * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
+ * This waits for one vblank to pass on @pipe, using the irq driver interfaces.
+ * It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
  * due to lack of driver support or because the crtc is off.
  */
-void drm_wait_one_vblank(struct drm_device *dev, int crtc)
+void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
 {
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	int ret;
 	u32 last;
 
-	ret = drm_vblank_get(dev, crtc);
-	if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
+	if (WARN_ON(pipe >= dev->num_crtcs))
+		return;
+
+	ret = drm_vblank_get(dev, pipe);
+	if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret))
 		return;
 
-	last = drm_vblank_count(dev, crtc);
+	last = drm_vblank_count(dev, pipe);
 
-	ret = wait_event_timeout(dev->vblank[crtc].queue,
-				 last != drm_vblank_count(dev, crtc),
+	ret = wait_event_timeout(vblank->queue,
+				 last != drm_vblank_count(dev, pipe),
 				 msecs_to_jiffies(100));
 
-	WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc);
+	WARN(ret == 0, "vblank wait timed out on crtc %i\n", pipe);
 
-	drm_vblank_put(dev, crtc);
+	drm_vblank_put(dev, pipe);
 }
 EXPORT_SYMBOL(drm_wait_one_vblank);
 
@@ -1191,7 +1200,7 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
 /**
  * drm_vblank_off - disable vblank events on a CRTC
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
  * Drivers can use this function to shut down the vblank interrupt handling when
  * disabling a crtc. This function ensures that the latest vblank frame count is
@@ -1202,21 +1211,21 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
  *
  * This is the legacy version of drm_crtc_vblank_off().
  */
-void drm_vblank_off(struct drm_device *dev, int crtc)
+void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long irqflags;
 	unsigned int seq;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
 	spin_lock_irqsave(&dev->event_lock, irqflags);
 
 	spin_lock(&dev->vbl_lock);
-	vblank_disable_and_save(dev, crtc);
+	vblank_disable_and_save(dev, pipe);
 	wake_up(&vblank->queue);
 
 	/*
@@ -1230,16 +1239,16 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 	spin_unlock(&dev->vbl_lock);
 
 	/* Send any queued vblank events, lest the natives grow disquiet */
-	seq = drm_vblank_count_and_time(dev, crtc, &now);
+	seq = drm_vblank_count_and_time(dev, pipe, &now);
 
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
-		if (e->pipe != crtc)
+		if (e->pipe != pipe)
 			continue;
 		DRM_DEBUG("Sending premature vblank event on disable: \
 			  wanted %d, current %d\n",
 			  e->event.sequence, seq);
 		list_del(&e->base.link);
-		drm_vblank_put(dev, e->pipe);
+		drm_vblank_put(dev, pipe);
 		send_vblank_event(dev, e, seq, &now);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
@@ -1267,7 +1276,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
 
 /**
  * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
- * @crtc: CRTC in question
+ * @drm_crtc: CRTC in question
  *
  * Drivers can use this function to reset the vblank state to off at load time.
  * Drivers should use this together with the drm_crtc_vblank_off() and
@@ -1300,7 +1309,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset);
 /**
  * drm_vblank_on - enable vblank events on a CRTC
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
 * This function restores the vblank interrupt state captured with
  * drm_vblank_off() again. Note that calls to drm_vblank_on() and
@@ -1309,12 +1318,12 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset);
  *
  * This is the legacy version of drm_crtc_vblank_on().
  */
-void drm_vblank_on(struct drm_device *dev, int crtc)
+void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -1332,7 +1341,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc)
 	 * vblank counter value before and after a modeset
 	 */
 	vblank->last =
-		(dev->driver->get_vblank_counter(dev, crtc) - 1) &
+		(dev->driver->get_vblank_counter(dev, pipe) - 1) &
 		dev->max_vblank_count;
 	/*
 	 * re-enable interrupts if there are users left, or the
@@ -1340,7 +1349,7 @@ void drm_vblank_on(struct drm_device *dev, int crtc)
 	 */
 	if (atomic_read(&vblank->refcount) != 0 ||
 	    (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
-		WARN_ON(drm_vblank_enable(dev, crtc));
+		WARN_ON(drm_vblank_enable(dev, pipe));
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_vblank_on);
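The off/on pair is meant to bracket a full modeset in the driver's CRTC hooks, so the core can account for the frame counter resetting while the pipe is down. A sketch (the foo_hw_* calls are assumptions):

	static void foo_crtc_disable(struct drm_crtc *crtc)
	{
		drm_crtc_vblank_off(crtc);	/* quiesce, flush events */
		foo_hw_disable_pipe(crtc);
	}

	static void foo_crtc_enable(struct drm_crtc *crtc)
	{
		foo_hw_enable_pipe(crtc);
		drm_crtc_vblank_on(crtc);	/* re-arm vblank handling */
	}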
@@ -1365,7 +1374,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_on);
 /**
  * drm_vblank_pre_modeset - account for vblanks across mode sets
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
  * Account for vblank events across mode setting events, which will likely
  * reset the hardware frame counter.
@@ -1385,15 +1394,15 @@ EXPORT_SYMBOL(drm_crtc_vblank_on);
  * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
  * again.
  */
-void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
 	/* vblank is not initialized (IRQ not installed ?), or has been freed */
 	if (!dev->num_crtcs)
 		return;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
 	/*
@@ -1405,7 +1414,7 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
 	 */
 	if (!vblank->inmodeset) {
 		vblank->inmodeset = 0x1;
-		if (drm_vblank_get(dev, crtc) == 0)
+		if (drm_vblank_get(dev, pipe) == 0)
 			vblank->inmodeset |= 0x2;
 	}
 }
@@ -1414,27 +1423,30 @@ EXPORT_SYMBOL(drm_vblank_pre_modeset);
 /**
  * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
  * This function again drops the temporary vblank reference acquired in
  * drm_vblank_pre_modeset.
  */
-void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 
 	/* vblank is not initialized (IRQ not installed ?), or has been freed */
 	if (!dev->num_crtcs)
 		return;
 
+	if (WARN_ON(pipe >= dev->num_crtcs))
+		return;
+
 	if (vblank->inmodeset) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		dev->vblank_disable_allowed = true;
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
 		if (vblank->inmodeset & 0x2)
-			drm_vblank_put(dev, crtc);
+			drm_vblank_put(dev, pipe);
 
 		vblank->inmodeset = 0;
 	}
@@ -1456,7 +1468,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	unsigned int crtc;
+	unsigned int pipe;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
@@ -1466,16 +1478,16 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
 
-	crtc = modeset->crtc;
-	if (crtc >= dev->num_crtcs)
+	pipe = modeset->crtc;
+	if (pipe >= dev->num_crtcs)
 		return -EINVAL;
 
 	switch (modeset->cmd) {
 	case _DRM_PRE_MODESET:
-		drm_vblank_pre_modeset(dev, crtc);
+		drm_vblank_pre_modeset(dev, pipe);
 		break;
 	case _DRM_POST_MODESET:
-		drm_vblank_post_modeset(dev, crtc);
+		drm_vblank_post_modeset(dev, pipe);
 		break;
 	default:
 		return -EINVAL;
@@ -1484,7 +1496,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
 				  union drm_wait_vblank *vblwait,
 				  struct drm_file *file_priv)
 {
@@ -1538,7 +1550,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 		vblwait->reply.sequence = vblwait->request.sequence;
 	}
 
-	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+	DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
 		  vblwait->request.sequence, seq, pipe);
 
 	trace_drm_vblank_event_queued(current->pid, pipe,
@@ -1587,7 +1599,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 	struct drm_vblank_crtc *vblank;
 	union drm_wait_vblank *vblwait = data;
 	int ret;
-	unsigned int flags, seq, crtc, high_crtc;
+	unsigned int flags, seq, pipe, high_pipe;
 
 	if (!dev->irq_enabled)
 		return -EINVAL;
@@ -1606,22 +1618,22 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 	}
 
 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
-	high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
-	if (high_crtc)
-		crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+	high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+	if (high_pipe)
+		pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
 	else
-		crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-	if (crtc >= dev->num_crtcs)
+		pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+	if (pipe >= dev->num_crtcs)
 		return -EINVAL;
 
-	vblank = &dev->vblank[crtc];
+	vblank = &dev->vblank[pipe];
 
-	ret = drm_vblank_get(dev, crtc);
+	ret = drm_vblank_get(dev, pipe);
 	if (ret) {
 		DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
 		return ret;
 	}
-	seq = drm_vblank_count(dev, crtc);
+	seq = drm_vblank_count(dev, pipe);
 
 	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
 	case _DRM_VBLANK_RELATIVE:
@@ -1638,7 +1650,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		/* must hold on to the vblank ref until the event fires
 		 * drm_vblank_put will be called asynchronously
 		 */
-		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+		return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
 	}
 
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -1646,11 +1658,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		vblwait->request.sequence = seq + 1;
 	}
 
-	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
-		  vblwait->request.sequence, crtc);
+	DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
+		  vblwait->request.sequence, pipe);
 	vblank->last_wait = vblwait->request.sequence;
 	DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
-		    (((drm_vblank_count(dev, crtc) -
+		    (((drm_vblank_count(dev, pipe) -
 		       vblwait->request.sequence) <= (1 << 23)) ||
 		     !vblank->enabled ||
 		     !dev->irq_enabled));
@@ -1658,7 +1670,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 	if (ret != -EINTR) {
 		struct timeval now;
 
-		vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
+		vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
 		vblwait->reply.tval_sec = now.tv_sec;
 		vblwait->reply.tval_usec = now.tv_usec;
 
@@ -1669,11 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 	}
 
 done:
-	drm_vblank_put(dev, crtc);
+	drm_vblank_put(dev, pipe);
 	return ret;
 }
 
-static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
@@ -1681,10 +1693,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
 
 	assert_spin_locked(&dev->event_lock);
 
-	seq = drm_vblank_count_and_time(dev, crtc, &now);
+	seq = drm_vblank_count_and_time(dev, pipe, &now);
 
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
-		if (e->pipe != crtc)
+		if (e->pipe != pipe)
 			continue;
 		if ((seq - e->event.sequence) > (1<<23))
 			continue;
@@ -1693,26 +1705,26 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
 			  e->event.sequence, seq);
 
 		list_del(&e->base.link);
-		drm_vblank_put(dev, e->pipe);
+		drm_vblank_put(dev, pipe);
 		send_vblank_event(dev, e, seq, &now);
 	}
 
-	trace_drm_vblank_event(crtc, seq);
+	trace_drm_vblank_event(pipe, seq);
 }
 
 /**
  * drm_handle_vblank - handle a vblank event
  * @dev: DRM device
- * @crtc: where this event occurred
+ * @pipe: index of CRTC where this event occurred
  *
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
  *
  * This is the legacy version of drm_crtc_handle_vblank().
  */
-bool drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 vblcount;
 	s64 diff_ns;
 	struct timeval tvblank;
@@ -1721,7 +1733,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	if (WARN_ON_ONCE(!dev->num_crtcs))
 		return false;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return false;
 
 	spin_lock_irqsave(&dev->event_lock, irqflags);
@@ -1745,11 +1757,11 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 
 	/* Get current timestamp and count. */
 	vblcount = vblank->count;
-	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+	drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ);
 
 	/* Compute time difference to timestamp of last vblank */
 	diff_ns = timeval_to_ns(&tvblank) -
-		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+		  timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
 
 	/* Update vblank timestamp and count if at least
 	 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
@@ -1761,15 +1773,15 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	 * ignore those for accounting.
 	 */
 	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
-		store_vblank(dev, crtc, 1, &tvblank);
+		store_vblank(dev, pipe, 1, &tvblank);
 	else
-		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
-			  crtc, (int) diff_ns);
+		DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n",
+			  pipe, (int) diff_ns);
 
 	spin_unlock(&dev->vblank_time_lock);
 
 	wake_up(&vblank->queue);
-	drm_handle_vblank_events(dev, crtc);
+	drm_handle_vblank_events(dev, pipe);
 
 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
 
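Note: the drm_irq.c hunks above complete the crtc -> pipe rename in the vblank code. The index is now an unsigned int throughout, which matches the pipe >= dev->num_crtcs range checks (a signed index could slip past them), and the DRM_DEBUG format strings move from %d to %u to agree with the new type. The (1 << 23) comparisons are the wraparound-safe ordering test on the 32-bit vblank counter. A minimal sketch of how a driver feeds this path from its interrupt handler -- the mydrv names are hypothetical, only drm_handle_vblank() is from this patch:

	static irqreturn_t mydrv_irq_handler(int irq, void *arg)
	{
		struct drm_device *dev = arg;
		unsigned int pipe;	/* CRTC index, unsigned after this patch */

		for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
			/* mydrv_vblank_pending() is a hypothetical register check */
			if (mydrv_vblank_pending(dev, pipe))
				drm_handle_vblank(dev, pipe); /* bump counter, fire events */
		}
		return IRQ_HANDLED;
	}
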
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index c1dc61473db5..9b731786e4db 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -42,7 +42,7 @@ struct drm_file;
 #define DRM_KERNEL_CONTEXT		0
 #define DRM_RESERVED_CONTEXTS		1
 
-int drm_legacy_ctxbitmap_init(struct drm_device *dev);
+void drm_legacy_ctxbitmap_init(struct drm_device *dev);
 void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
 void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
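Note: drm_legacy_ctxbitmap_init() can no longer fail, so the prototype drops its int return and callers lose their error handling. A sketch of why, under the assumption (true in mainline around this time, but not shown in this hunk) that the body reduces to IDR setup:

	/* Assumed body; idr_init() returns void, so there is nothing to propagate. */
	void drm_legacy_ctxbitmap_init(struct drm_device *dev)
	{
		idr_init(&dev->ctx_idr);
	}
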
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f861361a635e..4924d381b664 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -61,6 +61,9 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
 	struct drm_master *master = file_priv->master;
 	int ret = 0;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	++file_priv->lock_count;
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
@@ -153,6 +156,9 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
 	struct drm_lock *lock = data;
 	struct drm_master *master = file_priv->master;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
 			  task_pid_nr(current), lock->context);
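Note: both legacy lock ioctls now bail out early on modesetting drivers -- the hardware lock only ever made sense for user-mode-setting userspace. From userspace the effect is a plain EINVAL; a sketch, assuming fd is an open node of a DRIVER_MODESET driver:

	struct drm_lock lock = { .context = 1, .flags = 0 };

	if (ioctl(fd, DRM_IOCTL_LOCK, &lock) < 0 && errno == EINVAL)
		; /* expected on any KMS driver after this change */
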
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index c0a5cd8c5262..fba321ca4344 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -55,41 +55,27 @@
  *     drm_modeset_acquire_fini(&ctx);
  */
 
-
 /**
- * __drm_modeset_lock_all - internal helper to grab all modeset locks
- * @dev: DRM device
- * @trylock: trylock mode for atomic contexts
- *
- * This is a special version of drm_modeset_lock_all() which can also be used in
- * atomic contexts. Then @trylock must be set to true.
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
  *
- * Returns:
- * 0 on success or negative error code on failure.
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
  */
-int __drm_modeset_lock_all(struct drm_device *dev,
-			   bool trylock)
+void drm_modeset_lock_all(struct drm_device *dev)
 {
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_modeset_acquire_ctx *ctx;
 	int ret;
 
-	ctx = kzalloc(sizeof(*ctx),
-		      trylock ? GFP_ATOMIC : GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (WARN_ON(!ctx))
+		return;
 
-	if (trylock) {
-		if (!mutex_trylock(&config->mutex)) {
-			ret = -EBUSY;
-			goto out;
-		}
-	} else {
-		mutex_lock(&config->mutex);
-	}
+	mutex_lock(&config->mutex);
 
 	drm_modeset_acquire_init(ctx, 0);
-	ctx->trylock_only = trylock;
 
 retry:
 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
@@ -108,7 +94,7 @@ retry:
 
 	drm_warn_on_modeset_not_all_locked(dev);
 
-	return 0;
+	return;
 
 fail:
 	if (ret == -EDEADLK) {
@@ -116,23 +102,7 @@ fail:
 		goto retry;
 	}
 
-out:
 	kfree(ctx);
-	return ret;
-}
-EXPORT_SYMBOL(__drm_modeset_lock_all);
-
-/**
- * drm_modeset_lock_all - take all modeset locks
- * @dev: drm device
- *
- * This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented. Locks must be dropped with
- * drm_modeset_unlock_all.
- */
-void drm_modeset_lock_all(struct drm_device *dev)
-{
-	WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
 }
 EXPORT_SYMBOL(drm_modeset_lock_all);
 
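Note: with the trylock variant folded away, drm_modeset_lock_all() has no caller-visible failure mode left -- the only thing that can still go wrong is the context allocation, which becomes a WARN instead of an error return. Call sites therefore shrink to the plain pattern:

	/* Before: ret = __drm_modeset_lock_all(dev, false); if (ret) ... */
	drm_modeset_lock_all(dev);
	/* ... walk or mutate mode_config state ... */
	drm_modeset_unlock_all(dev);
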
@@ -276,7 +246,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
 	if (oops_in_progress)
 		return;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+	drm_for_each_crtc(crtc, dev)
 		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -464,18 +434,17 @@ EXPORT_SYMBOL(drm_modeset_unlock);
 int drm_modeset_lock_all_crtcs(struct drm_device *dev,
 		struct drm_modeset_acquire_ctx *ctx)
 {
-	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
 	int ret = 0;
 
-	list_for_each_entry(crtc, &config->crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 		ret = drm_modeset_lock(&crtc->mutex, ctx);
 		if (ret)
 			return ret;
 	}
 
-	list_for_each_entry(plane, &config->plane_list, head) {
+	drm_for_each_plane(plane, dev) {
 		ret = drm_modeset_lock(&plane->mutex, ctx);
 		if (ret)
 			return ret;
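Note: the open-coded list_for_each_entry() walks here and in the files below move to the drm_for_each_crtc()/drm_for_each_plane() wrappers. Assuming the usual definition -- the macros are introduced elsewhere, not in this diff -- they expand to exactly the walk they replace, so this is a readability change only:

	/* Presumed definition, for reference: */
	#define drm_for_each_crtc(crtc, dev) \
		list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
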
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index aaa130736bf8..be3884073ea4 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -19,7 +19,7 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
 	unsigned int index = 0;
 	struct drm_crtc *tmp;
 
-	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(tmp, dev) {
 		if (tmp->port == port)
 			return 1 << index;
 
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 2f0ed11024eb..5e5a07af02c8 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -91,13 +91,14 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
 	 */
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev) {
 		if (connector->encoder && connector->encoder->crtc == crtc) {
 			if (connector_list != NULL && count < num_connectors)
 				*(connector_list++) = connector;
 
 			count++;
 		}
+	}
 
 	return count;
 }
@@ -436,7 +437,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
 
 	for (i = 0; i < 2; i++) {
 		if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
-			crtc_funcs[i]->atomic_begin(crtc[i]);
+			crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state);
 	}
 
 	/*
@@ -451,7 +452,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
 
 	for (i = 0; i < 2; i++) {
 		if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
-			crtc_funcs[i]->atomic_flush(crtc[i]);
+			crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state);
 	}
 
 	/*
@@ -525,10 +526,12 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
 
 	if (plane->funcs->atomic_duplicate_state)
 		plane_state = plane->funcs->atomic_duplicate_state(plane);
-	else if (plane->state)
+	else {
+		if (!plane->state)
+			drm_atomic_helper_plane_reset(plane);
+
 		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-	else
-		plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+	}
 	if (!plane_state)
 		return -ENOMEM;
 	plane_state->plane = plane;
@@ -572,10 +575,12 @@ int drm_plane_helper_disable(struct drm_plane *plane)
 
 	if (plane->funcs->atomic_duplicate_state)
 		plane_state = plane->funcs->atomic_duplicate_state(plane);
-	else if (plane->state)
+	else {
+		if (!plane->state)
+			drm_atomic_helper_plane_reset(plane);
+
 		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-	else
-		plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+	}
 	if (!plane_state)
 		return -ENOMEM;
 	plane_state->plane = plane;
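Note: two independent fixes in drm_plane_helper.c. The atomic_begin/atomic_flush CRTC hooks gain a crtc state argument to match the updated helper vtable, and the transitional plane helpers stop kzalloc()ing a bare plane state, since a raw allocation misses the base-state initialisation the atomic helpers expect. The new fallback for drivers without their own state hooks is reset-then-duplicate:

	if (!plane->state)
		drm_atomic_helper_plane_reset(plane);	/* allocates an initialised state */
	plane_state = drm_atomic_helper_plane_duplicate_state(plane);
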
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 04203c0d2ecb..d734780b31c0 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -93,6 +93,27 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 	return 1;
 }
 
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+	bool poll = false;
+	struct drm_connector *connector;
+
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+		return;
+
+	drm_for_each_connector(connector, dev) {
+		if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+					 DRM_CONNECTOR_POLL_DISCONNECT))
+			poll = true;
+	}
+
+	if (poll)
+		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+}
+
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
 							      uint32_t maxX, uint32_t maxY, bool merge_type_bits)
 {
@@ -153,7 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 
 	/* Re-enable polling in case the global poll config changed. */
 	if (drm_kms_helper_poll != dev->mode_config.poll_running)
-		drm_kms_helper_poll_enable(dev);
+		__drm_kms_helper_poll_enable(dev);
 
 	dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -295,7 +316,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
 
-#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 static void output_poll_execute(struct work_struct *work)
 {
 	struct delayed_work *delayed_work = to_delayed_work(work);
@@ -312,7 +332,7 @@ static void output_poll_execute(struct work_struct *work)
 		goto out;
 
 	mutex_lock(&dev->mode_config.mutex);
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 
 		/* Ignore forced connectors. */
 		if (connector->force)
@@ -407,20 +427,9 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
  */
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
-	bool poll = false;
-	struct drm_connector *connector;
-
-	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
-		return;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
-					 DRM_CONNECTOR_POLL_DISCONNECT))
-			poll = true;
-	}
-
-	if (poll)
-		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+	mutex_lock(&dev->mode_config.mutex);
+	__drm_kms_helper_poll_enable(dev);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
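Note: polling enable is split in two. The new static __drm_kms_helper_poll_enable() asserts that the caller already holds mode_config.mutex (as the connector probe path does), while the exported drm_kms_helper_poll_enable() becomes a wrapper that takes the lock itself, so the probe path no longer re-arms polling without the lock held. Usage within this file, sketched:

	/* Unlocked context: */
	drm_kms_helper_poll_enable(dev);	/* takes and drops mode_config.mutex */

	/* Already under the lock, e.g. during connector probing: */
	__drm_kms_helper_poll_enable(dev);	/* WARNs if the mutex is not held */
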
@@ -495,7 +504,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
 		return false;
 
 	mutex_lock(&dev->mode_config.mutex);
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 
 		/* Only handle HPD capable connectors. */
 		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 43003c4ad80b..df0b61a60501 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -56,7 +56,7 @@ config DRM_EXYNOS_DSI
 
 config DRM_EXYNOS_DP
 	bool "EXYNOS DRM DP driver support"
-	depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
+	depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON)
 	default DRM_EXYNOS
 	select DRM_PANEL
 	help
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 7de0b1084fcd..02aecfed6354 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -3,10 +3,9 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
-exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
-		exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
-		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
-		exynos_drm_plane.o exynos_drm_dmabuf.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
+		exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
+		exynos_drm_plane.o
 
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8b1225f245fc..b3c730770b0f 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -54,6 +54,13 @@ static const char * const decon_clks_name[] = {
 	"sclk_decon_eclk",
 };
 
+static const uint32_t decon_formats[] = {
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
 static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
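Note: the new decon_formats[] table above makes the driver publish the pixel formats each window supports instead of letting the plane layer guess. It is handed to exynos_plane_init() in decon_bind() further down; the call shape after this patch:

	ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], 1 << ctx->pipe,
				type, decon_formats,
				ARRAY_SIZE(decon_formats), zpos);
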
@@ -152,15 +159,15 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
 #define OFFSIZE(x)		(((x) & 0x3fff) << 14)
 #define PAGEWIDTH(x)		((x) & 0x3fff)
 
-static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+				 struct drm_framebuffer *fb)
 {
-	struct exynos_drm_plane *plane = &ctx->planes[win];
 	unsigned long val;
 
 	val = readl(ctx->addr + DECON_WINCONx(win));
 	val &= ~WINCONx_BPPMODE_MASK;
 
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_XRGB1555:
 		val |= WINCONx_BPPMODE_16BPP_I1555;
 		val |= WINCONx_HAWSWP_F;
@@ -186,7 +193,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
 		return;
 	}
 
-	DRM_DEBUG_KMS("bpp = %u\n", plane->bpp);
+	DRM_DEBUG_KMS("bpp = %u\n", fb->bits_per_pixel);
 
 	/*
 	 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -196,7 +203,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
 	 * movement causes unstable DMA which results into iommu crash/tear.
 	 */
 
-	if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+	if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_8WORD;
 	}
@@ -219,27 +226,35 @@ static void decon_shadow_protect_win(struct decon_context *ctx, int win,
 	writel(val, ctx->addr + DECON_SHADOWCON);
 }
 
-static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
+					struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
-	u32 val;
 
-	if (win < 0 || win >= WINDOWS_NR)
+	if (ctx->suspended)
 		return;
 
-	plane = &ctx->planes[win];
+	decon_shadow_protect_win(ctx, plane->zpos, true);
+}
+
+static void decon_update_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+	struct drm_plane_state *state = plane->base.state;
+	unsigned int win = plane->zpos;
+	unsigned int bpp = state->fb->bits_per_pixel >> 3;
+	unsigned int pitch = state->fb->pitches[0];
+	u32 val;
 
 	if (ctx->suspended)
 		return;
 
-	decon_shadow_protect_win(ctx, win, true);
-
 	val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y);
 	writel(val, ctx->addr + DECON_VIDOSDxA(win));
 
-	val = COORDINATE_X(plane->crtc_x + plane->crtc_width - 1) |
-		COORDINATE_Y(plane->crtc_y + plane->crtc_height - 1);
+	val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) |
+		COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1);
 	writel(val, ctx->addr + DECON_VIDOSDxB(win));
 
 	val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -252,42 +267,33 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 
 	writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win));
 
-	val = plane->dma_addr[0] + plane->pitch * plane->crtc_height;
+	val = plane->dma_addr[0] + pitch * plane->crtc_h;
 	writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
 
-	val = OFFSIZE(plane->pitch - plane->crtc_width * (plane->bpp >> 3))
-		| PAGEWIDTH(plane->crtc_width * (plane->bpp >> 3));
+	val = OFFSIZE(pitch - plane->crtc_w * bpp)
+		| PAGEWIDTH(plane->crtc_w * bpp);
 	writel(val, ctx->addr + DECON_VIDW0xADD2(win));
 
-	decon_win_set_pixfmt(ctx, win);
+	decon_win_set_pixfmt(ctx, win, state->fb);
 
 	/* window enable */
 	val = readl(ctx->addr + DECON_WINCONx(win));
 	val |= WINCONx_ENWIN_F;
 	writel(val, ctx->addr + DECON_WINCONx(win));
 
-	decon_shadow_protect_win(ctx, win, false);
-
 	/* standalone update */
 	val = readl(ctx->addr + DECON_UPDATE);
 	val |= STANDALONE_UPDATE_F;
 	writel(val, ctx->addr + DECON_UPDATE);
-
-	if (ctx->i80_if)
-		atomic_set(&ctx->win_updated, 1);
 }
 
-static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
+	unsigned int win = plane->zpos;
 	u32 val;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
 	if (ctx->suspended)
 		return;
 
@@ -306,6 +312,20 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
 	writel(val, ctx->addr + DECON_UPDATE);
 }
 
+static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+
+	if (ctx->suspended)
+		return;
+
+	decon_shadow_protect_win(ctx, plane->zpos, false);
+
+	if (ctx->i80_if)
+		atomic_set(&ctx->win_updated, 1);
+}
+
 static void decon_swreset(struct decon_context *ctx)
 {
 	unsigned int tries;
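Note: the old monolithic win_commit is decomposed to match the atomic helper phases: shadow registers are frozen in .atomic_begin, programmed in .update_plane, and released in .atomic_flush (which also sets the i80 win_updated flag), so an update spanning several planes latches in a single frame. The resulting call order, driven by the core:

	decon_atomic_begin(crtc, plane);  /* decon_shadow_protect_win(ctx, win, true) */
	decon_update_plane(crtc, plane);  /* program VIDOSDx/WINCONx while protected */
	decon_atomic_flush(crtc, plane);  /* unprotect; hardware latches at vsync    */
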
@@ -378,7 +398,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
 	 * a destroyed buffer later.
 	 */
 	for (i = 0; i < WINDOWS_NR; i++)
-		decon_win_disable(crtc, i);
+		decon_disable_plane(crtc, &ctx->planes[i]);
 
 	decon_swreset(ctx);
 
@@ -407,7 +427,7 @@ void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
 		writel(val, ctx->addr + DECON_TRIGCON);
 	}
 
-	drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+	drm_crtc_handle_vblank(&ctx->crtc->base);
 }
 
 static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -460,10 +480,11 @@ static struct exynos_drm_crtc_ops decon_crtc_ops = {
 	.enable_vblank		= decon_enable_vblank,
 	.disable_vblank		= decon_disable_vblank,
 	.commit			= decon_commit,
-	.win_commit		= decon_win_commit,
-	.win_disable		= decon_win_disable,
+	.atomic_begin		= decon_atomic_begin,
+	.update_plane		= decon_update_plane,
+	.disable_plane		= decon_disable_plane,
+	.atomic_flush		= decon_atomic_flush,
 	.te_handler		= decon_te_irq_handler,
-	.clear_channels		= decon_clear_channels,
 };
 
 static int decon_bind(struct device *dev, struct device *master, void *data)
@@ -483,7 +504,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 							DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-				1 << ctx->pipe, type, zpos);
+				1 << ctx->pipe, type, decon_formats,
+				ARRAY_SIZE(decon_formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -497,7 +519,9 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 		goto err;
 	}
 
-	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+	decon_clear_channels(ctx->crtc);
+
+	ret = drm_iommu_attach_device(drm_dev, dev);
 	if (ret)
 		goto err;
 
@@ -514,8 +538,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
 	decon_disable(ctx->crtc);
 
 	/* detach this sub driver from iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev))
-		drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
 }
 
 static const struct component_ops decon_component_ops = {
@@ -533,7 +556,7 @@ static irqreturn_t decon_vsync_irq_handler(int irq, void *dev_id)
 
 	val = readl(ctx->addr + DECON_VIDINTCON1);
 	if (val & VIDINTCON1_INTFRMPEND) {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 
 		/* clear */
 		writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1);
@@ -547,13 +570,21 @@ static irqreturn_t decon_lcd_sys_irq_handler(int irq, void *dev_id)
 {
 	struct decon_context *ctx = dev_id;
 	u32 val;
+	int win;
 
 	if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled))
 		goto out;
 
 	val = readl(ctx->addr + DECON_VIDINTCON1);
 	if (val & VIDINTCON1_INTFRMDONEPEND) {
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+		for (win = 0 ; win < WINDOWS_NR ; win++) {
+			struct exynos_drm_plane *plane = &ctx->planes[win];
+
+			if (!plane->pending_fb)
+				continue;
+
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+		}
 
 		/* clear */
 		writel(VIDINTCON1_INTFRMDONEPEND,
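Note: frame-done handling switches from the per-pipe exynos_drm_crtc_finish_pageflip() to walking every window and completing only planes that still have a pending_fb. The producer side of that flag sits outside this hunk; the assumed pairing is:

	/* At commit time (assumed, not shown here): */
	exynos_plane->pending_fb = state->fb;

	/* In the frame-done interrupt (shown above): */
	if (plane->pending_fb)
		exynos_drm_crtc_finish_update(ctx->crtc, plane); /* clears pending_fb */
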
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 362532afd1a5..cbdb78ef3bac 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -61,7 +61,7 @@ struct decon_context {
 	atomic_t			wait_vsync_event;
 
 	struct exynos_drm_panel_info panel;
-	struct exynos_drm_display *display;
+	struct drm_encoder *encoder;
 };
 
 static const struct of_device_id decon_driver_dt_match[] = {
@@ -70,6 +70,18 @@ static const struct of_device_id decon_driver_dt_match[] = {
 };
 MODULE_DEVICE_TABLE(of, decon_driver_dt_match);
 
+static const uint32_t decon_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_RGBX8888,
+	DRM_FORMAT_BGRX8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_RGBA8888,
+	DRM_FORMAT_BGRA8888,
+};
+
 static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
@@ -126,7 +138,9 @@ static int decon_ctx_initialize(struct decon_context *ctx,
 	ctx->drm_dev = drm_dev;
 	ctx->pipe = priv->pipe++;
 
-	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, ctx->dev);
+	decon_clear_channels(ctx->crtc);
+
+	ret = drm_iommu_attach_device(drm_dev, ctx->dev);
 	if (ret)
 		priv->pipe--;
 
@@ -136,8 +150,7 @@ static int decon_ctx_initialize(struct decon_context *ctx,
 static void decon_ctx_remove(struct decon_context *ctx)
 {
 	/* detach this sub driver from iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev))
-		drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
 }
 
 static u32 decon_calc_clkdiv(struct decon_context *ctx,
@@ -271,16 +284,16 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
 	}
 }
 
-static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+				 struct drm_framebuffer *fb)
 {
-	struct exynos_drm_plane *plane = &ctx->planes[win];
 	unsigned long val;
 	int padding;
 
 	val = readl(ctx->regs + WINCON(win));
 	val &= ~WINCONx_BPPMODE_MASK;
 
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_RGB565:
 		val |= WINCONx_BPPMODE_16BPP_565;
 		val |= WINCONx_BURSTLEN_16WORD;
@@ -329,7 +342,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
 		break;
 	}
 
-	DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
+	DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
 
 	/*
 	 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -339,8 +352,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
 	 * movement causes unstable DMA which results into iommu crash/tear.
 	 */
 
-	padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
-	if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+	padding = (fb->pitches[0] / (fb->bits_per_pixel >> 3)) - fb->width;
+	if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_8WORD;
 	}
@@ -382,23 +395,30 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
 	writel(val, ctx->regs + SHADOWCON);
 }
 
-static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
+					struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
-	struct exynos_drm_plane *plane;
-	int padding;
-	unsigned long val, alpha;
-	unsigned int last_x;
-	unsigned int last_y;
 
 	if (ctx->suspended)
 		return;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
+	decon_shadow_protect_win(ctx, plane->zpos, true);
+}
 
-	plane = &ctx->planes[win];
+static void decon_update_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
+	struct drm_plane_state *state = plane->base.state;
+	int padding;
+	unsigned long val, alpha;
+	unsigned int last_x;
+	unsigned int last_y;
+	unsigned int win = plane->zpos;
+	unsigned int bpp = state->fb->bits_per_pixel >> 3;
+	unsigned int pitch = state->fb->pitches[0];
 
 	if (ctx->suspended)
 		return;
@@ -413,18 +433,15 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 	 * is set.
 	 */
 
-	/* protect windows */
-	decon_shadow_protect_win(ctx, win, true);
-
 	/* buffer start address */
 	val = (unsigned long)plane->dma_addr[0];
 	writel(val, ctx->regs + VIDW_BUF_START(win));
 
-	padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+	padding = (pitch / bpp) - state->fb->width;
 
 	/* buffer size */
-	writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win));
-	writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
+	writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
+	writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win));
 
 	/* offset from the start of the buffer to read */
 	writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win));
@@ -433,25 +450,25 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 	DRM_DEBUG_KMS("start addr = 0x%lx\n",
 			(unsigned long)val);
 	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
-			plane->crtc_width, plane->crtc_height);
+			plane->crtc_w, plane->crtc_h);
 
 	/*
 	 * OSD position.
 	 * In case the window layout goes off the LCD layout, DECON fails.
 	 */
-	if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay)
-		plane->crtc_x = mode->hdisplay - plane->crtc_width;
-	if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay)
-		plane->crtc_y = mode->vdisplay - plane->crtc_height;
+	if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay)
+		plane->crtc_x = mode->hdisplay - plane->crtc_w;
+	if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay)
+		plane->crtc_y = mode->vdisplay - plane->crtc_h;
 
 	val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
 		VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
 	writel(val, ctx->regs + VIDOSD_A(win));
 
-	last_x = plane->crtc_x + plane->crtc_width;
+	last_x = plane->crtc_x + plane->crtc_w;
 	if (last_x)
 		last_x--;
-	last_y = plane->crtc_y + plane->crtc_height;
+	last_y = plane->crtc_y + plane->crtc_h;
 	if (last_y)
 		last_y--;
 
@@ -475,7 +492,7 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 
 	writel(alpha, ctx->regs + VIDOSD_D(win));
 
-	decon_win_set_pixfmt(ctx, win);
+	decon_win_set_pixfmt(ctx, win, state->fb);
 
 	/* hardware window 0 doesn't support color key. */
 	if (win != 0)
@@ -495,17 +512,13 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 	writel(val, ctx->regs + DECON_UPDATE);
 }
 
-static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
+	unsigned int win = plane->zpos;
 	u32 val;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
 	if (ctx->suspended)
 		return;
 
@@ -517,14 +530,22 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
 	val &= ~WINCONx_ENWIN;
 	writel(val, ctx->regs + WINCON(win));
 
-	/* unprotect windows */
-	decon_shadow_protect_win(ctx, win, false);
-
 	val = readl(ctx->regs + DECON_UPDATE);
 	val |= DECON_UPDATE_STANDALONE_F;
 	writel(val, ctx->regs + DECON_UPDATE);
 }
 
+static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
+					struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+
+	if (ctx->suspended)
+		return;
+
+	decon_shadow_protect_win(ctx, plane->zpos, false);
+}
+
 static void decon_init(struct decon_context *ctx)
 {
 	u32 val;
@@ -601,7 +622,7 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
 	 * a destroyed buffer later.
 	 */
 	for (i = 0; i < WINDOWS_NR; i++)
-		decon_win_disable(crtc, i);
+		decon_disable_plane(crtc, &ctx->planes[i]);
 
 	clk_disable_unprepare(ctx->vclk);
 	clk_disable_unprepare(ctx->eclk);
@@ -621,9 +642,10 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
 	.enable_vblank = decon_enable_vblank,
 	.disable_vblank = decon_disable_vblank,
 	.wait_for_vblank = decon_wait_for_vblank,
-	.win_commit = decon_win_commit,
-	.win_disable = decon_win_disable,
-	.clear_channels = decon_clear_channels,
+	.atomic_begin = decon_atomic_begin,
+	.update_plane = decon_update_plane,
+	.disable_plane = decon_disable_plane,
+	.atomic_flush = decon_atomic_flush,
 };
 
 
@@ -631,6 +653,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 {
 	struct decon_context *ctx = (struct decon_context *)dev_id;
 	u32 val, clear_bit;
+	int win;
 
 	val = readl(ctx->regs + VIDINTCON1);
 
@@ -643,8 +666,15 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
 		goto out;
 
 	if (!ctx->i80_if) {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
+		for (win = 0 ; win < WINDOWS_NR ; win++) {
+			struct exynos_drm_plane *plane = &ctx->planes[win];
+
+			if (!plane->pending_fb)
+				continue;
+
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+		}
 
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
@@ -675,7 +705,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, decon_formats,
+					ARRAY_SIZE(decon_formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -689,8 +720,8 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 		return PTR_ERR(ctx->crtc);
 	}
 
-	if (ctx->display)
-		exynos_drm_create_enc_conn(drm_dev, ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_bind(drm_dev, ctx->encoder);
 
 	return 0;
 
@@ -703,8 +734,8 @@ static void decon_unbind(struct device *dev, struct device *master,
 
 	decon_disable(ctx->crtc);
 
-	if (ctx->display)
-		exynos_dpi_remove(ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_remove(ctx->encoder);
 
 	decon_ctx_remove(ctx);
 }
@@ -789,9 +820,9 @@ static int decon_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, ctx);
 
-	ctx->display = exynos_dpi_probe(dev);
-	if (IS_ERR(ctx->display)) {
-		ret = PTR_ERR(ctx->display);
+	ctx->encoder = exynos_dpi_probe(dev);
+	if (IS_ERR(ctx->encoder)) {
+		ret = PTR_ERR(ctx->encoder);
 		goto err_iounmap;
 	}
 
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 172b8002a2c8..d66ade0efac8 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,19 +32,20 @@
 #include <drm/drm_panel.h>
 
 #include "exynos_dp_core.h"
+#include "exynos_drm_crtc.h"
 
 #define ctx_from_connector(c)	container_of(c, struct exynos_dp_device, \
 					connector)
 
 static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
 {
-	return to_exynos_crtc(dp->encoder->crtc);
+	return to_exynos_crtc(dp->encoder.crtc);
 }
 
-static inline struct exynos_dp_device *
-display_to_dp(struct exynos_drm_display *d)
+static inline struct exynos_dp_device *encoder_to_dp(
+						struct drm_encoder *e)
 {
-	return container_of(d, struct exynos_dp_device, display);
+	return container_of(e, struct exynos_dp_device, encoder);
 }
 
 struct bridge_init {
@@ -795,9 +796,6 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp)
 	/* Configure video slave mode */
 	exynos_dp_enable_video_master(dp, 0);
 
-	/* Enable video */
-	exynos_dp_start_video(dp);
-
 	timeout_loop = 0;
 
 	for (;;) {
@@ -891,9 +889,9 @@ static void exynos_dp_hotplug(struct work_struct *work)
 		drm_helper_hpd_irq_event(dp->drm_dev);
 }
 
-static void exynos_dp_commit(struct exynos_drm_display *display)
+static void exynos_dp_commit(struct drm_encoder *encoder)
 {
-	struct exynos_dp_device *dp = display_to_dp(display);
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	int ret;
 
 	/* Keep the panel disabled while we configure video */
@@ -938,6 +936,9 @@ static void exynos_dp_commit(struct exynos_drm_display *display)
 		if (drm_panel_enable(dp->panel))
 			DRM_ERROR("failed to enable the panel\n");
 	}
+
+	/* Enable video */
+	exynos_dp_start_video(dp);
 }
 
 static enum drm_connector_status exynos_dp_detect(
@@ -994,7 +995,7 @@ static struct drm_encoder *exynos_dp_best_encoder(
 {
 	struct exynos_dp_device *dp = ctx_from_connector(connector);
 
-	return dp->encoder;
+	return &dp->encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
@@ -1019,15 +1020,12 @@ static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
 	return 0;
 }
 
-static int exynos_dp_create_connector(struct exynos_drm_display *display,
-				struct drm_encoder *encoder)
+static int exynos_dp_create_connector(struct drm_encoder *encoder)
 {
-	struct exynos_dp_device *dp = display_to_dp(display);
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	struct drm_connector *connector = &dp->connector;
 	int ret;
 
-	dp->encoder = encoder;
-
 	/* Pre-empt DP connector creation if there's a bridge */
 	if (dp->bridge) {
 		ret = exynos_drm_attach_lcd_bridge(dp, encoder);
@@ -1054,20 +1052,22 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
 	return ret;
 }
 
-static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
 {
-	if (dp->phy)
-		phy_power_on(dp->phy);
+	return true;
 }
 
-static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+static void exynos_dp_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
 {
-	if (dp->phy)
-		phy_power_off(dp->phy);
 }
 
-static void exynos_dp_poweron(struct exynos_dp_device *dp)
+static void exynos_dp_enable(struct drm_encoder *encoder)
 {
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
 
 	if (dp->dpms_mode == DRM_MODE_DPMS_ON)
@@ -1084,14 +1084,17 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
 		crtc->ops->clock_enable(dp_to_crtc(dp), true);
 
 	clk_prepare_enable(dp->clock);
-	exynos_dp_phy_init(dp);
+	phy_power_on(dp->phy);
 	exynos_dp_init_dp(dp);
 	enable_irq(dp->irq);
-	exynos_dp_commit(&dp->display);
+	exynos_dp_commit(&dp->encoder);
+
+	dp->dpms_mode = DRM_MODE_DPMS_ON;
 }
 
-static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+static void exynos_dp_disable(struct drm_encoder *encoder)
 {
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
 
 	if (dp->dpms_mode != DRM_MODE_DPMS_ON)
@@ -1106,7 +1109,7 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
 
 	disable_irq(dp->irq);
 	flush_work(&dp->hotplug_work);
-	exynos_dp_phy_exit(dp);
+	phy_power_off(dp->phy);
 	clk_disable_unprepare(dp->clock);
 
 	if (crtc->ops->clock_enable)
@@ -1116,31 +1119,19 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
 		if (drm_panel_unprepare(dp->panel))
 			DRM_ERROR("failed to turnoff the panel\n");
 	}
-}
-
-static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct exynos_dp_device *dp = display_to_dp(display);
 
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		exynos_dp_poweron(dp);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		exynos_dp_poweroff(dp);
-		break;
-	default:
-		break;
-	}
-	dp->dpms_mode = mode;
+	dp->dpms_mode = DRM_MODE_DPMS_OFF;
 }
 
-static struct exynos_drm_display_ops exynos_dp_display_ops = {
-	.create_connector = exynos_dp_create_connector,
-	.dpms = exynos_dp_dpms,
-	.commit = exynos_dp_commit,
+static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
+	.mode_fixup = exynos_dp_mode_fixup,
+	.mode_set = exynos_dp_mode_set,
+	.enable = exynos_dp_enable,
+	.disable = exynos_dp_disable,
+};
+
+static struct drm_encoder_funcs exynos_dp_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
@@ -1219,9 +1210,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 	struct platform_device *pdev = to_platform_device(dev);
 	struct drm_device *drm_dev = data;
+	struct drm_encoder *encoder = &dp->encoder;
 	struct resource *res;
 	unsigned int irq_flags;
-	int ret = 0;
+	int pipe, ret = 0;
 
 	dp->dev = &pdev->dev;
 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1297,7 +1289,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
 
 	INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
 
-	exynos_dp_phy_init(dp);
+	phy_power_on(dp->phy);
 
 	exynos_dp_init_dp(dp);
 
@@ -1311,7 +1303,28 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
 
 	dp->drm_dev = drm_dev;
 
-	return exynos_drm_create_enc_conn(drm_dev, &dp->display);
+	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_LCD);
+	if (pipe < 0)
+		return pipe;
+
+	encoder->possible_crtcs = 1 << pipe;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+
+	ret = exynos_dp_create_connector(encoder);
+	if (ret) {
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+
+	return 0;
 }
 
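Note: with struct exynos_drm_display gone, the DP device embeds its drm_encoder directly (hence the encoder_to_dp() container_of helper above) and registers it during bind; possible_crtcs is derived from the pipe matching the LCD output type. The registration pattern every exynos encoder now follows:

	encoder->possible_crtcs = 1 << pipe;	/* this encoder feeds exactly one CRTC */
	drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
			 DRM_MODE_ENCODER_TMDS);
	drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);

Note also that exynos_dp_commit() now starts the video stream only after the panel is enabled, instead of in the middle of exynos_dp_config_video().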
 static void exynos_dp_unbind(struct device *dev, struct device *master,
@@ -1319,7 +1332,7 @@ static void exynos_dp_unbind(struct device *dev, struct device *master,
 {
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-	exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
+	exynos_dp_disable(&dp->encoder);
 }
 
 static const struct component_ops exynos_dp_ops = {
@@ -1338,8 +1351,6 @@ static int exynos_dp_probe(struct platform_device *pdev)
 	if (!dp)
 		return -ENOMEM;
 
-	dp->display.type = EXYNOS_DISPLAY_TYPE_LCD;
-	dp->display.ops = &exynos_dp_display_ops;
 	platform_set_drvdata(pdev, dp);
 
 	panel_node = of_parse_phandle(dev->of_node, "panel", 0);
@@ -1377,7 +1388,7 @@ static int exynos_dp_suspend(struct device *dev)
 {
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-	exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
+	exynos_dp_disable(&dp->encoder);
 	return 0;
 }
 
@@ -1385,7 +1396,7 @@ static int exynos_dp_resume(struct device *dev)
 {
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-	exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON);
+	exynos_dp_enable(&dp->encoder);
 	return 0;
 }
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index a4e799679669..e413b6f7b0e7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -147,11 +147,10 @@ struct link_train {
 };
 
 struct exynos_dp_device {
-	struct exynos_drm_display display;
+	struct drm_encoder	encoder;
 	struct device		*dev;
 	struct drm_device	*drm_dev;
 	struct drm_connector	connector;
-	struct drm_encoder	*encoder;
 	struct drm_panel	*panel;
 	struct drm_bridge	*bridge;
 	struct clk		*clock;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
deleted file mode 100644
index 24994ba10e28..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* exynos_drm_buf.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
-#include "exynos_drm_iommu.h"
-
-static int lowlevel_buffer_allocate(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
-	int ret = 0;
-	enum dma_attr attr;
-	unsigned int nr_pages;
-
-	if (buf->dma_addr) {
-		DRM_DEBUG_KMS("already allocated.\n");
-		return 0;
-	}
-
-	init_dma_attrs(&buf->dma_attrs);
-
-	/*
-	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
-	 * region will be allocated else physically contiguous
-	 * as possible.
-	 */
-	if (!(flags & EXYNOS_BO_NONCONTIG))
-		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
-
-	/*
-	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
-	 * else cachable mapping.
-	 */
-	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
-		attr = DMA_ATTR_WRITE_COMBINE;
-	else
-		attr = DMA_ATTR_NON_CONSISTENT;
-
-	dma_set_attr(attr, &buf->dma_attrs);
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
-
-	nr_pages = buf->size >> PAGE_SHIFT;
-
-	if (!is_drm_iommu_supported(dev)) {
-		dma_addr_t start_addr;
-		unsigned int i = 0;
-
-		buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
-		if (!buf->pages) {
-			DRM_ERROR("failed to allocate pages.\n");
-			return -ENOMEM;
-		}
-
-		buf->cookie = dma_alloc_attrs(dev->dev,
-					buf->size,
-					&buf->dma_addr, GFP_KERNEL,
-					&buf->dma_attrs);
-		if (!buf->cookie) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			ret = -ENOMEM;
-			goto err_free;
-		}
-
-		start_addr = buf->dma_addr;
-		while (i < nr_pages) {
-			buf->pages[i] = phys_to_page(start_addr);
-			start_addr += PAGE_SIZE;
-			i++;
-		}
-	} else {
-
-		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
-					&buf->dma_addr, GFP_KERNEL,
-					&buf->dma_attrs);
-		if (!buf->pages) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			return -ENOMEM;
-		}
-	}
-
-	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
-	if (IS_ERR(buf->sgt)) {
-		DRM_ERROR("failed to get sg table.\n");
-		ret = PTR_ERR(buf->sgt);
-		goto err_free_attrs;
-	}
-
-	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->dma_addr,
-			buf->size);
-
-	return ret;
-
-err_free_attrs:
-	dma_free_attrs(dev->dev, buf->size, buf->pages,
-			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-	buf->dma_addr = (dma_addr_t)NULL;
-err_free:
-	if (!is_drm_iommu_supported(dev))
-		drm_free_large(buf->pages);
-
-	return ret;
-}
-
-static void lowlevel_buffer_deallocate(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
-	if (!buf->dma_addr) {
-		DRM_DEBUG_KMS("dma_addr is invalid.\n");
-		return;
-	}
-
-	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->dma_addr,
-			buf->size);
-
-	sg_free_table(buf->sgt);
-
-	kfree(buf->sgt);
-	buf->sgt = NULL;
-
-	if (!is_drm_iommu_supported(dev)) {
-		dma_free_attrs(dev->dev, buf->size, buf->cookie,
-				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-		drm_free_large(buf->pages);
-	} else
-		dma_free_attrs(dev->dev, buf->size, buf->pages,
-				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-
-	buf->dma_addr = (dma_addr_t)NULL;
-}
-
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
-						unsigned int size)
-{
-	struct exynos_drm_gem_buf *buffer;
-
-	DRM_DEBUG_KMS("desired size = 0x%x\n", size);
-
-	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer)
-		return NULL;
-
-	buffer->size = size;
-	return buffer;
-}
-
-void exynos_drm_fini_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buffer)
-{
-	kfree(buffer);
-	buffer = NULL;
-}
-
-int exynos_drm_alloc_buf(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buf, unsigned int flags)
-{
-
-	/*
-	 * allocate memory region and set the memory information
-	 * to vaddr and dma_addr of a buffer object.
-	 */
-	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void exynos_drm_free_buf(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buffer)
-{
-
-	lowlevel_buffer_deallocate(dev, flags, buffer);
-}
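Note: the standalone buffer layer is deleted; the allocation presumably folds into the GEM code, which was its only user (the backing store was always dma_alloc_attrs()). For reference, the DMA-attribute policy the removed helper derived from the EXYNOS_BO_* flags was:

	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	if (!(flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs); /* contiguous backing */
	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);	 /* WC mapping */
	else
		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
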
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
deleted file mode 100644
index a6412f19673c..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* exynos_drm_buf.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_BUF_H_
-#define _EXYNOS_DRM_BUF_H_
-
-/* create and initialize buffer object. */
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
-						unsigned int size);
-
-/* destroy buffer object. */
-void exynos_drm_fini_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buffer);
-
-/* allocate physical memory region and setup sgt. */
-int exynos_drm_alloc_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buf,
-				unsigned int flags);
-
-/* release physical memory region, and sgt. */
-void exynos_drm_free_buf(struct drm_device *dev,
-				unsigned int flags,
-				struct exynos_drm_gem_buf *buffer);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 4c9f972eaa07..c68a6a2a9b57 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -15,46 +15,10 @@
 #include <drm/drmP.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_fbdev.h"
 
 static LIST_HEAD(exynos_drm_subdrv_list);
 
-int exynos_drm_create_enc_conn(struct drm_device *dev,
-					struct exynos_drm_display *display)
-{
-	struct drm_encoder *encoder;
-	int ret;
-	unsigned long possible_crtcs = 0;
-
-	ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
-	if (ret < 0)
-		return ret;
-
-	possible_crtcs |= 1 << ret;
-
-	/* create and initialize a encoder for this sub driver. */
-	encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
-	if (!encoder) {
-		DRM_ERROR("failed to create encoder\n");
-		return -EFAULT;
-	}
-
-	display->encoder = encoder;
-
-	ret = display->ops->create_connector(display, encoder);
-	if (ret) {
-		DRM_ERROR("failed to create connector ret = %d\n", ret);
-		goto err_destroy_encoder;
-	}
-
-	return 0;
-
-err_destroy_encoder:
-	encoder->funcs->destroy(encoder);
-	return ret;
-}
-
 int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
 {
 	if (!subdrv)
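Note: exynos_drm_create_enc_conn() is removed together with the exynos_drm_display indirection it served. The core used to create an encoder on the display's behalf and then ask it for a connector; each component driver now owns both steps in its own bind callback (see the exynos_dp_bind() hunk above). Condensed, the removed flow was:

	/* Old, removed: */
	encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
	display->ops->create_connector(display, encoder);
	/* New: drm_encoder_init() + create_connector(), per driver, in bind(). */
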
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 644b4b76e071..0872aa2f450f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -19,21 +19,15 @@
 
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_plane.h"
 
 static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	if (exynos_crtc->enabled)
-		return;
-
 	if (exynos_crtc->ops->enable)
 		exynos_crtc->ops->enable(exynos_crtc);
 
-	exynos_crtc->enabled = true;
-
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -41,20 +35,10 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	if (!exynos_crtc->enabled)
-		return;
-
-	/* wait for the completion of page flip. */
-	if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
-				(exynos_crtc->event == NULL), HZ/20))
-		exynos_crtc->event = NULL;
-
 	drm_crtc_vblank_off(crtc);
 
 	if (exynos_crtc->ops->disable)
 		exynos_crtc->ops->disable(exynos_crtc);
-
-	exynos_crtc->enabled = false;
 }
 
 static bool
@@ -80,18 +64,36 @@ exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		exynos_crtc->ops->commit(exynos_crtc);
 }
 
-static void exynos_crtc_atomic_begin(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane;
 
-	if (crtc->state->event) {
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-		exynos_crtc->event = crtc->state->event;
+	exynos_crtc->event = crtc->state->event;
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+		if (exynos_crtc->ops->atomic_begin)
+			exynos_crtc->ops->atomic_begin(exynos_crtc,
+							exynos_plane);
 	}
 }
 
-static void exynos_crtc_atomic_flush(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane;
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+		if (exynos_crtc->ops->atomic_flush)
+			exynos_crtc->ops->atomic_flush(exynos_crtc,
+							exynos_plane);
+	}
 }
 
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
@@ -139,13 +141,13 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
 	if (!exynos_crtc)
 		return ERR_PTR(-ENOMEM);
 
-	init_waitqueue_head(&exynos_crtc->pending_flip_queue);
-
 	exynos_crtc->pipe = pipe;
 	exynos_crtc->type = type;
 	exynos_crtc->ops = ops;
 	exynos_crtc->ctx = ctx;
 
+	init_waitqueue_head(&exynos_crtc->wait_update);
+
 	crtc = &exynos_crtc->base;
 
 	private->crtc[pipe] = crtc;
@@ -171,11 +173,8 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
 	struct exynos_drm_crtc *exynos_crtc =
 		to_exynos_crtc(private->crtc[pipe]);
 
-	if (!exynos_crtc->enabled)
-		return -EPERM;
-
 	if (exynos_crtc->ops->enable_vblank)
-		exynos_crtc->ops->enable_vblank(exynos_crtc);
+		return exynos_crtc->ops->enable_vblank(exynos_crtc);
 
 	return 0;
 }
@@ -186,31 +185,34 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
 	struct exynos_drm_crtc *exynos_crtc =
 		to_exynos_crtc(private->crtc[pipe]);
 
-	if (!exynos_crtc->enabled)
-		return;
-
 	if (exynos_crtc->ops->disable_vblank)
 		exynos_crtc->ops->disable_vblank(exynos_crtc);
 }
 
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc)
 {
-	struct exynos_drm_private *dev_priv = dev->dev_private;
-	struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
-	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
+	wait_event_timeout(exynos_crtc->wait_update,
+			   (atomic_read(&exynos_crtc->pending_update) == 0),
+			   msecs_to_jiffies(50));
+}
+
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+				struct exynos_drm_plane *exynos_plane)
+{
+	struct drm_crtc *crtc = &exynos_crtc->base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev->event_lock, flags);
-	if (exynos_crtc->event) {
+	exynos_plane->pending_fb = NULL;
 
-		drm_send_vblank_event(dev, -1, exynos_crtc->event);
-		drm_vblank_put(dev, pipe);
-		wake_up(&exynos_crtc->pending_flip_queue);
+	if (atomic_dec_and_test(&exynos_crtc->pending_update))
+		wake_up(&exynos_crtc->wait_update);
 
-	}
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (exynos_crtc->event)
+		drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
 
 	exynos_crtc->event = NULL;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
 void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
@@ -237,7 +239,7 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
 }
 
 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
-					unsigned int out_type)
+				       enum exynos_drm_output_type out_type)
 {
 	struct drm_crtc *crtc;
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 0f3aa70818e3..f87d4abda6f7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -25,12 +25,14 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
 					void *context);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+				   struct exynos_drm_plane *exynos_plane);
 void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
 
 /* This function gets pipe value to crtc device matched with out_type. */
 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
-					unsigned int out_type);
+				       enum exynos_drm_output_type out_type);
 
 /*
  * This function calls the crtc device(manager)'s te_handler() callback
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
deleted file mode 100644
index cd485c091b30..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/* exynos_drm_dmabuf.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include "exynos_drm_dmabuf.h"
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-
-#include <linux/dma-buf.h>
-
-struct exynos_drm_dmabuf_attachment {
-	struct sg_table sgt;
-	enum dma_data_direction dir;
-	bool is_mapped;
-};
-
-static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
-{
-	return to_exynos_gem_obj(buf->priv);
-}
-
-static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
-					struct device *dev,
-					struct dma_buf_attachment *attach)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach;
-
-	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
-	if (!exynos_attach)
-		return -ENOMEM;
-
-	exynos_attach->dir = DMA_NONE;
-	attach->priv = exynos_attach;
-
-	return 0;
-}
-
-static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
-					struct dma_buf_attachment *attach)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
-	struct sg_table *sgt;
-
-	if (!exynos_attach)
-		return;
-
-	sgt = &exynos_attach->sgt;
-
-	if (exynos_attach->dir != DMA_NONE)
-		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
-				exynos_attach->dir);
-
-	sg_free_table(sgt);
-	kfree(exynos_attach);
-	attach->priv = NULL;
-}
-
-static struct sg_table *
-		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
-					enum dma_data_direction dir)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
-	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
-	struct drm_device *dev = gem_obj->base.dev;
-	struct exynos_drm_gem_buf *buf;
-	struct scatterlist *rd, *wr;
-	struct sg_table *sgt = NULL;
-	unsigned int i;
-	int nents, ret;
-
-	/* just return current sgt if already requested. */
-	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
-		return &exynos_attach->sgt;
-
-	buf = gem_obj->buffer;
-	if (!buf) {
-		DRM_ERROR("buffer is null.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	sgt = &exynos_attach->sgt;
-
-	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
-	if (ret) {
-		DRM_ERROR("failed to alloc sgt.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-
-	rd = buf->sgt->sgl;
-	wr = sgt->sgl;
-	for (i = 0; i < sgt->orig_nents; ++i) {
-		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-		rd = sg_next(rd);
-		wr = sg_next(wr);
-	}
-
-	if (dir != DMA_NONE) {
-		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-		if (!nents) {
-			DRM_ERROR("failed to map sgl with iommu.\n");
-			sg_free_table(sgt);
-			sgt = ERR_PTR(-EIO);
-			goto err_unlock;
-		}
-	}
-
-	exynos_attach->is_mapped = true;
-	exynos_attach->dir = dir;
-	attach->priv = exynos_attach;
-
-	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
-
-err_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return sgt;
-}
-
-static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
-						struct sg_table *sgt,
-						enum dma_data_direction dir)
-{
-	/* Nothing to do. */
-}
-
-static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-						unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-						unsigned long page_num,
-						void *addr)
-{
-	/* TODO */
-}
-
-static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
-					unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
-					unsigned long page_num, void *addr)
-{
-	/* TODO */
-}
-
-static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
-	struct vm_area_struct *vma)
-{
-	return -ENOTTY;
-}
-
-static struct dma_buf_ops exynos_dmabuf_ops = {
-	.attach			= exynos_gem_attach_dma_buf,
-	.detach			= exynos_gem_detach_dma_buf,
-	.map_dma_buf		= exynos_gem_map_dma_buf,
-	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
-	.kmap			= exynos_gem_dmabuf_kmap,
-	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
-	.kunmap			= exynos_gem_dmabuf_kunmap,
-	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
-	.mmap			= exynos_gem_dmabuf_mmap,
-	.release		= drm_gem_dmabuf_release,
-};
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
-				struct drm_gem_object *obj, int flags)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-	exp_info.ops = &exynos_dmabuf_ops;
-	exp_info.size = exynos_gem_obj->base.size;
-	exp_info.flags = flags;
-	exp_info.priv = obj;
-
-	return dma_buf_export(&exp_info);
-}
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
-				struct dma_buf *dma_buf)
-{
-	struct dma_buf_attachment *attach;
-	struct sg_table *sgt;
-	struct scatterlist *sgl;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buffer;
-	int ret;
-
-	/* is this one of own objects? */
-	if (dma_buf->ops == &exynos_dmabuf_ops) {
-		struct drm_gem_object *obj;
-
-		obj = dma_buf->priv;
-
-		/* is it from our device? */
-		if (obj->dev == drm_dev) {
-			/*
-			 * Importing dmabuf exported from out own gem increases
-			 * refcount on gem itself instead of f_count of dmabuf.
-			 */
-			drm_gem_object_reference(obj);
-			return obj;
-		}
-	}
-
-	attach = dma_buf_attach(dma_buf, drm_dev->dev);
-	if (IS_ERR(attach))
-		return ERR_PTR(-EINVAL);
-
-	get_dma_buf(dma_buf);
-
-	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sgt)) {
-		ret = PTR_ERR(sgt);
-		goto err_buf_detach;
-	}
-
-	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer) {
-		ret = -ENOMEM;
-		goto err_unmap_attach;
-	}
-
-	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
-	if (!exynos_gem_obj) {
-		ret = -ENOMEM;
-		goto err_free_buffer;
-	}
-
-	sgl = sgt->sgl;
-
-	buffer->size = dma_buf->size;
-	buffer->dma_addr = sg_dma_address(sgl);
-
-	if (sgt->nents == 1) {
-		/* always physically continuous memory if sgt->nents is 1. */
-		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
-	} else {
-		/*
-		 * this case could be CONTIG or NONCONTIG type but for now
-		 * sets NONCONTIG.
-		 * TODO. we have to find a way that exporter can notify
-		 * the type of its own buffer to importer.
-		 */
-		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
-	}
-
-	exynos_gem_obj->buffer = buffer;
-	buffer->sgt = sgt;
-	exynos_gem_obj->base.import_attach = attach;
-
-	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
-								buffer->size);
-
-	return &exynos_gem_obj->base;
-
-err_free_buffer:
-	kfree(buffer);
-	buffer = NULL;
-err_unmap_attach:
-	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
-err_buf_detach:
-	dma_buf_detach(dma_buf, attach);
-	dma_buf_put(dma_buf);
-
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
deleted file mode 100644
index 886de9ff484d..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* exynos_drm_dmabuf.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_DMABUF_H_
-#define _EXYNOS_DRM_DMABUF_H_
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
-				struct drm_gem_object *obj, int flags);
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
-						struct dma_buf *dma_buf);
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 7cb6595c1894..c748b8790de3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -20,26 +20,24 @@
 #include <video/of_videomode.h>
 #include <video/videomode.h>
 
-#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
 
 struct exynos_dpi {
-	struct exynos_drm_display display;
+	struct drm_encoder encoder;
 	struct device *dev;
 	struct device_node *panel_node;
 
 	struct drm_panel *panel;
 	struct drm_connector connector;
-	struct drm_encoder *encoder;
 
 	struct videomode *vm;
-	int dpms_mode;
 };
 
 #define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
 
-static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d)
+static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e)
 {
-	return container_of(d, struct exynos_dpi, display);
+	return container_of(e, struct exynos_dpi, encoder);
 }
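
encoder_to_dpi() relies on struct drm_encoder being embedded by value in struct exynos_dpi, so container_of() can subtract the member offset to get back to the driver context. A self-contained illustration of that embedding pattern follows, with toy stand-ins for the DRM types.

	#include <stddef.h>
	#include <stdio.h>

	/* container_of(): given a pointer to a member, recover the enclosing
	 * struct by subtracting the member's offset. This is how the helpers'
	 * drm_encoder pointer is mapped back to the driver context. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct encoder { int id; };	/* stands in for drm_encoder */

	struct dpi_ctx {		/* stands in for exynos_dpi */
		const char *name;
		struct encoder encoder;	/* embedded, not a pointer */
	};

	static struct dpi_ctx *encoder_to_ctx(struct encoder *e)
	{
		return container_of(e, struct dpi_ctx, encoder);
	}

	int main(void)
	{
		struct dpi_ctx ctx = { .name = "dpi0", .encoder = { .id = 7 } };
		struct encoder *e = &ctx.encoder; /* what the core hands back */

		printf("%s (encoder %d)\n", encoder_to_ctx(e)->name, e->id);
		return 0;
	}
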
 
 static enum drm_connector_status
@@ -99,7 +97,7 @@ exynos_dpi_best_encoder(struct drm_connector *connector)
 {
 	struct exynos_dpi *ctx = connector_to_dpi(connector);
 
-	return ctx->encoder;
+	return &ctx->encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
@@ -107,15 +105,12 @@ static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
 	.best_encoder = exynos_dpi_best_encoder,
 };
 
-static int exynos_dpi_create_connector(struct exynos_drm_display *display,
-				       struct drm_encoder *encoder)
+static int exynos_dpi_create_connector(struct drm_encoder *encoder)
 {
-	struct exynos_dpi *ctx = display_to_dpi(display);
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
 	struct drm_connector *connector = &ctx->connector;
 	int ret;
 
-	ctx->encoder = encoder;
-
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	ret = drm_connector_init(encoder->dev, connector,
@@ -133,46 +128,48 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
 	return 0;
 }
 
-static void exynos_dpi_poweron(struct exynos_dpi *ctx)
+static bool exynos_dpi_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void exynos_dpi_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
 {
+}
+
+static void exynos_dpi_enable(struct drm_encoder *encoder)
+{
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
+
 	if (ctx->panel) {
 		drm_panel_prepare(ctx->panel);
 		drm_panel_enable(ctx->panel);
 	}
 }
 
-static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
+static void exynos_dpi_disable(struct drm_encoder *encoder)
 {
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
+
 	if (ctx->panel) {
 		drm_panel_disable(ctx->panel);
 		drm_panel_unprepare(ctx->panel);
 	}
 }
 
-static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct exynos_dpi *ctx = display_to_dpi(display);
-
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		if (ctx->dpms_mode != DRM_MODE_DPMS_ON)
-				exynos_dpi_poweron(ctx);
-			break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		if (ctx->dpms_mode == DRM_MODE_DPMS_ON)
-			exynos_dpi_poweroff(ctx);
-		break;
-	default:
-		break;
-	}
-	ctx->dpms_mode = mode;
-}
+static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
+	.mode_fixup = exynos_dpi_mode_fixup,
+	.mode_set = exynos_dpi_mode_set,
+	.enable = exynos_dpi_enable,
+	.disable = exynos_dpi_disable,
+};
 
-static struct exynos_drm_display_ops exynos_dpi_display_ops = {
-	.create_connector = exynos_dpi_create_connector,
-	.dpms = exynos_dpi_dpms
+static struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 /* of_* functions will be removed after merge of of_graph patches */
@@ -299,7 +296,34 @@ static int exynos_dpi_parse_dt(struct exynos_dpi *ctx)
 	return 0;
 }
 
-struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
+int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
+{
+	int ret;
+
+	ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD);
+	if (ret < 0)
+		return ret;
+
+	encoder->possible_crtcs = 1 << ret;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
+
+	ret = exynos_dpi_create_connector(encoder);
+	if (ret) {
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+
+	return 0;
+}
+
+struct drm_encoder *exynos_dpi_probe(struct device *dev)
 {
 	struct exynos_dpi *ctx;
 	int ret;
@@ -308,10 +332,7 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
 	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
-	ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD;
-	ctx->display.ops = &exynos_dpi_display_ops;
 	ctx->dev = dev;
-	ctx->dpms_mode = DRM_MODE_DPMS_OFF;
 
 	ret = exynos_dpi_parse_dt(ctx);
 	if (ret < 0) {
@@ -325,14 +346,14 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
 			return ERR_PTR(-EPROBE_DEFER);
 	}
 
-	return &ctx->display;
+	return &ctx->encoder;
 }
 
-int exynos_dpi_remove(struct exynos_drm_display *display)
+int exynos_dpi_remove(struct drm_encoder *encoder)
 {
-	struct exynos_dpi *ctx = display_to_dpi(display);
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
 
-	exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF);
+	exynos_dpi_disable(&ctx->encoder);
 
 	if (ctx->panel)
 		drm_panel_detach(ctx->panel);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 63a68c60a353..831d2e4cacf9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -13,6 +13,8 @@
 
 #include <linux/pm_runtime.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 
 #include <linux/component.h>
@@ -21,13 +23,11 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_plane.h"
 #include "exynos_drm_vidi.h"
-#include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
 #include "exynos_drm_ipp.h"
 #include "exynos_drm_iommu.h"
@@ -38,15 +38,112 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
+struct exynos_atomic_commit {
+	struct work_struct	work;
+	struct drm_device	*dev;
+	struct drm_atomic_state *state;
+	u32			crtcs;
+};
+
+static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int i, ret;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+		if (!crtc->state->enable)
+			continue;
+
+		ret = drm_crtc_vblank_get(crtc);
+		if (ret)
+			continue;
+
+		exynos_drm_crtc_wait_pending_update(exynos_crtc);
+		drm_crtc_vblank_put(crtc);
+	}
+}
+
+static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
+{
+	struct drm_device *dev = commit->dev;
+	struct exynos_drm_private *priv = dev->dev_private;
+	struct drm_atomic_state *state = commit->state;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_plane_state *plane_state;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	drm_atomic_helper_commit_modeset_disables(dev, state);
+
+	drm_atomic_helper_commit_modeset_enables(dev, state);
+
+	/*
+	 * Exynos can't update planes while CRTCs and encoders are disabled:
+	 * its update routines, especially for FIMD, require the clocks to be
+	 * enabled. So it is necessary to handle the modeset operations
+	 * *before* the commit_planes() step; this way they will always have
+	 * the relevant clocks enabled to perform the update.
+	 */
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+		atomic_set(&exynos_crtc->pending_update, 0);
+	}
+
+	for_each_plane_in_state(state, plane, plane_state, i) {
+		struct exynos_drm_crtc *exynos_crtc =
+						to_exynos_crtc(plane->crtc);
+
+		if (!plane->crtc)
+			continue;
+
+		atomic_inc(&exynos_crtc->pending_update);
+	}
+
+	drm_atomic_helper_commit_planes(dev, state);
+
+	exynos_atomic_wait_for_commit(state);
+
+	drm_atomic_helper_cleanup_planes(dev, state);
+
+	drm_atomic_state_free(state);
+
+	spin_lock(&priv->lock);
+	priv->pending &= ~commit->crtcs;
+	spin_unlock(&priv->lock);
+
+	wake_up_all(&priv->wait);
+
+	kfree(commit);
+}
+
+static void exynos_drm_atomic_work(struct work_struct *work)
+{
+	struct exynos_atomic_commit *commit = container_of(work,
+				struct exynos_atomic_commit, work);
+
+	exynos_atomic_commit_complete(commit);
+}
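
Splitting the tail into exynos_atomic_commit_complete() plus a work item lets the same code serve both blocking and nonblocking commits: the synchronous path calls it directly, while the asynchronous path defers it to a workqueue via schedule_work(). A rough userspace model of that dispatch, with a detached thread standing in for the work item (all names invented):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* The heavy commit tail either runs inline (blocking commit) or on a
	 * worker (async commit). The kernel queues a struct work_struct with
	 * schedule_work() instead of spawning a thread. */
	struct commit { int id; };

	static void *commit_complete(void *arg)
	{
		struct commit *c = arg;

		printf("commit %d: disables, enables, planes, wait, cleanup\n",
		       c->id);
		return NULL;
	}

	static int atomic_commit(struct commit *c, bool async)
	{
		if (async) {
			pthread_t worker;

			pthread_create(&worker, NULL, commit_complete, c);
			pthread_detach(worker);	/* caller returns right away */
			return 0;
		}
		commit_complete(c);	/* blocking: run the tail inline */
		return 0;
	}

	int main(void)
	{
		struct commit a = { 1 }, b = { 2 };

		atomic_commit(&a, false);
		atomic_commit(&b, true);
		pthread_exit(NULL);	/* let the detached worker finish */
	}
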
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct exynos_drm_private *private;
-	int ret;
+	struct drm_encoder *encoder;
+	unsigned int clone_mask;
+	int cnt, ret;
 
 	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
 	if (!private)
 		return -ENOMEM;
 
+	init_waitqueue_head(&private->wait);
+	spin_lock_init(&private->lock);
+
 	dev_set_drvdata(dev->dev, dev);
 	dev->dev_private = (void *)private;
 
@@ -67,7 +164,13 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	exynos_drm_mode_config_init(dev);
 
 	/* setup possible_clones. */
-	exynos_drm_encoder_setup(dev);
+	cnt = 0;
+	clone_mask = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		clone_mask |= (1 << (cnt++));
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		encoder->possible_clones = clone_mask;
 
 	platform_set_drvdata(dev->platformdev, dev);
 
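
This open-coded replacement for exynos_drm_encoder_setup() allows every encoder to clone every other, so each possible_clones mask ends up with the low n bits set for n encoders. A sketch of the arithmetic, assuming three encoders:

	#include <stdio.h>

	/* Every encoder may clone every other one, so each mask is simply
	 * the low n bits set, i.e. (1 << n) - 1. */
	int main(void)
	{
		unsigned int clone_mask = 0;
		int n_encoders = 3, cnt;

		for (cnt = 0; cnt < n_encoders; cnt++)
			clone_mask |= 1u << cnt;

		printf("possible_clones = 0x%x\n", clone_mask); /* 0x7 */
		return 0;
	}
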
@@ -143,6 +246,64 @@ static int exynos_drm_unload(struct drm_device *dev)
 	return 0;
 }
 
+static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
+{
+	bool pending;
+
+	spin_lock(&priv->lock);
+	pending = priv->pending & crtcs;
+	spin_unlock(&priv->lock);
+
+	return pending;
+}
+
+int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
+			 bool async)
+{
+	struct exynos_drm_private *priv = dev->dev_private;
+	struct exynos_atomic_commit *commit;
+	int i, ret;
+
+	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+	if (!commit)
+		return -ENOMEM;
+
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret) {
+		kfree(commit);
+		return ret;
+	}
+
+	/* This is the point of no return */
+
+	INIT_WORK(&commit->work, exynos_drm_atomic_work);
+	commit->dev = dev;
+	commit->state = state;
+
+	/* Wait until all affected CRTCs have completed previous commits and
+	 * mark them as pending.
+	 */
+	for (i = 0; i < dev->mode_config.num_crtc; ++i) {
+		if (state->crtcs[i])
+			commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
+	}
+
+	wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
+
+	spin_lock(&priv->lock);
+	priv->pending |= commit->crtcs;
+	spin_unlock(&priv->lock);
+
+	drm_atomic_helper_swap_state(dev, state);
+
+	if (async)
+		schedule_work(&commit->work);
+	else
+		exynos_atomic_commit_complete(commit);
+
+	return 0;
+}
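
Commits touching disjoint CRTC sets can therefore run concurrently, while commits sharing a CRTC serialize on the pending bitmask. A userspace model of the throttle, with a mutex and condition variable standing in for the spinlock and waitqueue (names invented):

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	/* 'pending' holds one bit per CRTC with a commit in flight. A new
	 * commit sleeps until its own bits are clear, then claims them;
	 * completion clears them and wakes every waiter. */
	static uint32_t pending;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t commit_wait = PTHREAD_COND_INITIALIZER;

	static void commit_begin(uint32_t crtcs)
	{
		pthread_mutex_lock(&lock);
		while (pending & crtcs)	/* someone still owns one of ours */
			pthread_cond_wait(&commit_wait, &lock);
		pending |= crtcs;
		pthread_mutex_unlock(&lock);
	}

	static void commit_done(uint32_t crtcs)
	{
		pthread_mutex_lock(&lock);
		pending &= ~crtcs;
		pthread_mutex_unlock(&lock);
		pthread_cond_broadcast(&commit_wait); /* wake_up_all() */
	}

	int main(void)
	{
		commit_begin(0x1);	/* CRTC 0 */
		commit_begin(0x2);	/* CRTC 1: disjoint, no waiting */
		commit_done(0x1);
		commit_done(0x2);
		printf("pending = 0x%x\n", pending);
		return 0;
	}
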
+
 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
 {
 	struct drm_connector *connector;
@@ -242,25 +403,25 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
 
 static const struct drm_ioctl_desc exynos_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
+			DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
 			DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
-			exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
-			vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
-			exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
-			exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
-			exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
-			exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
-			exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
-			exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
-			exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
@@ -277,11 +438,10 @@ static const struct file_operations exynos_drm_driver_fops = {
 };
 
 static struct drm_driver exynos_drm_driver = {
-	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
+				  | DRIVER_ATOMIC | DRIVER_RENDER,
 	.load			= exynos_drm_load,
 	.unload			= exynos_drm_unload,
-	.suspend		= exynos_drm_suspend,
-	.resume			= exynos_drm_resume,
 	.open			= exynos_drm_open,
 	.preclose		= exynos_drm_preclose,
 	.lastclose		= exynos_drm_lastclose,
@@ -297,8 +457,12 @@ static struct drm_driver exynos_drm_driver = {
 	.dumb_destroy		= drm_gem_dumb_destroy,
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
-	.gem_prime_export	= exynos_dmabuf_prime_export,
-	.gem_prime_import	= exynos_dmabuf_prime_import,
+	.gem_prime_export	= drm_gem_prime_export,
+	.gem_prime_import	= drm_gem_prime_import,
+	.gem_prime_get_sg_table	= exynos_drm_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table	= exynos_drm_gem_prime_import_sg_table,
+	.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
+	.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
 	.ioctls			= exynos_ioctls,
 	.num_ioctls		= ARRAY_SIZE(exynos_ioctls),
 	.fops			= &exynos_drm_driver_fops,
@@ -345,9 +509,6 @@ static struct platform_driver exynos_drm_platform_driver;
  * because connector requires pipe number of its crtc during initialization.
  */
 static struct platform_driver *const exynos_drm_kms_drivers[] = {
-#ifdef CONFIG_DRM_EXYNOS_VIDI
-	&vidi_driver,
-#endif
 #ifdef CONFIG_DRM_EXYNOS_FIMD
 	&fimd_driver,
 #endif
@@ -370,6 +531,9 @@ static struct platform_driver *const exynos_drm_kms_drivers[] = {
 	&mixer_driver,
 	&hdmi_driver,
 #endif
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+	&vidi_driver,
+#endif
 };
 
 static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index dd00f160c1e5..b7ba21dfb696 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -44,23 +44,14 @@ enum exynos_drm_output_type {
  *	- the unit is screen coordinates.
  * @src_y: offset y on a framebuffer to be displayed.
  *	- the unit is screen coordinates.
- * @src_width: width of a partial image to be displayed from framebuffer.
- * @src_height: height of a partial image to be displayed from framebuffer.
- * @fb_width: width of a framebuffer.
- * @fb_height: height of a framebuffer.
+ * @src_w: width of a partial image to be displayed from framebuffer.
+ * @src_h: height of a partial image to be displayed from framebuffer.
  * @crtc_x: offset x on hardware screen.
  * @crtc_y: offset y on hardware screen.
- * @crtc_width: window width to be displayed (hardware screen).
- * @crtc_height: window height to be displayed (hardware screen).
- * @mode_width: width of screen mode.
- * @mode_height: height of screen mode.
+ * @crtc_w: window width to be displayed (hardware screen).
+ * @crtc_h: window height to be displayed (hardware screen).
  * @h_ratio: horizontal scaling ratio, 16.16 fixed point
  * @v_ratio: vertical scaling ratio, 16.16 fixed point
- * @refresh: refresh rate.
- * @scan_flag: interlace or progressive way.
- *	(it could be DRM_MODE_FLAG_*)
- * @bpp: pixel size.(in bit)
- * @pixel_format: fourcc pixel format of this overlay
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *	      allocated for a overlay.
  * @zpos: order of overlay layer(z position).
@@ -73,73 +64,17 @@ struct exynos_drm_plane {
 	struct drm_plane base;
 	unsigned int src_x;
 	unsigned int src_y;
-	unsigned int src_width;
-	unsigned int src_height;
-	unsigned int fb_width;
-	unsigned int fb_height;
+	unsigned int src_w;
+	unsigned int src_h;
 	unsigned int crtc_x;
 	unsigned int crtc_y;
-	unsigned int crtc_width;
-	unsigned int crtc_height;
-	unsigned int mode_width;
-	unsigned int mode_height;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
 	unsigned int h_ratio;
 	unsigned int v_ratio;
-	unsigned int refresh;
-	unsigned int scan_flag;
-	unsigned int bpp;
-	unsigned int pitch;
-	uint32_t pixel_format;
 	dma_addr_t dma_addr[MAX_FB_BUFFER];
 	unsigned int zpos;
-};
-
-/*
- * Exynos DRM Display Structure.
- *	- this structure is common to analog tv, digital tv and lcd panel.
- *
- * @create_connector: initialize and register a new connector
- * @remove: cleans up the display for removal
- * @mode_fixup: fix mode data comparing to hw specific display mode.
- * @mode_set: convert drm_display_mode to hw specific display mode and
- *	      would be called by encoder->mode_set().
- * @check_mode: check if mode is valid or not.
- * @dpms: display device on or off.
- * @commit: apply changes to hw
- */
-struct exynos_drm_display;
-struct exynos_drm_display_ops {
-	int (*create_connector)(struct exynos_drm_display *display,
-				struct drm_encoder *encoder);
-	void (*remove)(struct exynos_drm_display *display);
-	void (*mode_fixup)(struct exynos_drm_display *display,
-				struct drm_connector *connector,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode);
-	void (*mode_set)(struct exynos_drm_display *display,
-				struct drm_display_mode *mode);
-	int (*check_mode)(struct exynos_drm_display *display,
-				struct drm_display_mode *mode);
-	void (*dpms)(struct exynos_drm_display *display, int mode);
-	void (*commit)(struct exynos_drm_display *display);
-};
-
-/*
- * Exynos drm display structure, maps 1:1 with an encoder/connector
- *
- * @list: the list entry for this manager
- * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
- * @encoder: encoder object this display maps to
- * @connector: connector object this display maps to
- * @ops: pointer to callbacks for exynos drm specific functionality
- * @ctx: A pointer to the display's implementation specific context
- */
-struct exynos_drm_display {
-	struct list_head list;
-	enum exynos_drm_output_type type;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	struct exynos_drm_display_ops *ops;
+	struct drm_framebuffer *pending_fb;
 };
 
 /*
@@ -153,8 +88,10 @@ struct exynos_drm_display {
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
  * @wait_for_vblank: wait for vblank interrupt to make sure that
  *	hardware overlay is updated.
- * @win_commit: apply hardware specific overlay data to registers.
- * @win_disable: disable hardware specific overlay.
+ * @atomic_begin: prepare a window to receive an update
+ * @atomic_flush: mark the end of a window update
+ * @update_plane: apply hardware specific overlay data to registers.
+ * @disable_plane: disable hardware specific overlay.
  * @te_handler: trigger to transfer video image at the tearing effect
  *	synchronization signal if there is a page flip request.
  * @clock_enable: optional function enabling/disabling display domain clock,
@@ -173,11 +110,16 @@ struct exynos_drm_crtc_ops {
 	int (*enable_vblank)(struct exynos_drm_crtc *crtc);
 	void (*disable_vblank)(struct exynos_drm_crtc *crtc);
 	void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
-	void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
-	void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
+	void (*atomic_begin)(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane);
+	void (*update_plane)(struct exynos_drm_crtc *crtc,
+			     struct exynos_drm_plane *plane);
+	void (*disable_plane)(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane);
+	void (*atomic_flush)(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane);
 	void (*te_handler)(struct exynos_drm_crtc *crtc);
 	void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
-	void (*clear_channels)(struct exynos_drm_crtc *crtc);
 };
 
 /*
@@ -194,6 +136,8 @@ struct exynos_drm_crtc_ops {
  *	this pipe value.
  * @enabled: if the crtc is enabled or not
  * @event: vblank event that is currently queued for flip
+ * @wait_update: wait for all pending plane updates to finish
+ * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
  */
@@ -201,9 +145,9 @@ struct exynos_drm_crtc {
 	struct drm_crtc			base;
 	enum exynos_drm_output_type	type;
 	unsigned int			pipe;
-	bool				enabled;
-	wait_queue_head_t		pending_flip_queue;
 	struct drm_pending_vblank_event	*event;
+	wait_queue_head_t		wait_update;
+	atomic_t			pending_update;
 	const struct exynos_drm_crtc_ops	*ops;
 	void				*ctx;
 };
@@ -229,6 +173,9 @@ struct drm_exynos_file_private {
  * @da_space_size: size of device address space.
  *	if 0 then default value is used for it.
  * @pipe: the pipe number for this crtc/manager.
+ * @pending: the crtcs that have pending updates to finish
+ * @lock: protect access to @pending
+ * @wait: wait for an atomic commit to finish
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -244,6 +191,11 @@ struct exynos_drm_private {
 	unsigned long da_space_size;
 
 	unsigned int pipe;
+
+	/* for atomic commit */
+	u32			pending;
+	spinlock_t		lock;
+	wait_queue_head_t	wait;
 };
 
 /*
@@ -285,20 +237,26 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
 
 #ifdef CONFIG_DRM_EXYNOS_DPI
-struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
-int exynos_dpi_remove(struct exynos_drm_display *display);
+struct drm_encoder *exynos_dpi_probe(struct device *dev);
+int exynos_dpi_remove(struct drm_encoder *encoder);
+int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder);
 #else
-static inline struct exynos_drm_display *
+static inline struct drm_encoder *
 exynos_dpi_probe(struct device *dev) { return NULL; }
-static inline int exynos_dpi_remove(struct exynos_drm_display *display)
+static inline int exynos_dpi_remove(struct drm_encoder *encoder)
+{
+	return 0;
+}
+static inline int exynos_dpi_bind(struct drm_device *dev,
+				  struct drm_encoder *encoder)
 {
 	return 0;
 }
 #endif
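
The paired declarations mean callers of exynos_dpi_bind() and friends never need their own #ifdef CONFIG_DRM_EXYNOS_DPI: the static inline stubs compile to no-ops when the option is off. The same pattern in miniature, with a made-up feature macro:

	#include <stdio.h>

	/* Flip FEATURE_FOO on or off and the caller compiles unchanged. */
	#define FEATURE_FOO 1	/* stands in for CONFIG_DRM_EXYNOS_DPI */

	#if FEATURE_FOO
	static int foo_bind(void) { printf("real foo_bind\n"); return 0; }
	#else
	static inline int foo_bind(void) { return 0; } /* compiled-out stub */
	#endif

	int main(void)
	{
		return foo_bind();	/* no #ifdef at the call site */
	}
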
 
-/* This function creates a encoder and a connector, and initializes them. */
-int exynos_drm_create_enc_conn(struct drm_device *dev,
-				struct exynos_drm_display *display);
+int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
+			 bool async);
+
 
 extern struct platform_driver fimd_driver;
 extern struct platform_driver exynos5433_decon_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 0e58b36cb8c2..12b03b364703 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -259,7 +259,7 @@ struct exynos_dsi_driver_data {
 };
 
 struct exynos_dsi {
-	struct exynos_drm_display display;
+	struct drm_encoder encoder;
 	struct mipi_dsi_host dsi_host;
 	struct drm_connector connector;
 	struct device_node *panel_node;
@@ -295,9 +295,9 @@ struct exynos_dsi {
 #define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
 #define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
 
-static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d)
+static inline struct exynos_dsi *encoder_to_dsi(struct drm_encoder *e)
 {
-	return container_of(d, struct exynos_dsi, display);
+	return container_of(e, struct exynos_dsi, encoder);
 }
 
 enum reg_idx {
@@ -1272,7 +1272,7 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
 static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
 {
 	struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
-	struct drm_encoder *encoder = dsi->display.encoder;
+	struct drm_encoder *encoder = &dsi->encoder;
 
 	if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE)
 		exynos_drm_crtc_te_handler(encoder->crtc);
@@ -1518,16 +1518,17 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
 		dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
 }
 
-static int exynos_dsi_enable(struct exynos_dsi *dsi)
+static void exynos_dsi_enable(struct drm_encoder *encoder)
 {
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 	int ret;
 
 	if (dsi->state & DSIM_STATE_ENABLED)
-		return 0;
+		return;
 
 	ret = exynos_dsi_poweron(dsi);
 	if (ret < 0)
-		return ret;
+		return;
 
 	dsi->state |= DSIM_STATE_ENABLED;
 
@@ -1535,7 +1536,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
 	if (ret < 0) {
 		dsi->state &= ~DSIM_STATE_ENABLED;
 		exynos_dsi_poweroff(dsi);
-		return ret;
+		return;
 	}
 
 	exynos_dsi_set_display_mode(dsi);
@@ -1547,16 +1548,16 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
 		exynos_dsi_set_display_enable(dsi, false);
 		drm_panel_unprepare(dsi->panel);
 		exynos_dsi_poweroff(dsi);
-		return ret;
+		return;
 	}
 
 	dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
-
-	return 0;
 }
 
-static void exynos_dsi_disable(struct exynos_dsi *dsi)
+static void exynos_dsi_disable(struct drm_encoder *encoder)
 {
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+
 	if (!(dsi->state & DSIM_STATE_ENABLED))
 		return;
 
@@ -1571,26 +1572,6 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
 	exynos_dsi_poweroff(dsi);
 }
 
-static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct exynos_dsi *dsi = display_to_dsi(display);
-
-	if (dsi->panel) {
-		switch (mode) {
-		case DRM_MODE_DPMS_ON:
-			exynos_dsi_enable(dsi);
-			break;
-		case DRM_MODE_DPMS_STANDBY:
-		case DRM_MODE_DPMS_SUSPEND:
-		case DRM_MODE_DPMS_OFF:
-			exynos_dsi_disable(dsi);
-			break;
-		default:
-			break;
-		}
-	}
-}
-
 static enum drm_connector_status
 exynos_dsi_detect(struct drm_connector *connector, bool force)
 {
@@ -1601,10 +1582,10 @@ exynos_dsi_detect(struct drm_connector *connector, bool force)
 		if (dsi->panel)
 			drm_panel_attach(dsi->panel, &dsi->connector);
 	} else if (!dsi->panel_node) {
-		struct exynos_drm_display *display;
+		struct drm_encoder *encoder;
 
-		display = platform_get_drvdata(to_platform_device(dsi->dev));
-		exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+		encoder = platform_get_drvdata(to_platform_device(dsi->dev));
+		exynos_dsi_disable(encoder);
 		drm_panel_detach(dsi->panel);
 		dsi->panel = NULL;
 	}
@@ -1647,7 +1628,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
 {
 	struct exynos_dsi *dsi = connector_to_dsi(connector);
 
-	return dsi->display.encoder;
+	return &dsi->encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
@@ -1655,10 +1636,9 @@ static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
 	.best_encoder = exynos_dsi_best_encoder,
 };
 
-static int exynos_dsi_create_connector(struct exynos_drm_display *display,
-				       struct drm_encoder *encoder)
+static int exynos_dsi_create_connector(struct drm_encoder *encoder)
 {
-	struct exynos_dsi *dsi = display_to_dsi(display);
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 	struct drm_connector *connector = &dsi->connector;
 	int ret;
 
@@ -1679,26 +1659,40 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
 	return 0;
 }
 
-static void exynos_dsi_mode_set(struct exynos_drm_display *display,
-			 struct drm_display_mode *mode)
+static bool exynos_dsi_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
 {
-	struct exynos_dsi *dsi = display_to_dsi(display);
-	struct videomode *vm = &dsi->vm;
+	return true;
+}
 
-	vm->hactive = mode->hdisplay;
-	vm->vactive = mode->vdisplay;
-	vm->vfront_porch = mode->vsync_start - mode->vdisplay;
-	vm->vback_porch = mode->vtotal - mode->vsync_end;
-	vm->vsync_len = mode->vsync_end - mode->vsync_start;
-	vm->hfront_porch = mode->hsync_start - mode->hdisplay;
-	vm->hback_porch = mode->htotal - mode->hsync_end;
-	vm->hsync_len = mode->hsync_end - mode->hsync_start;
+static void exynos_dsi_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+	struct videomode *vm = &dsi->vm;
+	struct drm_display_mode *m = adjusted_mode;
+
+	vm->hactive = m->hdisplay;
+	vm->vactive = m->vdisplay;
+	vm->vfront_porch = m->vsync_start - m->vdisplay;
+	vm->vback_porch = m->vtotal - m->vsync_end;
+	vm->vsync_len = m->vsync_end - m->vsync_start;
+	vm->hfront_porch = m->hsync_start - m->hdisplay;
+	vm->hback_porch = m->htotal - m->hsync_end;
+	vm->hsync_len = m->hsync_end - m->hsync_start;
 }
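
DRM expresses timings as positions along the scanline and frame (sync start, sync end, total), while struct videomode wants interval lengths, hence the subtractions above. A standalone check of the conversion, using standard 1080p@60 CEA timings as example input (the numbers are illustrative, not from this patch):

	#include <stdio.h>

	/* Positions -> interval lengths, exactly as in exynos_dsi_mode_set(). */
	int main(void)
	{
		int hdisplay = 1920, hsync_start = 2008;
		int hsync_end = 2052, htotal = 2200;
		int vdisplay = 1080, vsync_start = 1084;
		int vsync_end = 1089, vtotal = 1125;

		printf("hfront=%d hsync=%d hback=%d\n",
		       hsync_start - hdisplay,	/* 88  */
		       hsync_end - hsync_start,	/* 44  */
		       htotal - hsync_end);	/* 148 */
		printf("vfront=%d vsync=%d vback=%d\n",
		       vsync_start - vdisplay,	/* 4  */
		       vsync_end - vsync_start,	/* 5  */
		       vtotal - vsync_end);	/* 36 */
		return 0;
	}
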
 
-static struct exynos_drm_display_ops exynos_dsi_display_ops = {
-	.create_connector = exynos_dsi_create_connector,
+static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
+	.mode_fixup = exynos_dsi_mode_fixup,
 	.mode_set = exynos_dsi_mode_set,
-	.dpms = exynos_dsi_dpms
+	.enable = exynos_dsi_enable,
+	.disable = exynos_dsi_disable,
+};
+
+static struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
@@ -1821,22 +1815,35 @@ end:
 static int exynos_dsi_bind(struct device *dev, struct device *master,
 				void *data)
 {
-	struct exynos_drm_display *display = dev_get_drvdata(dev);
-	struct exynos_dsi *dsi = display_to_dsi(display);
+	struct drm_encoder *encoder = dev_get_drvdata(dev);
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 	struct drm_device *drm_dev = data;
 	struct drm_bridge *bridge;
 	int ret;
 
-	ret = exynos_drm_create_enc_conn(drm_dev, display);
+	ret = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_LCD);
+	if (ret < 0)
+		return ret;
+
+	encoder->possible_crtcs = 1 << ret;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
+
+	ret = exynos_dsi_create_connector(encoder);
 	if (ret) {
-		DRM_ERROR("Encoder create [%d] failed with %d\n",
-			  display->type, ret);
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
 		return ret;
 	}
 
 	bridge = of_drm_find_bridge(dsi->bridge_node);
 	if (bridge) {
-		display->encoder->bridge = bridge;
 		drm_bridge_attach(drm_dev, bridge);
 	}
 
@@ -1846,10 +1853,10 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
 static void exynos_dsi_unbind(struct device *dev, struct device *master,
 				void *data)
 {
-	struct exynos_drm_display *display = dev_get_drvdata(dev);
-	struct exynos_dsi *dsi = display_to_dsi(display);
+	struct drm_encoder *encoder = dev_get_drvdata(dev);
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 
-	exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+	exynos_dsi_disable(encoder);
 
 	mipi_dsi_host_unregister(&dsi->dsi_host);
 }
@@ -1870,9 +1877,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 	if (!dsi)
 		return -ENOMEM;
 
-	dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
-	dsi->display.ops = &exynos_dsi_display_ops;
-
 	/* To be checked as invalid one */
 	dsi->te_gpio = -ENOENT;
 
@@ -1948,7 +1952,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	platform_set_drvdata(pdev, &dsi->display);
+	platform_set_drvdata(pdev, &dsi->encoder);
 
 	return component_add(dev, &exynos_dsi_component_ops);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
deleted file mode 100644
index 7b89fd520e45..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/* exynos_drm_encoder.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- *	Inki Dae <inki.dae@samsung.com>
- *	Joonyoung Shim <jy0922.shim@samsung.com>
- *	Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
-
-#define to_exynos_encoder(x)	container_of(x, struct exynos_drm_encoder,\
-				drm_encoder)
-
-/*
- * exynos specific encoder structure.
- *
- * @drm_encoder: encoder object.
- * @display: the display structure that maps to this encoder
- */
-struct exynos_drm_encoder {
-	struct drm_encoder		drm_encoder;
-	struct exynos_drm_display	*display;
-};
-
-static bool
-exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
-			       const struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode)
-{
-	struct drm_device *dev = encoder->dev;
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-	struct drm_connector *connector;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->encoder != encoder)
-			continue;
-
-		if (display->ops->mode_fixup)
-			display->ops->mode_fixup(display, connector, mode,
-					adjusted_mode);
-	}
-
-	return true;
-}
-
-static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
-					 struct drm_display_mode *mode,
-					 struct drm_display_mode *adjusted_mode)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-
-	if (display->ops->mode_set)
-		display->ops->mode_set(display, adjusted_mode);
-}
-
-static void exynos_drm_encoder_enable(struct drm_encoder *encoder)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-
-	if (display->ops->dpms)
-		display->ops->dpms(display, DRM_MODE_DPMS_ON);
-
-	if (display->ops->commit)
-		display->ops->commit(display);
-}
-
-static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-
-	if (display->ops->dpms)
-		display->ops->dpms(display, DRM_MODE_DPMS_OFF);
-}
-
-static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
-	.mode_fixup	= exynos_drm_encoder_mode_fixup,
-	.mode_set	= exynos_drm_encoder_mode_set,
-	.enable		= exynos_drm_encoder_enable,
-	.disable	= exynos_drm_encoder_disable,
-};
-
-static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-
-	drm_encoder_cleanup(encoder);
-	kfree(exynos_encoder);
-}
-
-static struct drm_encoder_funcs exynos_encoder_funcs = {
-	.destroy = exynos_drm_encoder_destroy,
-};
-
-static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
-{
-	struct drm_encoder *clone;
-	struct drm_device *dev = encoder->dev;
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-	unsigned int clone_mask = 0;
-	int cnt = 0;
-
-	list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
-		switch (display->type) {
-		case EXYNOS_DISPLAY_TYPE_LCD:
-		case EXYNOS_DISPLAY_TYPE_HDMI:
-		case EXYNOS_DISPLAY_TYPE_VIDI:
-			clone_mask |= (1 << (cnt++));
-			break;
-		default:
-			continue;
-		}
-	}
-
-	return clone_mask;
-}
-
-void exynos_drm_encoder_setup(struct drm_device *dev)
-{
-	struct drm_encoder *encoder;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		encoder->possible_clones = exynos_drm_encoder_clones(encoder);
-}
-
-struct drm_encoder *
-exynos_drm_encoder_create(struct drm_device *dev,
-			   struct exynos_drm_display *display,
-			   unsigned long possible_crtcs)
-{
-	struct drm_encoder *encoder;
-	struct exynos_drm_encoder *exynos_encoder;
-
-	if (!possible_crtcs)
-		return NULL;
-
-	exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
-	if (!exynos_encoder)
-		return NULL;
-
-	exynos_encoder->display = display;
-	encoder = &exynos_encoder->drm_encoder;
-	encoder->possible_crtcs = possible_crtcs;
-
-	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
-	drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
-			DRM_MODE_ENCODER_TMDS);
-
-	drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);
-
-	DRM_DEBUG_KMS("encoder has been created\n");
-
-	return encoder;
-}
-
-struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder)
-{
-	return to_exynos_encoder(encoder)->display;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
deleted file mode 100644
index 26305d8dd93a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- *	Inki Dae <inki.dae@samsung.com>
- *	Joonyoung Shim <jy0922.shim@samsung.com>
- *	Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_ENCODER_H_
-#define _EXYNOS_DRM_ENCODER_H_
-
-void exynos_drm_encoder_setup(struct drm_device *dev);
-struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
-			struct exynos_drm_display *mgr,
-			unsigned long possible_crtcs);
-struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 2b6320e6eae2..084280859589 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -23,7 +23,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 #include "exynos_drm_crtc.h"
 
@@ -33,12 +32,10 @@
  * exynos specific framebuffer structure.
  *
  * @fb: drm framebuffer obejct.
- * @buf_cnt: a buffer count to drm framebuffer.
  * @exynos_gem_obj: array of exynos specific gem object containing a gem object.
  */
 struct exynos_drm_fb {
 	struct drm_framebuffer		fb;
-	unsigned int			buf_cnt;
 	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };
 
@@ -98,10 +95,6 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
 
-	/* This fb should have only one gem object. */
-	if (WARN_ON(exynos_fb->buf_cnt != 1))
-		return -EINVAL;
-
 	return drm_gem_handle_create(file_priv,
 			&exynos_fb->exynos_gem_obj[0]->base, handle);
 }
@@ -122,138 +115,96 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
 	.dirty		= exynos_drm_fb_dirty,
 };
 
-void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
-						unsigned int cnt)
-{
-	struct exynos_drm_fb *exynos_fb;
-
-	exynos_fb = to_exynos_fb(fb);
-
-	exynos_fb->buf_cnt = cnt;
-}
-
-unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb)
-{
-	struct exynos_drm_fb *exynos_fb;
-
-	exynos_fb = to_exynos_fb(fb);
-
-	return exynos_fb->buf_cnt;
-}
-
 struct drm_framebuffer *
 exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_mode_fb_cmd2 *mode_cmd,
-			    struct drm_gem_object *obj)
+			    struct exynos_drm_gem_obj **gem_obj,
+			    int count)
 {
 	struct exynos_drm_fb *exynos_fb;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int i;
 	int ret;
 
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-
-	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb)
 		return ERR_PTR(-ENOMEM);
 
+	for (i = 0; i < count; i++) {
+		ret = check_fb_gem_memory_type(dev, gem_obj[i]);
+		if (ret < 0)
+			goto err;
+
+		exynos_fb->exynos_gem_obj[i] = gem_obj[i];
+	}
+
 	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
 
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
-	if (ret) {
-		kfree(exynos_fb);
+	if (ret < 0) {
 		DRM_ERROR("failed to initialize framebuffer\n");
-		return ERR_PTR(ret);
+		goto err;
 	}
 
 	return &exynos_fb->fb;
+
+err:
+	kfree(exynos_fb);
+	return ERR_PTR(ret);
 }
 
 static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	struct exynos_drm_gem_obj *gem_objs[MAX_FB_BUFFER];
 	struct drm_gem_object *obj;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_fb *exynos_fb;
-	int i, ret;
-
-	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
-	if (!exynos_fb)
-		return ERR_PTR(-ENOMEM);
-
-	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object\n");
-		ret = -ENOENT;
-		goto err_free;
-	}
-
-	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-	exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format);
-
-	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
+	struct drm_framebuffer *fb;
+	int i;
+	int ret;
 
-	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
 		obj = drm_gem_object_lookup(dev, file_priv,
-				mode_cmd->handles[i]);
+					    mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
 			ret = -ENOENT;
-			exynos_fb->buf_cnt = i;
-			goto err_unreference;
+			goto err;
 		}
 
-		exynos_gem_obj = to_exynos_gem_obj(obj);
-		exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
-
-		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
-		if (ret < 0)
-			goto err_unreference;
+		gem_objs[i] = to_exynos_gem_obj(obj);
 	}
 
-	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
-	if (ret) {
-		DRM_ERROR("failed to init framebuffer.\n");
-		goto err_unreference;
+	fb = exynos_drm_framebuffer_init(dev, mode_cmd, gem_objs, i);
+	if (IS_ERR(fb)) {
+		ret = PTR_ERR(fb);
+		goto err;
 	}
 
-	return &exynos_fb->fb;
+	return fb;
 
-err_unreference:
-	for (i = 0; i < exynos_fb->buf_cnt; i++) {
-		struct drm_gem_object *obj;
+err:
+	while (i--)
+		drm_gem_object_unreference_unlocked(&gem_objs[i]->base);
 
-		obj = &exynos_fb->exynos_gem_obj[i]->base;
-		if (obj)
-			drm_gem_object_unreference_unlocked(obj);
-	}
-err_free:
-	kfree(exynos_fb);
 	return ERR_PTR(ret);
 }
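
On a failed lookup, i holds the number of references already taken, so while (i--) releases exactly those, newest first. The same unwind idiom in a tiny self-contained form, with malloc/free standing in for the GEM reference calls:

	#include <stdio.h>
	#include <stdlib.h>

	/* On a failed acquisition, release exactly the objects already
	 * acquired, in reverse order of acquisition. */
	static void *acquire(int i)
	{
		if (i == 2)		/* simulate a failed gem lookup */
			return NULL;
		return malloc(16);
	}

	int main(void)
	{
		void *objs[4];
		int i, n = 4;

		for (i = 0; i < n; i++) {
			objs[i] = acquire(i);
			if (!objs[i])
				goto err;
		}
		/* success path would build the framebuffer here */
		while (i--)
			free(objs[i]);
		return 0;

	err:
		while (i--)		/* frees objs[1], then objs[0] */
			free(objs[i]);
		fprintf(stderr, "acquire failed, cleaned up partial set\n");
		return 1;
	}
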
 
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
-						int index)
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
+						 int index)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-	struct exynos_drm_gem_buf *buffer;
+	struct exynos_drm_gem_obj *obj;
 
 	if (index >= MAX_FB_BUFFER)
 		return NULL;
 
-	buffer = exynos_fb->exynos_gem_obj[index]->buffer;
-	if (!buffer)
+	obj = exynos_fb->exynos_gem_obj[index];
+	if (!obj)
 		return NULL;
 
-	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
+	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)obj->dma_addr);
 
-	return buffer;
+	return obj;
 }
 
 static void exynos_drm_output_poll_changed(struct drm_device *dev)
@@ -267,41 +218,6 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
 		exynos_drm_fbdev_init(dev);
 }
 
-static int exynos_atomic_commit(struct drm_device *dev,
-				struct drm_atomic_state *state,
-				bool async)
-{
-	int ret;
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/* This is the point of no return */
-
-	drm_atomic_helper_swap_state(dev, state);
-
-	drm_atomic_helper_commit_modeset_disables(dev, state);
-
-	drm_atomic_helper_commit_modeset_enables(dev, state);
-
-	/*
-	 * Exynos can't update planes with CRTCs and encoders disabled,
-	 * its updates routines, specially for FIMD, requires the clocks
-	 * to be enabled. So it is necessary to handle the modeset operations
-	 * *before* the commit_planes() step, this way it will always
-	 * have the relevant clocks enabled to perform the update.
-	 */
-
-	drm_atomic_helper_commit_planes(dev, state);
-
-	drm_atomic_helper_cleanup_planes(dev, state);
-
-	drm_atomic_state_free(state);
-
-	return 0;
-}
-
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
 	.fb_create = exynos_user_fb_create,
 	.output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 517471b37566..85e4445b920e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -14,22 +14,18 @@
 #ifndef _EXYNOS_DRM_FB_H_
 #define _EXYNOS_DRM_FB_H
 
+#include "exynos_drm_gem.h"
+
 struct drm_framebuffer *
 exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_mode_fb_cmd2 *mode_cmd,
-			    struct drm_gem_object *obj);
+			    struct exynos_drm_gem_obj **gem_obj,
+			    int count);
 
-/* get memory information of a drm framebuffer */
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+/* get gem object of a drm framebuffer */
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
 						 int index);
 
 void exynos_drm_mode_config_init(struct drm_device *dev);
 
-/* set a buffer count to drm framebuffer. */
-void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
-						unsigned int cnt);
-
-/* get a buffer count to drm framebuffer. */
-unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb);
-
 #endif
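
The new exynos_drm_framebuffer_init() prototype takes an array of GEM objects plus a count because a framebuffer needs one buffer per plane of its pixel format. A toy stand-in for the core's per-format plane count; the hard-coded names are purely illustrative:

#include <stdio.h>
#include <string.h>

static int num_planes(const char *fourcc)
{
	if (!strcmp(fourcc, "NV12"))
		return 2;	/* Y plane + interleaved CbCr plane */
	if (!strcmp(fourcc, "YU12"))
		return 3;	/* separate Y, U and V planes */
	return 1;		/* packed RGB formats use a single buffer */
}

int main(void)
{
	printf("NV12 needs %d gem object(s)\n", num_planes("NV12"));
	printf("XR24 needs %d gem object(s)\n", num_planes("XR24"));
	return 0;
}
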
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e0b085b4bdfa..a221f753ad9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -21,7 +21,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 
 #define MAX_CONNECTOR		4
@@ -32,7 +31,7 @@
 
 struct exynos_drm_fbdev {
 	struct drm_fb_helper		drm_fb_helper;
-	struct exynos_drm_gem_obj	*exynos_gem_obj;
+	struct exynos_drm_gem_obj	*obj;
 };
 
 static int exynos_drm_fb_mmap(struct fb_info *info,
@@ -40,8 +39,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
 {
 	struct drm_fb_helper *helper = info->par;
 	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
-	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
-	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+	struct exynos_drm_gem_obj *obj = exynos_fbd->obj;
 	unsigned long vm_size;
 	int ret;
 
@@ -49,11 +47,11 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
 
 	vm_size = vma->vm_end - vma->vm_start;
 
-	if (vm_size > buffer->size)
+	if (vm_size > obj->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
-		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+	ret = dma_mmap_attrs(helper->dev->dev, vma, obj->pages, obj->dma_addr,
+			     obj->size, &obj->dma_attrs);
 	if (ret < 0) {
 		DRM_ERROR("failed to mmap.\n");
 		return ret;
@@ -65,9 +63,9 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
 static struct fb_ops exynos_drm_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_mmap        = exynos_drm_fb_mmap,
-	.fb_fillrect	= cfb_fillrect,
-	.fb_copyarea	= cfb_copyarea,
-	.fb_imageblit	= cfb_imageblit,
+	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
+	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
+	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
 	.fb_blank	= drm_fb_helper_blank,
@@ -76,42 +74,42 @@ static struct fb_ops exynos_drm_fb_ops = {
 };
 
 static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
-				     struct drm_fb_helper_surface_size *sizes,
-				     struct drm_framebuffer *fb)
+				   struct drm_fb_helper_surface_size *sizes,
+				   struct exynos_drm_gem_obj *obj)
 {
-	struct fb_info *fbi = helper->fbdev;
-	struct exynos_drm_gem_buf *buffer;
+	struct fb_info *fbi;
+	struct drm_framebuffer *fb = helper->fb;
 	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
 	unsigned int nr_pages;
 	unsigned long offset;
 
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
+		DRM_ERROR("failed to allocate fb info.\n");
+		return PTR_ERR(fbi);
+	}
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &exynos_drm_fb_ops;
+
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
-	/* RGB formats use only one buffer */
-	buffer = exynos_drm_fb_buffer(fb, 0);
-	if (!buffer) {
-		DRM_DEBUG_KMS("buffer is null.\n");
-		return -EFAULT;
-	}
-
-	nr_pages = buffer->size >> PAGE_SHIFT;
+	nr_pages = obj->size >> PAGE_SHIFT;
 
-	buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
-			nr_pages, VM_MAP,
+	obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP,
 			pgprot_writecombine(PAGE_KERNEL));
-	if (!buffer->kvaddr) {
+	if (!obj->kvaddr) {
 		DRM_ERROR("failed to map pages to kernel space.\n");
+		drm_fb_helper_release_fbi(helper);
 		return -EIO;
 	}
 
-	/* buffer count to framebuffer always is 1 at booting time. */
-	exynos_drm_fb_set_buf_cnt(fb, 1);
-
 	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
 	offset += fbi->var.yoffset * fb->pitches[0];
 
-	fbi->screen_base = buffer->kvaddr + offset;
+	fbi->screen_base = obj->kvaddr + offset;
 	fbi->screen_size = size;
 	fbi->fix.smem_len = size;
 
@@ -122,9 +120,8 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 				    struct drm_fb_helper_surface_size *sizes)
 {
 	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
-	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem_obj *obj;
 	struct drm_device *dev = helper->dev;
-	struct fb_info *fbi;
 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 	struct platform_device *pdev = dev->platformdev;
 	unsigned long size;
@@ -142,69 +139,44 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
 
 	mutex_lock(&dev->struct_mutex);
 
-	fbi = framebuffer_alloc(0, &pdev->dev);
-	if (!fbi) {
-		DRM_ERROR("failed to allocate fb info.\n");
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 
-	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
+	obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
 	/*
 	 * If physically contiguous memory allocation fails and if IOMMU is
 	 * supported then try to get buffer from non physically contiguous
 	 * memory area.
 	 */
-	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+	if (IS_ERR(obj) && is_drm_iommu_supported(dev)) {
 		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
-		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
-							size);
+		obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, size);
 	}
 
-	if (IS_ERR(exynos_gem_obj)) {
-		ret = PTR_ERR(exynos_gem_obj);
-		goto err_release_framebuffer;
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto out;
 	}
 
-	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+	exynos_fbdev->obj = obj;
 
-	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
-			&exynos_gem_obj->base);
+	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, &obj, 1);
 	if (IS_ERR(helper->fb)) {
 		DRM_ERROR("failed to create drm framebuffer.\n");
 		ret = PTR_ERR(helper->fb);
 		goto err_destroy_gem;
 	}
 
-	helper->fbdev = fbi;
-
-	fbi->par = helper;
-	fbi->flags = FBINFO_FLAG_DEFAULT;
-	fbi->fbops = &exynos_drm_fb_ops;
-
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("failed to allocate cmap.\n");
-		goto err_destroy_framebuffer;
-	}
-
-	ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
+	ret = exynos_drm_fbdev_update(helper, sizes, obj);
 	if (ret < 0)
-		goto err_dealloc_cmap;
+		goto err_destroy_framebuffer;
 
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 
-err_dealloc_cmap:
-	fb_dealloc_cmap(&fbi->cmap);
 err_destroy_framebuffer:
 	drm_framebuffer_cleanup(helper->fb);
 err_destroy_gem:
-	exynos_drm_gem_destroy(exynos_gem_obj);
-err_release_framebuffer:
-	framebuffer_release(fbi);
+	exynos_drm_gem_destroy(obj);
 
 /*
  * if failed, all resources allocated above would be released by
@@ -297,11 +269,11 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
 				      struct drm_fb_helper *fb_helper)
 {
 	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
-	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+	struct exynos_drm_gem_obj *obj = exynos_fbd->obj;
 	struct drm_framebuffer *fb;
 
-	if (exynos_gem_obj->buffer->kvaddr)
-		vunmap(exynos_gem_obj->buffer->kvaddr);
+	if (obj->kvaddr)
+		vunmap(obj->kvaddr);
 
 	/* release drm framebuffer and real buffer */
 	if (fb_helper->fb && fb_helper->fb->funcs) {
@@ -312,21 +284,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
 		}
 	}
 
-	/* release linux framebuffer */
-	if (fb_helper->fbdev) {
-		struct fb_info *info;
-		int ret;
-
-		info = fb_helper->fbdev;
-		ret = unregister_framebuffer(info);
-		if (ret < 0)
-			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(fb_helper);
+	drm_fb_helper_release_fbi(fb_helper);
 
 	drm_fb_helper_fini(fb_helper);
 }
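
exynos_drm_fbdev_update() above derives screen_base by offsetting the vmap'ed buffer with the panning position: xoffset scaled by bytes per pixel plus yoffset scaled by the line pitch. The arithmetic in isolation, with invented geometry:

#include <stdio.h>

static unsigned long pan_offset(unsigned int xoffset, unsigned int yoffset,
				unsigned int bits_per_pixel,
				unsigned int pitch)
{
	return xoffset * (bits_per_pixel >> 3) +
	       (unsigned long)yoffset * pitch;
}

int main(void)
{
	/* 1920x1080 XRGB8888 framebuffer: 4 bytes/pixel, pitch 7680 */
	printf("offset = %lu bytes\n", pan_offset(16, 8, 32, 1920 * 4));
	return 0;
}
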
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 794e56c8798e..750a9e6b9e8d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -59,6 +59,7 @@
 #define VIDWnALPHA1(win)	(VIDW_ALPHA + 0x04 + (win) * 8)
 
 #define VIDWx_BUF_START(win, buf)	(VIDW_BUF_START(buf) + (win) * 8)
+#define VIDWx_BUF_START_S(win, buf)	(VIDW_BUF_START_S(buf) + (win) * 8)
 #define VIDWx_BUF_END(win, buf)		(VIDW_BUF_END(buf) + (win) * 8)
 #define VIDWx_BUF_SIZE(win, buf)	(VIDW_BUF_SIZE(buf) + (win) * 4)
 
@@ -169,7 +170,7 @@ struct fimd_context {
 
 	struct exynos_drm_panel_info panel;
 	struct fimd_driver_data *driver_data;
-	struct exynos_drm_display *display;
+	struct drm_encoder *encoder;
 };
 
 static const struct of_device_id fimd_driver_dt_match[] = {
@@ -187,6 +188,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
 };
 MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
 
+static const uint32_t fimd_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
 	struct platform_device *pdev)
 {
@@ -348,13 +357,6 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
 	pm_runtime_put(ctx->dev);
 }
 
-static void fimd_iommu_detach_devices(struct fimd_context *ctx)
-{
-	/* detach this sub driver from iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev))
-		drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
-}
-
 static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
 		const struct drm_display_mode *mode)
 {
@@ -486,9 +488,9 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
 }
 
 
-static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+				struct drm_framebuffer *fb)
 {
-	struct exynos_drm_plane *plane = &ctx->planes[win];
 	unsigned long val;
 
 	val = WINCONx_ENWIN;
@@ -498,11 +500,11 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
 	 * So the request format is ARGB8888 then change it to XRGB8888.
 	 */
 	if (ctx->driver_data->has_limited_fmt && !win) {
-		if (plane->pixel_format == DRM_FORMAT_ARGB8888)
-			plane->pixel_format = DRM_FORMAT_XRGB8888;
+		if (fb->pixel_format == DRM_FORMAT_ARGB8888)
+			fb->pixel_format = DRM_FORMAT_XRGB8888;
 	}
 
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_C8:
 		val |= WINCON0_BPPMODE_8BPP_PALETTE;
 		val |= WINCONx_BURSTLEN_8WORD;
@@ -538,7 +540,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
 		break;
 	}
 
-	DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
+	DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
 
 	/*
 	 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -548,7 +550,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
 	 * movement causes unstable DMA which results into iommu crash/tear.
 	 */
 
-	if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+	if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_4WORD;
 	}
@@ -598,6 +600,16 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
 {
 	u32 reg, bits, val;
 
+	/*
+	 * The SHADOWCON/PRTCON register controls update timing.
+	 *
+	 * For example, if DMA starts while only the width field of a
+	 * register has been written, the FIMD hardware can malfunction.
+	 * With window protection enabled, the register fields tagged
+	 * '_F' are not latched at vsync; they are only applied once
+	 * the window is unprotected again.
+	 */
+
 	if (ctx->driver_data->has_shadowcon) {
 		reg = SHADOWCON;
 		bits = SHADOWCON_WINx_PROTECT(win);
@@ -614,41 +626,45 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
 	writel(val, ctx->regs + reg);
 }
 
-static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void fimd_atomic_begin(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
 {
 	struct fimd_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
-	dma_addr_t dma_addr;
-	unsigned long val, size, offset;
-	unsigned int last_x, last_y, buf_offsize, line_size;
 
 	if (ctx->suspended)
 		return;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
+	fimd_shadow_protect_win(ctx, plane->zpos, true);
+}
 
-	plane = &ctx->planes[win];
+static void fimd_atomic_flush(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
+{
+	struct fimd_context *ctx = crtc->ctx;
 
 	if (ctx->suspended)
 		return;
 
-	/*
-	 * SHADOWCON/PRTCON register is used for enabling timing.
-	 *
-	 * for example, once only width value of a register is set,
-	 * if the dma is started then fimd hardware could malfunction so
-	 * with protect window setting, the register fields with prefix '_F'
-	 * wouldn't be updated at vsync also but updated once unprotect window
-	 * is set.
-	 */
+	fimd_shadow_protect_win(ctx, plane->zpos, false);
+}
 
-	/* protect windows */
-	fimd_shadow_protect_win(ctx, win, true);
+static void fimd_update_plane(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane)
+{
+	struct fimd_context *ctx = crtc->ctx;
+	struct drm_plane_state *state = plane->base.state;
+	dma_addr_t dma_addr;
+	unsigned long val, size, offset;
+	unsigned int last_x, last_y, buf_offsize, line_size;
+	unsigned int win = plane->zpos;
+	unsigned int bpp = state->fb->bits_per_pixel >> 3;
+	unsigned int pitch = state->fb->pitches[0];
 
+	if (ctx->suspended)
+		return;
 
-	offset = plane->src_x * (plane->bpp >> 3);
-	offset += plane->src_y * plane->pitch;
+	offset = plane->src_x * bpp;
+	offset += plane->src_y * pitch;
 
 	/* buffer start address */
 	dma_addr = plane->dma_addr[0] + offset;
@@ -656,18 +672,18 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 	writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
 
 	/* buffer end address */
-	size = plane->pitch * plane->crtc_height;
+	size = pitch * plane->crtc_h;
 	val = (unsigned long)(dma_addr + size);
 	writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
 
 	DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
 			(unsigned long)dma_addr, val, size);
 	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
-			plane->crtc_width, plane->crtc_height);
+			plane->crtc_w, plane->crtc_h);
 
 	/* buffer size */
-	buf_offsize = plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
-	line_size = plane->crtc_width * (plane->bpp >> 3);
+	buf_offsize = pitch - (plane->crtc_w * bpp);
+	line_size = plane->crtc_w * bpp;
 	val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
 		VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
 		VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
@@ -681,10 +697,10 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 		VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y);
 	writel(val, ctx->regs + VIDOSD_A(win));
 
-	last_x = plane->crtc_x + plane->crtc_width;
+	last_x = plane->crtc_x + plane->crtc_w;
 	if (last_x)
 		last_x--;
-	last_y = plane->crtc_y + plane->crtc_height;
+	last_y = plane->crtc_y + plane->crtc_h;
 	if (last_y)
 		last_y--;
 
@@ -701,13 +717,13 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 		u32 offset = VIDOSD_D(win);
 		if (win == 0)
 			offset = VIDOSD_C(win);
-		val = plane->crtc_width * plane->crtc_height;
+		val = plane->crtc_w * plane->crtc_h;
 		writel(val, ctx->regs + offset);
 
 		DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
 	}
 
-	fimd_win_set_pixfmt(ctx, win);
+	fimd_win_set_pixfmt(ctx, win, state->fb);
 
 	/* hardware window 0 doesn't support color key. */
 	if (win != 0)
@@ -718,36 +734,23 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 	if (ctx->driver_data->has_shadowcon)
 		fimd_enable_shadow_channel_path(ctx, win, true);
 
-	/* Enable DMA channel and unprotect windows */
-	fimd_shadow_protect_win(ctx, win, false);
-
 	if (ctx->i80_if)
 		atomic_set(&ctx->win_updated, 1);
 }
 
-static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
 {
 	struct fimd_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
-
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
+	unsigned int win = plane->zpos;
 
 	if (ctx->suspended)
 		return;
 
-	/* protect windows */
-	fimd_shadow_protect_win(ctx, win, true);
-
 	fimd_enable_video_output(ctx, win, false);
 
 	if (ctx->driver_data->has_shadowcon)
 		fimd_enable_shadow_channel_path(ctx, win, false);
-
-	/* unprotect windows */
-	fimd_shadow_protect_win(ctx, win, false);
 }
 
 static void fimd_enable(struct exynos_drm_crtc *crtc)
@@ -795,7 +798,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc)
 	 * a destroyed buffer later.
 	 */
 	for (i = 0; i < WINDOWS_NR; i++)
-		fimd_win_disable(crtc, i);
+		fimd_disable_plane(crtc, &ctx->planes[i]);
 
 	fimd_enable_vblank(crtc);
 	fimd_wait_for_vblank(crtc);
@@ -862,7 +865,7 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
 	}
 
 	if (test_bit(0, &ctx->irq_flags))
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 }
 
 static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
@@ -890,17 +893,19 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
 	.enable_vblank = fimd_enable_vblank,
 	.disable_vblank = fimd_disable_vblank,
 	.wait_for_vblank = fimd_wait_for_vblank,
-	.win_commit = fimd_win_commit,
-	.win_disable = fimd_win_disable,
+	.atomic_begin = fimd_atomic_begin,
+	.update_plane = fimd_update_plane,
+	.disable_plane = fimd_disable_plane,
+	.atomic_flush = fimd_atomic_flush,
 	.te_handler = fimd_te_handler,
 	.clock_enable = fimd_dp_clock_enable,
-	.clear_channels = fimd_clear_channels,
 };
 
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 {
 	struct fimd_context *ctx = (struct fimd_context *)dev_id;
-	u32 val, clear_bit;
+	u32 val, clear_bit, start, start_s;
+	int win;
 
 	val = readl(ctx->regs + VIDINTCON1);
 
@@ -912,15 +917,25 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 	if (ctx->pipe < 0 || !ctx->drm_dev)
 		goto out;
 
-	if (ctx->i80_if) {
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+	if (!ctx->i80_if)
+		drm_crtc_handle_vblank(&ctx->crtc->base);
+
+	for (win = 0; win < WINDOWS_NR; win++) {
+		struct exynos_drm_plane *plane = &ctx->planes[win];
+
+		if (!plane->pending_fb)
+			continue;
 
+		start = readl(ctx->regs + VIDWx_BUF_START(win, 0));
+		start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0));
+		if (start == start_s)
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+	}
+
+	if (ctx->i80_if) {
 		/* Exits triggering mode */
 		atomic_set(&ctx->triggering, 0);
 	} else {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
-
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
 			atomic_set(&ctx->wait_vsync_event, 0);
@@ -949,7 +964,8 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, fimd_formats,
+					ARRAY_SIZE(fimd_formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -961,10 +977,13 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
 	if (IS_ERR(ctx->crtc))
 		return PTR_ERR(ctx->crtc);
 
-	if (ctx->display)
-		exynos_drm_create_enc_conn(drm_dev, ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_bind(drm_dev, ctx->encoder);
+
+	if (is_drm_iommu_supported(drm_dev))
+		fimd_clear_channels(ctx->crtc);
 
-	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+	ret = drm_iommu_attach_device(drm_dev, dev);
 	if (ret)
 		priv->pipe--;
 
@@ -978,10 +997,10 @@ static void fimd_unbind(struct device *dev, struct device *master,
 
 	fimd_disable(ctx->crtc);
 
-	fimd_iommu_detach_devices(ctx);
+	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
 
-	if (ctx->display)
-		exynos_dpi_remove(ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_remove(ctx->encoder);
 }
 
 static const struct component_ops fimd_component_ops = {
@@ -1088,10 +1107,9 @@ static int fimd_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, ctx);
 
-	ctx->display = exynos_dpi_probe(dev);
-	if (IS_ERR(ctx->display)) {
-		return PTR_ERR(ctx->display);
-	}
+	ctx->encoder = exynos_dpi_probe(dev);
+	if (IS_ERR(ctx->encoder))
+		return PTR_ERR(ctx->encoder);
 
 	pm_runtime_enable(dev);
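
fimd_update_plane() above programs each window from four derived values: the crop offset into the framebuffer, the scanout end address, the visible line size, and the gap from one line to the next. A compilable sketch of the same arithmetic, with invented geometry:

#include <stdio.h>

int main(void)
{
	unsigned int src_x = 8, src_y = 4;	/* crop origin in the fb */
	unsigned int crtc_w = 640, crtc_h = 480;	/* visible size */
	unsigned int bpp = 4;			/* XRGB8888: 4 bytes/pixel */
	unsigned int pitch = 1920 * bpp;	/* fb stride in bytes */

	unsigned long offset = src_x * bpp + (unsigned long)src_y * pitch;
	unsigned long size = (unsigned long)pitch * crtc_h;
	unsigned int line_size = crtc_w * bpp;
	unsigned int buf_offsize = pitch - line_size;

	printf("start +%lu, end +%lu\n", offset, offset + size);
	printf("line %u bytes, gap to next line %u bytes\n",
	       line_size, buf_offsize);
	return 0;
}
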
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 81a250830808..535b4ad6c4b1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -48,11 +48,13 @@
 
 /* registers for base address */
 #define G2D_SRC_BASE_ADDR		0x0304
+#define G2D_SRC_STRIDE_REG		0x0308
 #define G2D_SRC_COLOR_MODE		0x030C
 #define G2D_SRC_LEFT_TOP		0x0310
 #define G2D_SRC_RIGHT_BOTTOM		0x0314
 #define G2D_SRC_PLANE2_BASE_ADDR	0x0318
 #define G2D_DST_BASE_ADDR		0x0404
+#define G2D_DST_STRIDE_REG		0x0408
 #define G2D_DST_COLOR_MODE		0x040C
 #define G2D_DST_LEFT_TOP		0x0410
 #define G2D_DST_RIGHT_BOTTOM		0x0414
@@ -148,6 +150,7 @@ struct g2d_cmdlist {
  * A structure of buffer description
  *
  * @format: color format
+ * @stride: buffer stride/pitch in bytes
  * @left_x: the x coordinates of left top corner
  * @top_y: the y coordinates of left top corner
  * @right_x: the x coordinates of right bottom corner
@@ -156,6 +159,7 @@ struct g2d_cmdlist {
  */
 struct g2d_buf_desc {
 	unsigned int	format;
+	unsigned int	stride;
 	unsigned int	left_x;
 	unsigned int	top_y;
 	unsigned int	right_x;
@@ -589,6 +593,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
 
 	switch (reg_offset) {
 	case G2D_SRC_BASE_ADDR:
+	case G2D_SRC_STRIDE_REG:
 	case G2D_SRC_COLOR_MODE:
 	case G2D_SRC_LEFT_TOP:
 	case G2D_SRC_RIGHT_BOTTOM:
@@ -598,6 +603,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
 		reg_type = REG_TYPE_SRC_PLANE2;
 		break;
 	case G2D_DST_BASE_ADDR:
+	case G2D_DST_STRIDE_REG:
 	case G2D_DST_COLOR_MODE:
 	case G2D_DST_LEFT_TOP:
 	case G2D_DST_RIGHT_BOTTOM:
@@ -652,8 +658,8 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
 						enum g2d_reg_type reg_type,
 						unsigned long size)
 {
-	unsigned int width, height;
-	unsigned long area;
+	int width, height;
+	unsigned long bpp, last_pos;
 
 	/*
 	 * check source and destination buffers only.
@@ -662,22 +668,37 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
 	if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
 		return true;
 
-	width = buf_desc->right_x - buf_desc->left_x;
+	/* This check also makes sure that right_x > left_x. */
+	width = (int)buf_desc->right_x - (int)buf_desc->left_x;
 	if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
-		DRM_ERROR("width[%u] is out of range!\n", width);
+		DRM_ERROR("width[%d] is out of range!\n", width);
 		return false;
 	}
 
-	height = buf_desc->bottom_y - buf_desc->top_y;
+	/* This check also makes sure that bottom_y > top_y. */
+	height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
 	if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
-		DRM_ERROR("height[%u] is out of range!\n", height);
+		DRM_ERROR("height[%d] is out of range!\n", height);
 		return false;
 	}
 
-	area = (unsigned long)width * (unsigned long)height *
-					g2d_get_buf_bpp(buf_desc->format);
-	if (area > size) {
-		DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
+	bpp = g2d_get_buf_bpp(buf_desc->format);
+
+	/* Compute the position of the last byte that the engine accesses. */
+	last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
+		(unsigned long)buf_desc->stride +
+		(unsigned long)buf_desc->right_x * bpp - 1;
+
+	/*
+	 * Since right_x > left_x and bottom_y > top_y, we already know
+	 * that first_pos < last_pos (first_pos being the position of
+	 * the first byte the engine accesses), so it only remains to
+	 * check that last_pos is smaller than the buffer size.
+	 */
+
+	if (last_pos >= size) {
+		DRM_ERROR("last engine access position [%lu] is out of range [%lu]!\n",
+			last_pos, size);
 		return false;
 	}
 
@@ -973,8 +994,6 @@ static int g2d_check_reg_offset(struct device *dev,
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			/* check userptr buffer type. */
 			if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
@@ -983,14 +1002,22 @@ static int g2d_check_reg_offset(struct device *dev,
 			} else
 				buf_info->types[reg_type] = BUF_TYPE_GEM;
 			break;
+		case G2D_SRC_STRIDE_REG:
+		case G2D_DST_STRIDE_REG:
+			if (for_addr)
+				goto err;
+
+			reg_type = g2d_get_reg_type(reg_offset);
+
+			buf_desc = &buf_info->descs[reg_type];
+			buf_desc->stride = cmdlist->data[index + 1];
+			break;
 		case G2D_SRC_COLOR_MODE:
 		case G2D_DST_COLOR_MODE:
 			if (for_addr)
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			buf_desc = &buf_info->descs[reg_type];
 			value = cmdlist->data[index + 1];
@@ -1003,8 +1030,6 @@ static int g2d_check_reg_offset(struct device *dev,
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			buf_desc = &buf_info->descs[reg_type];
 			value = cmdlist->data[index + 1];
@@ -1018,8 +1043,6 @@ static int g2d_check_reg_offset(struct device *dev,
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			buf_desc = &buf_info->descs[reg_type];
 			value = cmdlist->data[index + 1];
@@ -1319,9 +1342,6 @@ static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 		return ret;
 	}
 
-	if (!is_drm_iommu_supported(drm_dev))
-		return 0;
-
 	ret = drm_iommu_attach_device(drm_dev, dev);
 	if (ret < 0) {
 		dev_err(dev, "failed to enable iommu.\n");
@@ -1334,9 +1354,6 @@ static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 
 static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
-	if (!is_drm_iommu_supported(drm_dev))
-		return;
-
 	drm_iommu_detach_device(drm_dev, dev);
 }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0d5b9698d384..62b9ea1b07fb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -13,98 +13,112 @@
 #include <drm/drm_vma_manager.h>
 
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 #include "exynos_drm_iommu.h"
 
-static unsigned int convert_to_vm_err_msg(int msg)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
 {
-	unsigned int out_msg;
+	struct drm_device *dev = obj->base.dev;
+	enum dma_attr attr;
+	unsigned int nr_pages;
 
-	switch (msg) {
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-		out_msg = VM_FAULT_NOPAGE;
-		break;
+	if (obj->dma_addr) {
+		DRM_DEBUG_KMS("already allocated.\n");
+		return 0;
+	}
 
-	case -ENOMEM:
-		out_msg = VM_FAULT_OOM;
-		break;
+	init_dma_attrs(&obj->dma_attrs);
 
-	default:
-		out_msg = VM_FAULT_SIGBUS;
-		break;
-	}
+	/*
+	 * If EXYNOS_BO_CONTIG, a fully physically contiguous memory
+	 * region is allocated; otherwise the allocation is only as
+	 * physically contiguous as possible.
+	 */
+	if (!(obj->flags & EXYNOS_BO_NONCONTIG))
+		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);
 
-	return out_msg;
-}
+	/*
+	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combine
+	 * mapping; otherwise use a cacheable mapping.
+	 */
+	if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
+		attr = DMA_ATTR_WRITE_COMBINE;
+	else
+		attr = DMA_ATTR_NON_CONSISTENT;
 
-static int check_gem_flags(unsigned int flags)
-{
-	if (flags & ~(EXYNOS_BO_MASK)) {
-		DRM_ERROR("invalid flags.\n");
-		return -EINVAL;
-	}
+	dma_set_attr(attr, &obj->dma_attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);
 
-	return 0;
-}
+	nr_pages = obj->size >> PAGE_SHIFT;
 
-static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
-					struct vm_area_struct *vma)
-{
-	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+	if (!is_drm_iommu_supported(dev)) {
+		dma_addr_t start_addr;
+		unsigned int i = 0;
 
-	/* non-cachable as default. */
-	if (obj->flags & EXYNOS_BO_CACHABLE)
-		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	else if (obj->flags & EXYNOS_BO_WC)
-		vma->vm_page_prot =
-			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	else
-		vma->vm_page_prot =
-			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-}
+		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+		if (!obj->pages) {
+			DRM_ERROR("failed to allocate pages.\n");
+			return -ENOMEM;
+		}
 
-static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
-{
-	/* TODO */
+		obj->cookie = dma_alloc_attrs(dev->dev,
+					obj->size,
+					&obj->dma_addr, GFP_KERNEL,
+					&obj->dma_attrs);
+		if (!obj->cookie) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			drm_free_large(obj->pages);
+			return -ENOMEM;
+		}
 
-	return roundup(size, PAGE_SIZE);
+		start_addr = obj->dma_addr;
+		while (i < nr_pages) {
+			obj->pages[i] = phys_to_page(start_addr);
+			start_addr += PAGE_SIZE;
+			i++;
+		}
+	} else {
+		obj->pages = dma_alloc_attrs(dev->dev, obj->size,
+					&obj->dma_addr, GFP_KERNEL,
+					&obj->dma_attrs);
+		if (!obj->pages) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			return -ENOMEM;
+		}
+	}
+
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)obj->dma_addr,
+			obj->size);
+
+	return 0;
 }
 
-static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
-					struct vm_area_struct *vma,
-					unsigned long f_vaddr,
-					pgoff_t page_offset)
+static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-	struct scatterlist *sgl;
-	unsigned long pfn;
-	int i;
+	struct drm_device *dev = obj->base.dev;
 
-	if (!buf->sgt)
-		return -EINTR;
-
-	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
-		DRM_ERROR("invalid page offset\n");
-		return -EINVAL;
+	if (!obj->dma_addr) {
+		DRM_DEBUG_KMS("dma_addr is invalid.\n");
+		return;
 	}
 
-	sgl = buf->sgt->sgl;
-	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
-		if (page_offset < (sgl->length >> PAGE_SHIFT))
-			break;
-		page_offset -=	(sgl->length >> PAGE_SHIFT);
-	}
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)obj->dma_addr, obj->size);
 
-	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+	if (!is_drm_iommu_supported(dev)) {
+		dma_free_attrs(dev->dev, obj->size, obj->cookie,
+				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+		drm_free_large(obj->pages);
+	} else
+		dma_free_attrs(dev->dev, obj->size, obj->pages,
+				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
 
-	return vm_insert_mixed(vma, f_vaddr, pfn);
+	obj->dma_addr = (dma_addr_t)NULL;
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
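
exynos_drm_alloc_buf() above reduces the old buffer helpers to two flag-driven decisions: whether the allocation must be contiguous and how it is mapped. A user-space sketch of just the mapping decision; the flag values here are stand-ins, not the EXYNOS UAPI ones:

#include <stdio.h>

#define BO_NONCONTIG	0x1	/* hypothetical values */
#define BO_CACHABLE	0x2
#define BO_WC		0x4

static const char *pick_mapping(unsigned int flags)
{
	/* mirrors the condition above: WC wins, and the default
	 * (neither WC nor CACHABLE set) is also write-combine */
	if ((flags & BO_WC) || !(flags & BO_CACHABLE))
		return "write-combine";
	return "cacheable (non-consistent)";
}

int main(void)
{
	printf("default  -> %s\n", pick_mapping(0));
	printf("cachable -> %s\n", pick_mapping(BO_CACHABLE));
	printf("wc       -> %s\n", pick_mapping(BO_WC | BO_CACHABLE));
	return 0;
}
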
@@ -131,11 +145,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
 
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 {
-	struct drm_gem_object *obj;
-	struct exynos_drm_gem_buf *buf;
-
-	obj = &exynos_gem_obj->base;
-	buf = exynos_gem_obj->buffer;
+	struct drm_gem_object *obj = &exynos_gem_obj->base;
 
 	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
 
@@ -148,12 +158,9 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 	if (obj->import_attach)
 		goto out;
 
-	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+	exynos_drm_free_buf(exynos_gem_obj);
 
 out:
-	exynos_drm_fini_buf(obj->dev, buf);
-	exynos_gem_obj->buffer = NULL;
-
 	drm_gem_free_mmap_offset(obj);
 
 	/* release file pointer to gem object. */
@@ -180,7 +187,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
 
 	drm_gem_object_unreference_unlocked(obj);
 
-	return exynos_gem_obj->buffer->size;
+	return exynos_gem_obj->size;
 }
 
 
@@ -193,7 +200,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
 
 	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
 	if (!exynos_gem_obj)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	exynos_gem_obj->size = size;
 	obj = &exynos_gem_obj->base;
@@ -202,7 +209,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
 	if (ret < 0) {
 		DRM_ERROR("failed to initialize gem object\n");
 		kfree(exynos_gem_obj);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -215,47 +222,35 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 						unsigned long size)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
 	int ret;
 
+	if (flags & ~(EXYNOS_BO_MASK)) {
+		DRM_ERROR("invalid flags.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!size) {
 		DRM_ERROR("invalid size.\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	size = roundup_gem_size(size, flags);
-
-	ret = check_gem_flags(flags);
-	if (ret)
-		return ERR_PTR(ret);
-
-	buf = exynos_drm_init_buf(dev, size);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
+	size = roundup(size, PAGE_SIZE);
 
 	exynos_gem_obj = exynos_drm_gem_init(dev, size);
-	if (!exynos_gem_obj) {
-		ret = -ENOMEM;
-		goto err_fini_buf;
-	}
-
-	exynos_gem_obj->buffer = buf;
+	if (IS_ERR(exynos_gem_obj))
+		return exynos_gem_obj;
 
 	/* set memory type and cache attribute from user side. */
 	exynos_gem_obj->flags = flags;
 
-	ret = exynos_drm_alloc_buf(dev, buf, flags);
-	if (ret < 0)
-		goto err_gem_fini;
+	ret = exynos_drm_alloc_buf(exynos_gem_obj);
+	if (ret < 0) {
+		drm_gem_object_release(&exynos_gem_obj->base);
+		kfree(exynos_gem_obj);
+		return ERR_PTR(ret);
+	}
 
 	return exynos_gem_obj;
-
-err_gem_fini:
-	drm_gem_object_release(&exynos_gem_obj->base);
-	kfree(exynos_gem_obj);
-err_fini_buf:
-	exynos_drm_fini_buf(dev, buf);
-	return ERR_PTR(ret);
 }
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -294,7 +289,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	return &exynos_gem_obj->buffer->dma_addr;
+	return &exynos_gem_obj->dma_addr;
 }
 
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
@@ -322,7 +317,6 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 				      struct vm_area_struct *vma)
 {
 	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
-	struct exynos_drm_gem_buf *buffer;
 	unsigned long vm_size;
 	int ret;
 
@@ -331,19 +325,13 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 
 	vm_size = vma->vm_end - vma->vm_start;
 
-	/*
-	 * a buffer contains information to physically continuous memory
-	 * allocated by user request or at framebuffer creation.
-	 */
-	buffer = exynos_gem_obj->buffer;
-
 	/* check if user-requested size is valid. */
-	if (vm_size > buffer->size)
+	if (vm_size > exynos_gem_obj->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
-				buffer->dma_addr, buffer->size,
-				&buffer->dma_attrs);
+	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
+				exynos_gem_obj->dma_addr, exynos_gem_obj->size,
+				&exynos_gem_obj->dma_attrs);
 	if (ret < 0) {
 		DRM_ERROR("failed to mmap.\n");
 		return ret;
@@ -503,15 +491,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
 
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
-
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-	buf = exynos_gem_obj->buffer;
-
-	if (obj->import_attach)
-		drm_prime_gem_destroy(obj, buf->sgt);
-
 	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
@@ -595,24 +574,34 @@ unlock:
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
-	unsigned long f_vaddr;
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	unsigned long pfn;
 	pgoff_t page_offset;
 	int ret;
 
 	page_offset = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;
-	f_vaddr = (unsigned long)vmf->virtual_address;
 
-	mutex_lock(&dev->struct_mutex);
-
-	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
-	if (ret < 0)
-		DRM_ERROR("failed to map a buffer with user.\n");
+	if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
+		DRM_ERROR("invalid page offset\n");
+		ret = -EINVAL;
+		goto out;
+	}
 
-	mutex_unlock(&dev->struct_mutex);
+	pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
+	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
 
-	return convert_to_vm_err_msg(ret);
+out:
+	switch (ret) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
 }
 
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
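
The fault handler now translates the vm_insert_mixed() result inline instead of calling convert_to_vm_err_msg(). A user-space sketch of that mapping with stand-in fault codes; the kernel's VM_FAULT_* values differ, and the kernel version also treats -ERESTARTSYS like -EINTR:

#include <stdio.h>
#include <errno.h>

enum fault_code { FAULT_NOPAGE, FAULT_OOM, FAULT_SIGBUS };

static enum fault_code to_fault(int err)
{
	switch (err) {
	case 0:
	case -EINTR:		/* interrupted: retry the faulting access */
		return FAULT_NOPAGE;
	case -ENOMEM:
		return FAULT_OOM;
	default:
		return FAULT_SIGBUS;	/* anything else is fatal */
	}
}

int main(void)
{
	printf("%d %d %d\n", to_fault(0), to_fault(-ENOMEM), to_fault(-EINVAL));
	return 0;
}
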
@@ -631,11 +620,17 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	obj = vma->vm_private_data;
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	ret = check_gem_flags(exynos_gem_obj->flags);
-	if (ret)
-		goto err_close_vm;
+	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);
 
-	update_vm_cache_attr(exynos_gem_obj, vma);
+	/* non-cacheable by default. */
+	if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	else
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 
 	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
 	if (ret)
@@ -649,3 +644,76 @@ err_close_vm:
 
 	return ret;
 }
+
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	int npages;
+
+	npages = exynos_gem_obj->size >> PAGE_SHIFT;
+
+	return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
+}
+
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+				     struct dma_buf_attachment *attach,
+				     struct sg_table *sgt)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int npages;
+	int ret;
+
+	exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
+	if (IS_ERR(exynos_gem_obj)) {
+		ret = PTR_ERR(exynos_gem_obj);
+		return ERR_PTR(ret);
+	}
+
+	exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);
+
+	npages = exynos_gem_obj->size >> PAGE_SHIFT;
+	exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!exynos_gem_obj->pages) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
+			npages);
+	if (ret < 0)
+		goto err_free_large;
+
+	if (sgt->nents == 1) {
+		/* always physically contiguous memory if sgt->nents is 1. */
+		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
+	} else {
+		/*
+		 * This case could be either CONTIG or NONCONTIG, but for
+		 * now it is treated as NONCONTIG.
+		 * TODO: find a way for the exporter to notify the importer
+		 * of its buffer type.
+		 */
+		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
+	}
+
+	return &exynos_gem_obj->base;
+
+err_free_large:
+	drm_free_large(exynos_gem_obj->pages);
+err:
+	drm_gem_object_release(&exynos_gem_obj->base);
+	kfree(exynos_gem_obj);
+	return ERR_PTR(ret);
+}
+
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	return NULL;
+}
+
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	/* Nothing to do */
+}
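
The import path above infers contiguity from the scatter-gather table: a single entry is one physically contiguous chunk, while anything else is conservatively treated as non-contiguous, as the TODO notes. The predicate in isolation:

#include <stdio.h>

/* one sg entry == one contiguous chunk; more entries may still happen
 * to be adjacent, but the importer cannot tell, so it assumes not */
static int import_is_contig(unsigned int nents)
{
	return nents == 1;
}

int main(void)
{
	printf("nents=1 -> contig=%d\n", import_is_contig(1));
	printf("nents=3 -> contig=%d\n", import_is_contig(3));
	return 0;
}
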
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 6f42e2248288..cd62f8410d1e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -20,35 +20,6 @@
 #define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG)
 
 /*
- * exynos drm gem buffer structure.
- *
- * @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
- * *userptr: user space address.
- * @dma_addr: bus address(accessed by dma) to allocated memory region.
- *	- this address could be physical address without IOMMU and
- *	device address with IOMMU.
- * @write: whether pages will be written to by the caller.
- * @pages: Array of backing pages.
- * @sgt: sg table to transfer page data.
- * @size: size of allocated memory region.
- * @pfnmap: indicate whether memory region from userptr is mmaped with
- *	VM_PFNMAP or not.
- */
-struct exynos_drm_gem_buf {
-	void 			*cookie;
-	void __iomem		*kvaddr;
-	unsigned long		userptr;
-	dma_addr_t		dma_addr;
-	struct dma_attrs	dma_attrs;
-	unsigned int		write;
-	struct page		**pages;
-	struct sg_table		*sgt;
-	unsigned long		size;
-	bool			pfnmap;
-};
-
-/*
  * exynos drm buffer structure.
  *
  * @base: a gem object.
@@ -59,18 +30,28 @@ struct exynos_drm_gem_buf {
  *	by user request or at framebuffer creation.
  *	continuous memory region allocated by user request
  *	or at framebuffer creation.
+ * @flags: memory type and cache attributes of the allocated buffer.
  * @size: size requested from user, in bytes and this size is aligned
  *	in page unit.
- * @flags: indicate memory type to allocated buffer and cache attruibute.
+ * @cookie: cookie returned by dma_alloc_attrs
+ * @kvaddr: kernel virtual address of the allocated memory region.
+ * @dma_addr: bus address (as accessed by the DMA device) of the
+ *	allocated memory region - a physical address without an IOMMU,
+ *	a device address with one.
+ * @pages: Array of backing pages.
  *
  * P.S. this object is exposed to user space as kms_bo.handle, so
  *	the user can access the buffer through kms_bo.handle.
  */
 struct exynos_drm_gem_obj {
-	struct drm_gem_object		base;
-	struct exynos_drm_gem_buf	*buffer;
-	unsigned long			size;
-	unsigned int			flags;
+	struct drm_gem_object	base;
+	unsigned int		flags;
+	unsigned long		size;
+	void			*cookie;
+	void __iomem		*kvaddr;
+	dma_addr_t		dma_addr;
+	struct dma_attrs	dma_attrs;
+	struct page		**pages;
 };
 
 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -177,4 +158,13 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
 				struct sg_table *sgt,
 				enum dma_data_direction dir);
 
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+				     struct dma_buf_attachment *attach,
+				     struct sg_table *sgt);
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
 #endif
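
The consolidated struct keeps drm_gem_object embedded as its first member, so the driver object is recovered from a core pointer with container_of() arithmetic, as to_exynos_gem_obj() does. A self-contained sketch of the pattern with stand-in types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_gem_object { unsigned long size; };	/* stand-in core type */

struct exynos_gem {
	struct drm_gem_object base;	/* embedded base object */
	unsigned int flags;
};

static struct exynos_gem *to_exynos(struct drm_gem_object *obj)
{
	return container_of(obj, struct exynos_gem, base);
}

int main(void)
{
	struct exynos_gem g = { { 4096 }, 0x2 };

	printf("flags 0x%x, size %lu\n",
	       to_exynos(&g.base)->flags, g.base.size);
	return 0;
}
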
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index f1c6b76c127f..808a0a013780 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -582,9 +582,17 @@ static int gsc_src_set_transf(struct device *dev,
 		break;
 	case EXYNOS_DRM_DEGREE_180:
 		cfg |= GSC_IN_ROT_180;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	case EXYNOS_DRM_DEGREE_270:
 		cfg |= GSC_IN_ROT_270;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	default:
 		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
@@ -845,9 +853,17 @@ static int gsc_dst_set_transf(struct device *dev,
 		break;
 	case EXYNOS_DRM_DEGREE_180:
 		cfg |= GSC_IN_ROT_180;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	case EXYNOS_DRM_DEGREE_270:
 		cfg |= GSC_IN_ROT_270;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	default:
 		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
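
The parallel hunks in the src and dst transform setters reflect a geometric identity: a 180-degree rotation equals a horizontal flip composed with a vertical flip, so a user-requested flip on top of rot-180 cancels one axis instead of adding one. A sketch of that cancellation with hypothetical flip bits; the real GSC register encoding is more involved:

#include <stdio.h>

#define XFLIP 0x1	/* hypothetical bits, not the GSC_IN_ROT_* ones */
#define YFLIP 0x2

/* rot180 == XFLIP|YFLIP, and flipping the same axis twice is a no-op,
 * so composing a flip with rot180 is an XOR of the flip bits */
static unsigned int compose_with_rot180(unsigned int requested_flips)
{
	return requested_flips ^ (XFLIP | YFLIP);
}

int main(void)
{
	printf("no flip    -> 0x%x\n", compose_with_rot180(0));
	printf("one flip   -> 0x%x\n", compose_with_rot180(XFLIP));
	printf("both flips -> 0x%x\n", compose_with_rot180(XFLIP | YFLIP));
	return 0;
}
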
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index d4ec7465e9cc..055e8ec2ef21 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -87,10 +87,8 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
 	struct device *dev = drm_dev->dev;
 	int ret;
 
-	if (!dev->archdata.mapping) {
-		DRM_ERROR("iommu_mapping is null.\n");
-		return -EFAULT;
-	}
+	if (!dev->archdata.mapping)
+		return 0;
 
 	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
 					sizeof(*subdrv_dev->dma_parms),
@@ -144,17 +142,3 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
 	iommu_detach_device(mapping->domain, subdrv_dev);
 	drm_release_iommu_mapping(drm_dev);
 }
-
-int drm_iommu_attach_device_if_possible(struct exynos_drm_crtc *exynos_crtc,
-			struct drm_device *drm_dev, struct device *subdrv_dev)
-{
-	int ret = 0;
-
-	if (is_drm_iommu_supported(drm_dev)) {
-		if (exynos_crtc->ops->clear_channels)
-			exynos_crtc->ops->clear_channels(exynos_crtc);
-		return drm_iommu_attach_device(drm_dev, subdrv_dev);
-	}
-
-	return ret;
-}
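
After this change the helper treats a missing IOMMU mapping as success rather than -EFAULT, which is what lets every caller in this series drop its is_drm_iommu_supported() guard. The shape of the pattern, with stand-in types:

#include <stdio.h>

struct dev { void *iommu_mapping; };

static int attach(struct dev *d)
{
	if (!d->iommu_mapping)
		return 0;	/* no IOMMU: nothing to do, not an error */
	/* ... the real helper sets up dma params and attaches here ... */
	return 0;
}

int main(void)
{
	struct dev no_iommu = { NULL };

	printf("attach -> %d\n", attach(&no_iommu));	/* 0: no guard needed */
	return 0;
}
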
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 8341c7a475b4..dc1b5441f491 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -29,19 +29,11 @@ void drm_iommu_detach_device(struct drm_device *dev_dev,
 
 static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
 {
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct device *dev = drm_dev->dev;
 
 	return dev->archdata.mapping ? true : false;
-#else
-	return false;
-#endif
 }
 
-int drm_iommu_attach_device_if_possible(
-		struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
-		struct device *subdrv_dev);
-
 #else
 
 static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
@@ -69,12 +61,5 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
 	return false;
 }
 
-static inline int drm_iommu_attach_device_if_possible(
-		struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
-		struct device *subdrv_dev)
-{
-	return 0;
-}
-
 #endif
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67e5451e066f..67d24236e745 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -1622,12 +1622,10 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 		INIT_LIST_HEAD(&ippdrv->cmd_list);
 		mutex_init(&ippdrv->cmd_lock);
 
-		if (is_drm_iommu_supported(drm_dev)) {
-			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
-			if (ret) {
-				DRM_ERROR("failed to activate iommu\n");
-				goto err;
-			}
+		ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+		if (ret) {
+			DRM_ERROR("failed to activate iommu\n");
+			goto err;
 		}
 	}
 
@@ -1637,8 +1635,7 @@ err:
 	/* get ipp driver entry */
 	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
 						drv_list) {
-		if (is_drm_iommu_supported(drm_dev))
-			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+		drm_iommu_detach_device(drm_dev, ippdrv->dev);
 
 		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
 				ippdrv->prop_list.ipp_id);
@@ -1654,8 +1651,7 @@ static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 
 	/* get ipp driver entry */
 	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
-		if (is_drm_iommu_supported(drm_dev))
-			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+		drm_iommu_detach_device(drm_dev, ippdrv->dev);
 
 		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
 				ippdrv->prop_list.ipp_id);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a729980d3c2f..714822441467 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -20,12 +20,6 @@
 #include "exynos_drm_gem.h"
 #include "exynos_drm_plane.h"
 
-static const uint32_t formats[] = {
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_NV12,
-};
-
 /*
  * This function is to get X or Y size shown via screen. This needs length and
  * start position of CRTC.
@@ -97,29 +91,18 @@ static void exynos_plane_mode_set(struct drm_plane *plane,
 	/* set drm framebuffer data. */
 	exynos_plane->src_x = src_x;
 	exynos_plane->src_y = src_y;
-	exynos_plane->src_width = (actual_w * exynos_plane->h_ratio) >> 16;
-	exynos_plane->src_height = (actual_h * exynos_plane->v_ratio) >> 16;
-	exynos_plane->fb_width = fb->width;
-	exynos_plane->fb_height = fb->height;
-	exynos_plane->bpp = fb->bits_per_pixel;
-	exynos_plane->pitch = fb->pitches[0];
-	exynos_plane->pixel_format = fb->pixel_format;
+	exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16;
+	exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16;
 
 	/* set plane range to be displayed. */
 	exynos_plane->crtc_x = crtc_x;
 	exynos_plane->crtc_y = crtc_y;
-	exynos_plane->crtc_width = actual_w;
-	exynos_plane->crtc_height = actual_h;
-
-	/* set drm mode data. */
-	exynos_plane->mode_width = mode->hdisplay;
-	exynos_plane->mode_height = mode->vdisplay;
-	exynos_plane->refresh = mode->vrefresh;
-	exynos_plane->scan_flag = mode->flags;
+	exynos_plane->crtc_w = actual_w;
+	exynos_plane->crtc_h = actual_h;
 
 	DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
 			exynos_plane->crtc_x, exynos_plane->crtc_y,
-			exynos_plane->crtc_width, exynos_plane->crtc_height);
+			exynos_plane->crtc_w, exynos_plane->crtc_h);
 
 	plane->crtc = crtc;
 }
@@ -143,17 +126,17 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
 	if (!state->fb)
 		return 0;
 
-	nr = exynos_drm_fb_get_buf_cnt(state->fb);
+	nr = drm_format_num_planes(state->fb->pixel_format);
 	for (i = 0; i < nr; i++) {
-		struct exynos_drm_gem_buf *buffer =
-					exynos_drm_fb_buffer(state->fb, i);
+		struct exynos_drm_gem_obj *obj =
+					exynos_drm_fb_gem_obj(state->fb, i);
 
-		if (!buffer) {
-			DRM_DEBUG_KMS("buffer is null\n");
+		if (!obj) {
+			DRM_DEBUG_KMS("gem object is null\n");
 			return -EFAULT;
 		}
 
-		exynos_plane->dma_addr[i] = buffer->dma_addr +
+		exynos_plane->dma_addr[i] = obj->dma_addr +
 					    state->fb->offsets[i];
 
 		DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
@@ -179,8 +162,10 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
 			      state->src_x >> 16, state->src_y >> 16,
 			      state->src_w >> 16, state->src_h >> 16);
 
-	if (exynos_crtc->ops->win_commit)
-		exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
+	exynos_plane->pending_fb = state->fb;
+
+	if (exynos_crtc->ops->update_plane)
+		exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
 }
 
 static void exynos_plane_atomic_disable(struct drm_plane *plane,
@@ -192,9 +177,9 @@ static void exynos_plane_atomic_disable(struct drm_plane *plane,
 	if (!old_state->crtc)
 		return;
 
-	if (exynos_crtc->ops->win_disable)
-		exynos_crtc->ops->win_disable(exynos_crtc,
-					      exynos_plane->zpos);
+	if (exynos_crtc->ops->disable_plane)
+		exynos_crtc->ops->disable_plane(exynos_crtc,
+						exynos_plane);
 }
 
 static const struct drm_plane_helper_funcs plane_helper_funcs = {
@@ -226,13 +211,14 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
 int exynos_plane_init(struct drm_device *dev,
 		      struct exynos_drm_plane *exynos_plane,
 		      unsigned long possible_crtcs, enum drm_plane_type type,
+		      const uint32_t *formats, unsigned int fcount,
 		      unsigned int zpos)
 {
 	int err;
 
 	err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
-				       &exynos_plane_funcs, formats,
-				       ARRAY_SIZE(formats), type);
+				       &exynos_plane_funcs, formats, fcount,
+				       type);
 	if (err) {
 		DRM_ERROR("failed to initialize plane\n");
 		return err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 8c88ae983c38..476c9340b591 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -12,4 +12,5 @@
 int exynos_plane_init(struct drm_device *dev,
 		      struct exynos_drm_plane *exynos_plane,
 		      unsigned long possible_crtcs, enum drm_plane_type type,
+		      const uint32_t *formats, unsigned int fcount,
 		      unsigned int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 3413393d8a16..75718e1bc3dd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -25,7 +25,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_vidi.h"
 
 /* vidi has totally three virtual windows. */
@@ -35,11 +34,10 @@
 					connector)
 
 struct vidi_context {
-	struct exynos_drm_display	display;
+	struct drm_encoder		encoder;
 	struct platform_device		*pdev;
 	struct drm_device		*drm_dev;
 	struct exynos_drm_crtc		*crtc;
-	struct drm_encoder		*encoder;
 	struct drm_connector		connector;
 	struct exynos_drm_plane		planes[WINDOWS_NR];
 	struct edid			*raw_edid;
@@ -55,9 +53,9 @@ struct vidi_context {
 	int				pipe;
 };
 
-static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d)
+static inline struct vidi_context *encoder_to_vidi(struct drm_encoder *e)
 {
-	return container_of(d, struct vidi_context, display);
+	return container_of(e, struct vidi_context, encoder);
 }
 
 static const char fake_edid_info[] = {
@@ -85,6 +83,12 @@ static const char fake_edid_info[] = {
 	0x00, 0x00, 0x00, 0x06
 };
 
+static const uint32_t formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_NV12,
+};
+
 static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct vidi_context *ctx = crtc->ctx;
@@ -100,7 +104,7 @@ static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
 	/*
 	 * in case of page flip request, vidi_finish_pageflip function
 	 * will not be called because direct_vblank is true and then
-	 * that function will be called by crtc_ops->win_commit callback
+	 * that function will be called by crtc_ops->update_plane callback
 	 */
 	schedule_work(&ctx->work);
 
@@ -118,19 +122,14 @@ static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
 		ctx->vblank_on = false;
 }
 
-static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void vidi_update_plane(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane)
 {
 	struct vidi_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
 
 	if (ctx->suspended)
 		return;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
 	DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
 
 	if (ctx->vblank_on)
@@ -179,13 +178,14 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
 	.disable = vidi_disable,
 	.enable_vblank = vidi_enable_vblank,
 	.disable_vblank = vidi_disable_vblank,
-	.win_commit = vidi_win_commit,
+	.update_plane = vidi_update_plane,
 };
 
 static void vidi_fake_vblank_handler(struct work_struct *work)
 {
 	struct vidi_context *ctx = container_of(work, struct vidi_context,
 					work);
+	int win;
 
 	if (ctx->pipe < 0)
 		return;
@@ -196,7 +196,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
 	mutex_lock(&ctx->lock);
 
 	if (ctx->direct_vblank) {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 		ctx->direct_vblank = false;
 		mutex_unlock(&ctx->lock);
 		return;
@@ -204,7 +204,14 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
 
 	mutex_unlock(&ctx->lock);
 
-	exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+	for (win = 0; win < WINDOWS_NR; win++) {
+		struct exynos_drm_plane *plane = &ctx->planes[win];
+
+		if (!plane->pending_fb)
+			continue;
+
+		exynos_drm_crtc_finish_update(ctx->crtc, plane);
+	}
 }
 
 static int vidi_show_connection(struct device *dev,
@@ -259,9 +266,7 @@ static DEVICE_ATTR(connection, 0644, vidi_show_connection,
 int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
 				struct drm_file *file_priv)
 {
-	struct vidi_context *ctx = NULL;
-	struct drm_encoder *encoder;
-	struct exynos_drm_display *display;
+	struct vidi_context *ctx = dev_get_drvdata(drm_dev->dev);
 	struct drm_exynos_vidi_connection *vidi = data;
 
 	if (!vidi) {
@@ -274,21 +279,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
 		return -EINVAL;
 	}
 
-	list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
-								head) {
-		display = exynos_drm_get_display(encoder);
-
-		if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
-			ctx = display_to_vidi(display);
-			break;
-		}
-	}
-
-	if (!ctx) {
-		DRM_DEBUG_KMS("not found virtual device type encoder.\n");
-		return -EINVAL;
-	}
-
 	if (ctx->connected == vidi->connection) {
 		DRM_DEBUG_KMS("same connection request.\n");
 		return -EINVAL;
@@ -381,7 +371,7 @@ static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
 {
 	struct vidi_context *ctx = ctx_from_connector(connector);
 
-	return ctx->encoder;
+	return &ctx->encoder;
 }
 
 static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
@@ -389,14 +379,12 @@ static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
 	.best_encoder = vidi_best_encoder,
 };
 
-static int vidi_create_connector(struct exynos_drm_display *display,
-				struct drm_encoder *encoder)
+static int vidi_create_connector(struct drm_encoder *encoder)
 {
-	struct vidi_context *ctx = display_to_vidi(display);
+	struct vidi_context *ctx = encoder_to_vidi(encoder);
 	struct drm_connector *connector = &ctx->connector;
 	int ret;
 
-	ctx->encoder = encoder;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	ret = drm_connector_init(ctx->drm_dev, connector,
@@ -413,19 +401,47 @@ static int vidi_create_connector(struct exynos_drm_display *display,
 	return 0;
 }
 
+static bool exynos_vidi_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void exynos_vidi_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void exynos_vidi_enable(struct drm_encoder *encoder)
+{
+}
+
+static void exynos_vidi_disable(struct drm_encoder *encoder)
+{
+}
 
-static struct exynos_drm_display_ops vidi_display_ops = {
-	.create_connector = vidi_create_connector,
+static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
+	.mode_fixup = exynos_vidi_mode_fixup,
+	.mode_set = exynos_vidi_mode_set,
+	.enable = exynos_vidi_enable,
+	.disable = exynos_vidi_disable,
+};
+
+static struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 static int vidi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct vidi_context *ctx = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
+	struct drm_encoder *encoder = &ctx->encoder;
 	struct exynos_drm_plane *exynos_plane;
 	enum drm_plane_type type;
 	unsigned int zpos;
-	int ret;
+	int pipe, ret;
 
 	vidi_ctx_initialize(ctx, drm_dev);
 
@@ -433,7 +449,8 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, formats,
+					ARRAY_SIZE(formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -447,9 +464,24 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
 		return PTR_ERR(ctx->crtc);
 	}
 
-	ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display);
+	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_VIDI);
+	if (pipe < 0)
+		return pipe;
+
+	encoder->possible_crtcs = 1 << pipe;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
+
+	ret = vidi_create_connector(encoder);
 	if (ret) {
-		ctx->crtc->base.funcs->destroy(&ctx->crtc->base);
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
 		return ret;
 	}
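vidi_bind() now wires the encoder to its CRTC through the possible_crtcs bitmask, where bit N selects the CRTC registered at pipe N. A tiny worked example with an assumed pipe value:

	#include <stdio.h>

	int main(void)
	{
		int pipe = 2;	/* assumed: VIDI CRTC is the third pipe */
		unsigned int possible_crtcs = 1u << pipe;

		printf("possible_crtcs = 0x%x\n", possible_crtcs); /* 0x4 */
		return 0;
	}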
 
@@ -475,8 +507,6 @@ static int vidi_probe(struct platform_device *pdev)
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
-	ctx->display.ops = &vidi_display_ops;
 	ctx->default_win = 0;
 	ctx->pdev = pdev;
 
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 4a00990e4ae4..932f7fa240f8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -22,7 +22,6 @@
 #include "regs-hdmi.h"
 
 #include <linux/kernel.h>
-#include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
@@ -33,8 +32,8 @@
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
 #include <linux/io.h>
-#include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/hdmi.h>
 #include <linux/component.h>
@@ -48,7 +47,6 @@
 #include "exynos_mixer.h"
 
 #include <linux/gpio.h>
-#include <media/s5p_hdmi.h>
 
 #define ctx_from_connector(c)	container_of(c, struct hdmi_context, connector)
 
@@ -88,109 +86,14 @@ struct hdmi_resources {
 	int				regul_count;
 };
 
-struct hdmi_tg_regs {
-	u8 cmd[1];
-	u8 h_fsz[2];
-	u8 hact_st[2];
-	u8 hact_sz[2];
-	u8 v_fsz[2];
-	u8 vsync[2];
-	u8 vsync2[2];
-	u8 vact_st[2];
-	u8 vact_sz[2];
-	u8 field_chg[2];
-	u8 vact_st2[2];
-	u8 vact_st3[2];
-	u8 vact_st4[2];
-	u8 vsync_top_hdmi[2];
-	u8 vsync_bot_hdmi[2];
-	u8 field_top_hdmi[2];
-	u8 field_bot_hdmi[2];
-	u8 tg_3d[1];
-};
-
-struct hdmi_v13_core_regs {
-	u8 h_blank[2];
-	u8 v_blank[3];
-	u8 h_v_line[3];
-	u8 vsync_pol[1];
-	u8 int_pro_mode[1];
-	u8 v_blank_f[3];
-	u8 h_sync_gen[3];
-	u8 v_sync_gen1[3];
-	u8 v_sync_gen2[3];
-	u8 v_sync_gen3[3];
-};
-
-struct hdmi_v14_core_regs {
-	u8 h_blank[2];
-	u8 v2_blank[2];
-	u8 v1_blank[2];
-	u8 v_line[2];
-	u8 h_line[2];
-	u8 hsync_pol[1];
-	u8 vsync_pol[1];
-	u8 int_pro_mode[1];
-	u8 v_blank_f0[2];
-	u8 v_blank_f1[2];
-	u8 h_sync_start[2];
-	u8 h_sync_end[2];
-	u8 v_sync_line_bef_2[2];
-	u8 v_sync_line_bef_1[2];
-	u8 v_sync_line_aft_2[2];
-	u8 v_sync_line_aft_1[2];
-	u8 v_sync_line_aft_pxl_2[2];
-	u8 v_sync_line_aft_pxl_1[2];
-	u8 v_blank_f2[2]; /* for 3D mode */
-	u8 v_blank_f3[2]; /* for 3D mode */
-	u8 v_blank_f4[2]; /* for 3D mode */
-	u8 v_blank_f5[2]; /* for 3D mode */
-	u8 v_sync_line_aft_3[2];
-	u8 v_sync_line_aft_4[2];
-	u8 v_sync_line_aft_5[2];
-	u8 v_sync_line_aft_6[2];
-	u8 v_sync_line_aft_pxl_3[2];
-	u8 v_sync_line_aft_pxl_4[2];
-	u8 v_sync_line_aft_pxl_5[2];
-	u8 v_sync_line_aft_pxl_6[2];
-	u8 vact_space_1[2];
-	u8 vact_space_2[2];
-	u8 vact_space_3[2];
-	u8 vact_space_4[2];
-	u8 vact_space_5[2];
-	u8 vact_space_6[2];
-};
-
-struct hdmi_v13_conf {
-	struct hdmi_v13_core_regs core;
-	struct hdmi_tg_regs tg;
-};
-
-struct hdmi_v14_conf {
-	struct hdmi_v14_core_regs core;
-	struct hdmi_tg_regs tg;
-};
-
-struct hdmi_conf_regs {
-	int pixel_clock;
-	int cea_video_id;
-	enum hdmi_picture_aspect aspect_ratio;
-	union {
-		struct hdmi_v13_conf v13_conf;
-		struct hdmi_v14_conf v14_conf;
-	} conf;
-};
-
 struct hdmi_context {
-	struct exynos_drm_display	display;
+	struct drm_encoder		encoder;
 	struct device			*dev;
 	struct drm_device		*drm_dev;
 	struct drm_connector		connector;
-	struct drm_encoder		*encoder;
 	bool				hpd;
 	bool				powered;
 	bool				dvi_mode;
-	struct mutex			hdmi_mutex;
 
 	void __iomem			*regs;
 	int				irq;
@@ -201,22 +104,20 @@ struct hdmi_context {
 
 	/* current hdmiphy conf regs */
 	struct drm_display_mode		current_mode;
-	struct hdmi_conf_regs		mode_conf;
+	u8				cea_video_id;
 
 	struct hdmi_resources		res;
+	const struct hdmi_driver_data	*drv_data;
 
 	int				hpd_gpio;
 	void __iomem			*regs_hdmiphy;
-	const struct hdmiphy_config		*phy_confs;
-	unsigned int			phy_conf_count;
 
 	struct regmap			*pmureg;
-	enum hdmi_type			type;
 };
 
-static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d)
+static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
 {
-	return container_of(d, struct hdmi_context, display);
+	return container_of(e, struct hdmi_context, encoder);
 }
 
 struct hdmiphy_config {
@@ -624,6 +525,16 @@ static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
 	writeb(value, hdata->regs + reg_id);
 }
 
+static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
+				   int bytes, u32 val)
+{
+	while (--bytes >= 0) {
+		writeb(val & 0xff, hdata->regs + reg_id);
+		val >>= 8;
+		reg_id += 4;
+	}
+}
+
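hdmi_reg_writev() collapses what used to be pairs and triples of hdmi_reg_writeb() calls: it spreads a value across consecutive register slots, least-significant byte first, with the slots 4 bytes apart (one byte of payload per 32-bit register). A runnable user-space model of the helper under that assumed layout:

	#include <stdint.h>
	#include <stdio.h>

	static void writev_model(uint8_t *regs, uint32_t reg_id,
				 int bytes, uint32_t val)
	{
		while (--bytes >= 0) {
			regs[reg_id] = val & 0xff;	/* LSB first */
			val >>= 8;
			reg_id += 4;	/* next 32-bit register slot */
		}
	}

	int main(void)
	{
		uint8_t regs[16] = { 0 };

		writev_model(regs, 0, 3, 0xabcdef);
		/* prints: ef cd ab */
		printf("%02x %02x %02x\n", regs[0], regs[4], regs[8]);
		return 0;
	}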
 static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
 				 u32 reg_id, u32 value, u32 mask)
 {
@@ -930,7 +841,7 @@ static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
 
 static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
 {
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		hdmi_v13_regs_dump(hdata, prefix);
 	else
 		hdmi_v14_regs_dump(hdata, prefix);
@@ -957,7 +868,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
 	u32 hdr_sum;
 	u8 chksum;
 	u32 mod;
-	u32 vic;
+	u8 ar;
 
 	mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
 	if (hdata->dvi_mode) {
@@ -988,27 +899,22 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
 		 * Set the aspect ratio as per the mode, mentioned in
 		 * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
 		 */
-		switch (hdata->mode_conf.aspect_ratio) {
+		ar = hdata->current_mode.picture_aspect_ratio;
+		switch (ar) {
 		case HDMI_PICTURE_ASPECT_4_3:
-			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
-					hdata->mode_conf.aspect_ratio |
-					AVI_4_3_CENTER_RATIO);
+			ar |= AVI_4_3_CENTER_RATIO;
 			break;
 		case HDMI_PICTURE_ASPECT_16_9:
-			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
-					hdata->mode_conf.aspect_ratio |
-					AVI_16_9_CENTER_RATIO);
+			ar |= AVI_16_9_CENTER_RATIO;
 			break;
 		case HDMI_PICTURE_ASPECT_NONE:
 		default:
-			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
-					hdata->mode_conf.aspect_ratio |
-					AVI_SAME_AS_PIC_ASPECT_RATIO);
+			ar |= AVI_SAME_AS_PIC_ASPECT_RATIO;
 			break;
 		}
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), ar);
 
-		vic = hdata->mode_conf.cea_video_id;
-		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), hdata->cea_video_id);
 
 		chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
 					infoframe->any.length, hdr_sum);
@@ -1038,10 +944,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
 {
 	struct hdmi_context *hdata = ctx_from_connector(connector);
 
-	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+	if (gpio_get_value(hdata->hpd_gpio))
+		return connector_status_connected;
 
-	return hdata->hpd ? connector_status_connected :
-			connector_status_disconnected;
+	return connector_status_disconnected;
 }
 
 static void hdmi_connector_destroy(struct drm_connector *connector)
@@ -1091,8 +997,8 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
 {
 	int i;
 
-	for (i = 0; i < hdata->phy_conf_count; i++)
-		if (hdata->phy_confs[i].pixel_clock == pixel_clock)
+	for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
+		if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock)
 			return i;
 
 	DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
@@ -1125,7 +1031,7 @@ static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
 {
 	struct hdmi_context *hdata = ctx_from_connector(connector);
 
-	return hdata->encoder;
+	return &hdata->encoder;
 }
 
 static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -1134,14 +1040,12 @@ static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
 	.best_encoder = hdmi_best_encoder,
 };
 
-static int hdmi_create_connector(struct exynos_drm_display *display,
-			struct drm_encoder *encoder)
+static int hdmi_create_connector(struct drm_encoder *encoder)
 {
-	struct hdmi_context *hdata = display_to_hdmi(display);
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
 	struct drm_connector *connector = &hdata->connector;
 	int ret;
 
-	hdata->encoder = encoder;
 	connector->interlace_allowed = true;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
@@ -1159,23 +1063,30 @@ static int hdmi_create_connector(struct exynos_drm_display *display,
 	return 0;
 }
 
-static void hdmi_mode_fixup(struct exynos_drm_display *display,
-				struct drm_connector *connector,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+static bool hdmi_mode_fixup(struct drm_encoder *encoder,
+			    const struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
 {
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
 	struct drm_display_mode *m;
 	int mode_ok;
 
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder)
+			break;
+	}
+
+	if (connector->encoder != encoder)
+		return true;
+
 	mode_ok = hdmi_mode_valid(connector, adjusted_mode);
 
 	/* just return if user desired mode exists. */
 	if (mode_ok == MODE_OK)
-		return;
+		return true;
 
 	/*
 	 * otherwise, find the most suitable mode among modes and change it
@@ -1195,6 +1106,8 @@ static void hdmi_mode_fixup(struct exynos_drm_display *display,
 			break;
 		}
 	}
+
+	return true;
 }
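Because the encoder helper's mode_fixup() no longer receives a connector, the code above scans mode_config.connector_list for the connector currently bound to this encoder and returns early (still true) when none is attached. A condensed model of that lookup (illustrative types; the real list is a struct list_head walked under the mode_config lock):

	struct encoder;

	struct connector {
		struct connector *next;
		struct encoder *encoder;
	};

	static struct connector *find_for_encoder(struct connector *head,
						  struct encoder *enc)
	{
		struct connector *c;

		for (c = head; c; c = c->next)
			if (c->encoder == enc)
				return c;	/* drives this encoder */
		return NULL;			/* encoder unused */
	}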
 
 static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1257,7 +1170,7 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
 	hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
 	hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
 
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
 	else
 		hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
@@ -1391,7 +1304,7 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
 				HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
 	}
 
-	if (hdata->type == HDMI_TYPE13) {
+	if (hdata->drv_data->type == HDMI_TYPE13) {
 		/* choose bluescreen color */
 		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
 		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
@@ -1424,66 +1337,94 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
 
 static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
 {
-	const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
-	const struct hdmi_v13_core_regs *core =
-		&hdata->mode_conf.conf.v13_conf.core;
+	struct drm_display_mode *m = &hdata->current_mode;
+	unsigned int val;
 	int tries;
 
-	/* setting core registers */
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]);
-	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
-	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+	hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3,
+			(m->htotal << 12) | m->vtotal);
+
+	val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+	hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1, val);
+
+	val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
+	hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1, val);
+
+	val = (m->hsync_start - m->hdisplay - 2);
+	val |= ((m->hsync_end - m->hdisplay - 2) << 10);
+	val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0) << 20;
+	hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val);
+
+	/*
+	 * Quirk of the exynos HDMI IP design: hsync_start and hsync_end
+	 * must be programmed 2 pixels less than their calculated values
+	 * (already applied in the H_SYNC_GEN value above).
+	 */
+
+	/*
+	 * The following values and calculations differ between
+	 * interlaced and progressive modes.
+	 */
+	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* Interlaced Mode */
+		val = ((m->vsync_end - m->vdisplay) / 2);
+		val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
+
+		val = m->vtotal / 2;
+		val |= ((m->vtotal - m->vdisplay) / 2) << 11;
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
+
+		val = (m->vtotal +
+			((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
+		val |= m->vtotal << 11;
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, val);
+
+		val = ((m->vtotal / 2) + 7);
+		val |= ((m->vtotal / 2) + 2) << 12;
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, val);
+
+		val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		val |= ((m->htotal / 2) +
+			(m->hsync_start - m->hdisplay)) << 12;
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, val);
+
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				(m->vtotal - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
+
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249);
+	} else {
+		/* Progressive Mode */
+
+		val = m->vtotal;
+		val |= (m->vtotal - m->vdisplay) << 11;
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
+
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, 0);
+
+		val = (m->vsync_end - m->vdisplay);
+		val |= ((m->vsync_start - m->vdisplay) << 12);
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
+
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, 0x1001);
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, 0x1001);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				m->vtotal - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
+	}
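To make the register packing above concrete, here is a worked example with assumed CEA-861 1280x720p60 timings (hdisplay 1280, hsync 1390/1430, htotal 1650; vdisplay 720, vsync 725/730, vtotal 750), runnable as plain C:

	#include <stdio.h>

	int main(void)
	{
		unsigned int hdisplay = 1280, hsync_start = 1390,
			     hsync_end = 1430, htotal = 1650;
		unsigned int vdisplay = 720, vsync_start = 725,
			     vsync_end = 730, vtotal = 750;
		unsigned int val;

		printf("H_BLANK  = %u\n", htotal - hdisplay);	/* 370 */
		printf("H_V_LINE = 0x%x\n",
		       (htotal << 12) | vtotal);	/* 0x6722ee */

		val = (hsync_start - hdisplay - 2) |
		      ((hsync_end - hdisplay - 2) << 10);
		printf("H_SYNC_GEN  = 0x%x\n", val);	/* 0x2506c, +hsync */

		val = vtotal | ((vtotal - vdisplay) << 11);
		printf("V_BLANK     = 0x%x\n", val);	/* 0xf2ee */

		val = (vsync_end - vdisplay) |
		      ((vsync_start - vdisplay) << 12);
		printf("V_SYNC_GEN1 = 0x%x\n", val);	/* 0x500a */
		return 0;
	}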
+
 	/* Timing generator registers */
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+	hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
 
 	/* waiting for HDMIPHY's PLL to get to steady state */
 	for (tries = 100; tries; --tries) {
@@ -1508,144 +1449,119 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
 
 static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
 {
-	const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
-	const struct hdmi_v14_core_regs *core =
-		&hdata->mode_conf.conf.v14_conf.core;
+	struct drm_display_mode *m = &hdata->current_mode;
 	int tries;
 
-	/* setting core registers */
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]);
-	hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]);
-	hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]);
-	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
-	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0,
-			core->v_sync_line_bef_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1,
-			core->v_sync_line_bef_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0,
-			core->v_sync_line_bef_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1,
-			core->v_sync_line_bef_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0,
-			core->v_sync_line_aft_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1,
-			core->v_sync_line_aft_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0,
-			core->v_sync_line_aft_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1,
-			core->v_sync_line_aft_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0,
-			core->v_sync_line_aft_pxl_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1,
-			core->v_sync_line_aft_pxl_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0,
-			core->v_sync_line_aft_pxl_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1,
-			core->v_sync_line_aft_pxl_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0,
-			core->v_sync_line_aft_3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1,
-			core->v_sync_line_aft_3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0,
-			core->v_sync_line_aft_4[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1,
-			core->v_sync_line_aft_4[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0,
-			core->v_sync_line_aft_5[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1,
-			core->v_sync_line_aft_5[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0,
-			core->v_sync_line_aft_6[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1,
-			core->v_sync_line_aft_6[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0,
-			core->v_sync_line_aft_pxl_3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1,
-			core->v_sync_line_aft_pxl_3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0,
-			core->v_sync_line_aft_pxl_4[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1,
-			core->v_sync_line_aft_pxl_4[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0,
-			core->v_sync_line_aft_pxl_5[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1,
-			core->v_sync_line_aft_pxl_5[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0,
-			core->v_sync_line_aft_pxl_6[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1,
-			core->v_sync_line_aft_pxl_6[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
+	hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
+	hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal);
+	hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1,
+			(m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
+	hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1,
+			(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
+	hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1,
+			(m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
+	/*
+	 * Quirk of the exynos 5 HDMI IP design: hsync_start and hsync_end
+	 * must be programmed 2 pixels less than their calculated values
+	 * (applied to H_SYNC_START/END further below).
+	 */
+
+	/*
+	 * The following values and calculations differ between
+	 * interlaced and progressive modes.
+	 */
+	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* Interlaced Mode */
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
+			(m->vsync_end - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
+			(m->vsync_start - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal / 2);
+		hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
+				(m->vtotal - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2,
+				m->vtotal - m->vdisplay / 2);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, m->vtotal);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2,
+				(m->vtotal / 2) + 7);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2,
+				(m->vtotal / 2) + 2);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2,
+			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2,
+			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				(m->vtotal - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2,
+				m->vtotal - m->vdisplay / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2,
+				(m->vtotal / 2) + 1);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2,
+				(m->vtotal / 2) + 1);
+		hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2,
+				(m->vtotal / 2) + 1);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0);
+	} else {
+		/* Progressive Mode */
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
+			m->vsync_end - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
+			m->vsync_start - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal);
+		hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
+				m->vtotal - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				m->vtotal - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x47b);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x6ae);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
+		hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
+	}
+
+	/* The following values are the same regardless of scan mode */
+	hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2,
+			m->hsync_start - m->hdisplay - 2);
+	hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2,
+			m->hsync_end - m->hdisplay - 2);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_1_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_2_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_6_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F2_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_6_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
 
 	/* Timing generator registers */
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
+	hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0);
 
 	/* waiting for HDMIPHY's PLL to get to steady state */
 	for (tries = 100; tries; --tries) {
@@ -1670,7 +1586,7 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
 
 static void hdmi_mode_apply(struct hdmi_context *hdata)
 {
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		hdmi_v13_mode_apply(hdata);
 	else
 		hdmi_v14_mode_apply(hdata);
@@ -1688,7 +1604,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
 	hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
 				HDMI_PHY_ENABLE_MODE_SET);
 
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		reg = HDMI_V13_PHY_RSTOUT;
 	else
 		reg = HDMI_PHY_RSTOUT;
@@ -1702,7 +1618,7 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
 
 static void hdmiphy_poweron(struct hdmi_context *hdata)
 {
-	if (hdata->type != HDMI_TYPE14)
+	if (hdata->drv_data->type != HDMI_TYPE14)
 		return;
 
 	DRM_DEBUG_KMS("\n");
@@ -1722,7 +1638,7 @@ static void hdmiphy_poweron(struct hdmi_context *hdata)
 
 static void hdmiphy_poweroff(struct hdmi_context *hdata)
 {
-	if (hdata->type != HDMI_TYPE14)
+	if (hdata->drv_data->type != HDMI_TYPE14)
 		return;
 
 	DRM_DEBUG_KMS("\n");
@@ -1748,13 +1664,14 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
 	int i;
 
 	/* pixel clock */
-	i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
+	i = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
 	if (i < 0) {
 		DRM_ERROR("failed to find hdmiphy conf\n");
 		return;
 	}
 
-	ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32);
+	ret = hdmiphy_reg_write_buf(hdata, 0,
+			hdata->drv_data->phy_confs[i].conf, 32);
 	if (ret) {
 		DRM_ERROR("failed to configure hdmiphy\n");
 		return;
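One unit detail worth calling out: drm_display_mode.clock is in kHz while the phy tables are keyed in Hz, hence the multiplication by 1000 above. A sketch of the exact-match lookup over an assumed table shape:

	struct phy_conf_sketch {
		unsigned int pixel_clock;	/* Hz */
	};

	static int find_conf(const struct phy_conf_sketch *t, int n,
			     unsigned int hz)
	{
		int i;

		for (i = 0; i < n; i++)
			if (t[i].pixel_clock == hz)
				return i;
		return -1;	/* no exact match: mode can't be driven */
	}

	/* usage: find_conf(table, count, mode_clock_khz * 1000) */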
@@ -1776,10 +1693,8 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
 	hdmiphy_conf_reset(hdata);
 	hdmiphy_conf_apply(hdata);
 
-	mutex_lock(&hdata->hdmi_mutex);
 	hdmi_start(hdata, false);
 	hdmi_conf_init(hdata);
-	mutex_unlock(&hdata->hdmi_mutex);
 
 	hdmi_audio_init(hdata);
 
@@ -1790,271 +1705,32 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
 	hdmi_regs_dump(hdata, "start");
 }
 
-static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
-{
-	int i;
-	BUG_ON(num_bytes > 4);
-	for (i = 0; i < num_bytes; i++)
-		reg_pair[i] = (value >> (8 * i)) & 0xff;
-}
-
-static void hdmi_v13_mode_set(struct hdmi_context *hdata,
-			struct drm_display_mode *m)
+static void hdmi_mode_set(struct drm_encoder *encoder,
+			  struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
 {
-	struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
-	struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
-	unsigned int val;
-
-	hdata->mode_conf.cea_video_id =
-		drm_match_cea_mode((struct drm_display_mode *)m);
-	hdata->mode_conf.pixel_clock = m->clock * 1000;
-	hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
-
-	hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
-
-	val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
-	hdmi_set_reg(core->vsync_pol, 1, val);
-
-	val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
-	hdmi_set_reg(core->int_pro_mode, 1, val);
-
-	val = (m->hsync_start - m->hdisplay - 2);
-	val |= ((m->hsync_end - m->hdisplay - 2) << 10);
-	val |= ((m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0)<<20;
-	hdmi_set_reg(core->h_sync_gen, 3, val);
-
-	/*
-	 * Quirk requirement for exynos HDMI IP design,
-	 * 2 pixels less than the actual calculation for hsync_start
-	 * and end.
-	 */
-
-	/* Following values & calculations differ for different type of modes */
-	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
-		/* Interlaced Mode */
-		val = ((m->vsync_end - m->vdisplay) / 2);
-		val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
-		hdmi_set_reg(core->v_sync_gen1, 3, val);
-
-		val = m->vtotal / 2;
-		val |= ((m->vtotal - m->vdisplay) / 2) << 11;
-		hdmi_set_reg(core->v_blank, 3, val);
-
-		val = (m->vtotal +
-			((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
-		val |= m->vtotal << 11;
-		hdmi_set_reg(core->v_blank_f, 3, val);
-
-		val = ((m->vtotal / 2) + 7);
-		val |= ((m->vtotal / 2) + 2) << 12;
-		hdmi_set_reg(core->v_sync_gen2, 3, val);
-
-		val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
-		val |= ((m->htotal / 2) +
-			(m->hsync_start - m->hdisplay)) << 12;
-		hdmi_set_reg(core->v_sync_gen3, 3, val);
-
-		hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
-
-		hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
-	} else {
-		/* Progressive Mode */
-
-		val = m->vtotal;
-		val |= (m->vtotal - m->vdisplay) << 11;
-		hdmi_set_reg(core->v_blank, 3, val);
-
-		hdmi_set_reg(core->v_blank_f, 3, 0);
-
-		val = (m->vsync_end - m->vdisplay);
-		val |= ((m->vsync_start - m->vdisplay) << 12);
-		hdmi_set_reg(core->v_sync_gen1, 3, val);
-
-		hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value  */
-		hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value  */
-		hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
-		hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
-	}
-
-	/* Timing generator registers */
-	hdmi_set_reg(tg->cmd, 1, 0x0);
-	hdmi_set_reg(tg->h_fsz, 2, m->htotal);
-	hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
-	hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
-	hdmi_set_reg(tg->vsync, 2, 0x1);
-	hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
-}
-
-static void hdmi_v14_mode_set(struct hdmi_context *hdata,
-			struct drm_display_mode *m)
-{
-	struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
-	struct hdmi_v14_core_regs *core =
-		&hdata->mode_conf.conf.v14_conf.core;
-
-	hdata->mode_conf.cea_video_id =
-		drm_match_cea_mode((struct drm_display_mode *)m);
-	hdata->mode_conf.pixel_clock = m->clock * 1000;
-	hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
-
-	hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(core->v_line, 2, m->vtotal);
-	hdmi_set_reg(core->h_line, 2, m->htotal);
-	hdmi_set_reg(core->hsync_pol, 1,
-			(m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0);
-	hdmi_set_reg(core->vsync_pol, 1,
-			(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
-	hdmi_set_reg(core->int_pro_mode, 1,
-			(m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
-
-	/*
-	 * Quirk requirement for exynos 5 HDMI IP design,
-	 * 2 pixels less than the actual calculation for hsync_start
-	 * and end.
-	 */
-
-	/* Following values & calculations differ for different type of modes */
-	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
-		/* Interlaced Mode */
-		hdmi_set_reg(core->v_sync_line_bef_2, 2,
-			(m->vsync_end - m->vdisplay) / 2);
-		hdmi_set_reg(core->v_sync_line_bef_1, 2,
-			(m->vsync_start - m->vdisplay) / 2);
-		hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
-		hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
-		hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2);
-		hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
-		hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
-		hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
-		hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
-			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
-		hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
-			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
-		hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
-		hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2);
-		hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1);
-		hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1);
-		hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1);
-		hdmi_set_reg(tg->vact_st3, 2, 0x0);
-		hdmi_set_reg(tg->vact_st4, 2, 0x0);
-	} else {
-		/* Progressive Mode */
-		hdmi_set_reg(core->v_sync_line_bef_2, 2,
-			m->vsync_end - m->vdisplay);
-		hdmi_set_reg(core->v_sync_line_bef_1, 2,
-			m->vsync_start - m->vdisplay);
-		hdmi_set_reg(core->v2_blank, 2, m->vtotal);
-		hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
-		hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
-		hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
-		hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
-		hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
-		hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
-		hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
-		hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
-		hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
-		hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
-	}
-
-	/* Following values & calculations are same irrespective of mode type */
-	hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
-	hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
-	hdmi_set_reg(core->vact_space_1, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_2, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_3, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_4, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_5, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_6, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
-
-	/* Timing generator registers */
-	hdmi_set_reg(tg->cmd, 1, 0x0);
-	hdmi_set_reg(tg->h_fsz, 2, m->htotal);
-	hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
-	hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
-	hdmi_set_reg(tg->vsync, 2, 0x1);
-	hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->tg_3d, 1, 0x0);
-}
-
-static void hdmi_mode_set(struct exynos_drm_display *display,
-			struct drm_display_mode *mode)
-{
-	struct hdmi_context *hdata = display_to_hdmi(display);
-	struct drm_display_mode *m = mode;
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+	struct drm_display_mode *m = adjusted_mode;
 
 	DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
 		m->hdisplay, m->vdisplay,
 		m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
 		"INTERLACED" : "PROGRESSIVE");
 
-	/* preserve mode information for later use. */
-	drm_mode_copy(&hdata->current_mode, mode);
-
-	if (hdata->type == HDMI_TYPE13)
-		hdmi_v13_mode_set(hdata, mode);
-	else
-		hdmi_v14_mode_set(hdata, mode);
-}
-
-static void hdmi_commit(struct exynos_drm_display *display)
-{
-	struct hdmi_context *hdata = display_to_hdmi(display);
-
-	mutex_lock(&hdata->hdmi_mutex);
-	if (!hdata->powered) {
-		mutex_unlock(&hdata->hdmi_mutex);
-		return;
-	}
-	mutex_unlock(&hdata->hdmi_mutex);
-
-	hdmi_conf_apply(hdata);
+	drm_mode_copy(&hdata->current_mode, m);
+	hdata->cea_video_id = drm_match_cea_mode(mode);
 }
 
-static void hdmi_poweron(struct hdmi_context *hdata)
+static void hdmi_enable(struct drm_encoder *encoder)
 {
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
 	struct hdmi_resources *res = &hdata->res;
 
-	mutex_lock(&hdata->hdmi_mutex);
-	if (hdata->powered) {
-		mutex_unlock(&hdata->hdmi_mutex);
+	if (hdata->powered)
 		return;
-	}
 
 	hdata->powered = true;
 
-	mutex_unlock(&hdata->hdmi_mutex);
-
 	pm_runtime_get_sync(hdata->dev);
 
 	if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
@@ -2068,17 +1744,32 @@ static void hdmi_poweron(struct hdmi_context *hdata)
 	clk_prepare_enable(res->sclk_hdmi);
 
 	hdmiphy_poweron(hdata);
-	hdmi_commit(&hdata->display);
+	hdmi_conf_apply(hdata);
 }
 
-static void hdmi_poweroff(struct hdmi_context *hdata)
+static void hdmi_disable(struct drm_encoder *encoder)
 {
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
 	struct hdmi_resources *res = &hdata->res;
+	struct drm_crtc *crtc = encoder->crtc;
+	const struct drm_crtc_helper_funcs *funcs = NULL;
 
-	mutex_lock(&hdata->hdmi_mutex);
 	if (!hdata->powered)
-		goto out;
-	mutex_unlock(&hdata->hdmi_mutex);
+		return;
+
+	/*
+	 * The SFRs of the VP and the Mixer are updated on the Vertical
+	 * Sync of the Timing Generator, which is part of the HDMI block,
+	 * so the TV subsystem must be disabled in the following order:
+	 *	VP -> Mixer -> HDMI
+	 *
+	 * The code below therefore disables the Mixer and the VP (if
+	 * used) before disabling HDMI.
+	 */
+	if (crtc)
+		funcs = crtc->helper_private;
+	if (funcs && funcs->disable)
+		(*funcs->disable)(crtc);
 
 	/* HDMI System Disable */
 	hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
@@ -2098,57 +1789,18 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
 
 	pm_runtime_put_sync(hdata->dev);
 
-	mutex_lock(&hdata->hdmi_mutex);
 	hdata->powered = false;
-
-out:
-	mutex_unlock(&hdata->hdmi_mutex);
-}
-
-static void hdmi_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct hdmi_context *hdata = display_to_hdmi(display);
-	struct drm_encoder *encoder = hdata->encoder;
-	struct drm_crtc *crtc = encoder->crtc;
-	const struct drm_crtc_helper_funcs *funcs = NULL;
-
-	DRM_DEBUG_KMS("mode %d\n", mode);
-
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		hdmi_poweron(hdata);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		/*
-		 * The SFRs of VP and Mixer are updated by Vertical Sync of
-		 * Timing generator which is a part of HDMI so the sequence
-		 * to disable TV Subsystem should be as following,
-		 *	VP -> Mixer -> HDMI
-		 *
-		 * Below codes will try to disable Mixer and VP(if used)
-		 * prior to disabling HDMI.
-		 */
-		if (crtc)
-			funcs = crtc->helper_private;
-		if (funcs && funcs->disable)
-			(*funcs->disable)(crtc);
-
-		hdmi_poweroff(hdata);
-		break;
-	default:
-		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
-		break;
-	}
 }
 
-static struct exynos_drm_display_ops hdmi_display_ops = {
-	.create_connector = hdmi_create_connector,
+static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
 	.mode_fixup	= hdmi_mode_fixup,
 	.mode_set	= hdmi_mode_set,
-	.dpms		= hdmi_dpms,
-	.commit		= hdmi_commit,
+	.enable		= hdmi_enable,
+	.disable	= hdmi_disable,
+};
+
+static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
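With the move to drm_encoder_helper_funcs, the hand-rolled hdmi_dpms() switch removed above is no longer needed: the DRM helpers call .enable() for DPMS_ON and .disable() for every off state. A rough model of that dispatch (assumed helper behaviour, not driver code):

	static void dpms_dispatch_sketch(struct drm_encoder *encoder, int mode)
	{
		const struct drm_encoder_helper_funcs *f =
						encoder->helper_private;

		if (mode == DRM_MODE_DPMS_ON)
			f->enable(encoder);
		else
			f->disable(encoder);	/* STANDBY/SUSPEND/OFF */
	}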
 
 static void hdmi_hotplug_work_func(struct work_struct *work)
@@ -2157,10 +1809,6 @@ static void hdmi_hotplug_work_func(struct work_struct *work)
 
 	hdata = container_of(work, struct hdmi_context, hotplug_work.work);
 
-	mutex_lock(&hdata->hdmi_mutex);
-	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
-	mutex_unlock(&hdata->hdmi_mutex);
-
 	if (hdata->drm_dev)
 		drm_helper_hpd_irq_event(hdata->drm_dev);
 }
@@ -2259,30 +1907,6 @@ fail:
 	return ret;
 }
 
-static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
-					(struct device *dev)
-{
-	struct device_node *np = dev->of_node;
-	struct s5p_hdmi_platform_data *pd;
-	u32 value;
-
-	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		goto err_data;
-
-	if (!of_find_property(np, "hpd-gpio", &value)) {
-		DRM_ERROR("no hpd gpio property found\n");
-		goto err_data;
-	}
-
-	pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0);
-
-	return pd;
-
-err_data:
-	return NULL;
-}
-
 static struct of_device_id hdmi_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmi",
@@ -2306,10 +1930,33 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct drm_device *drm_dev = data;
 	struct hdmi_context *hdata = dev_get_drvdata(dev);
+	struct drm_encoder *encoder = &hdata->encoder;
+	int ret, pipe;
 
 	hdata->drm_dev = drm_dev;
 
-	return exynos_drm_create_enc_conn(drm_dev, &hdata->display);
+	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_HDMI);
+	if (pipe < 0)
+		return pipe;
+
+	encoder->possible_crtcs = 1 << pipe;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
+
+	ret = hdmi_create_connector(encoder);
+	if (ret) {
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void hdmi_unbind(struct device *dev, struct device *master, void *data)
@@ -2343,43 +1990,30 @@ static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
 static int hdmi_probe(struct platform_device *pdev)
 {
 	struct device_node *ddc_node, *phy_node;
-	struct s5p_hdmi_platform_data *pdata;
-	struct hdmi_driver_data *drv_data;
 	const struct of_device_id *match;
 	struct device *dev = &pdev->dev;
 	struct hdmi_context *hdata;
 	struct resource *res;
 	int ret;
 
-	if (!dev->of_node)
-		return -ENODEV;
-
-	pdata = drm_hdmi_dt_parse_pdata(dev);
-	if (!pdata)
-		return -EINVAL;
-
 	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
 	if (!hdata)
 		return -ENOMEM;
 
-	hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI;
-	hdata->display.ops = &hdmi_display_ops;
-
-	mutex_init(&hdata->hdmi_mutex);
-
-	platform_set_drvdata(pdev, hdata);
-
-	match = of_match_node(hdmi_match_types, dev->of_node);
+	match = of_match_device(hdmi_match_types, dev);
 	if (!match)
 		return -ENODEV;
 
-	drv_data = (struct hdmi_driver_data *)match->data;
-	hdata->type = drv_data->type;
-	hdata->phy_confs = drv_data->phy_confs;
-	hdata->phy_conf_count = drv_data->phy_conf_count;
+	hdata->drv_data = match->data;
+
+	platform_set_drvdata(pdev, hdata);
 
-	hdata->hpd_gpio = pdata->hpd_gpio;
 	hdata->dev = dev;
+	hdata->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpio", 0);
+	if (hdata->hpd_gpio < 0) {
+		DRM_ERROR("cannot get hpd gpio property\n");
+		return hdata->hpd_gpio;
+	}
 
 	ret = hdmi_resources_init(hdata);
 	if (ret) {
@@ -2431,7 +2065,7 @@ out_get_ddc_adpt:
 	}
 
 out_get_phy_port:
-	if (drv_data->is_apb_phy) {
+	if (hdata->drv_data->is_apb_phy) {
 		hdata->regs_hdmiphy = of_iomap(phy_node, 0);
 		if (!hdata->regs_hdmiphy) {
 			DRM_ERROR("failed to ioremap hdmi phy\n");
@@ -2454,8 +2088,6 @@ out_get_phy_port:
 		goto err_hdmiphy;
 	}
 
-	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
-
 	INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
 
 	ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 4706b56902b4..7f81cce966d4 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -43,6 +43,7 @@
 
 #define MIXER_WIN_NR		3
 #define MIXER_DEFAULT_WIN	0
+#define VP_DEFAULT_WIN		2
 
 /* The pixelformats that are natively supported by the mixer. */
 #define MXR_FORMAT_RGB565	4
@@ -69,6 +70,24 @@ enum mixer_version_id {
 	MXR_VER_128_0_0_184,
 };
 
+enum mixer_flag_bits {
+	MXR_BIT_POWERED,
+	MXR_BIT_VSYNC,
+};
+
+static const uint32_t mixer_formats[] = {
+	DRM_FORMAT_XRGB4444,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
+static const uint32_t vp_formats[] = {
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+};
+
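Two additions land here: per-plane format tables (RGB formats for the mixer windows, NV12/NV21 for the video processor) and a flags word, which later hunks drive with atomic bitops in place of the old powered/int_en booleans and the mutex. A sketch of the intended bitops usage (assumed; helpers from <linux/bitops.h>):

	static void mixer_flags_sketch(struct mixer_context *ctx)
	{
		set_bit(MXR_BIT_POWERED, &ctx->flags);	/* atomic set */
		if (test_bit(MXR_BIT_VSYNC, &ctx->flags))
			clear_bit(MXR_BIT_VSYNC, &ctx->flags);
	}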
 struct mixer_context {
 	struct platform_device *pdev;
 	struct device		*dev;
@@ -76,13 +95,11 @@ struct mixer_context {
 	struct exynos_drm_crtc	*crtc;
 	struct exynos_drm_plane	planes[MIXER_WIN_NR];
 	int			pipe;
+	unsigned long		flags;
 	bool			interlace;
-	bool			powered;
 	bool			vp_enabled;
 	bool			has_sclk;
-	u32			int_en;
 
-	struct mutex		mixer_mutex;
 	struct mixer_resources	mixer_res;
 	enum mixer_version_id	mxr_ver;
 	wait_queue_head_t	wait_vsync_queue;
@@ -380,19 +397,20 @@ static void mixer_stop(struct mixer_context *ctx)
 		usleep_range(10000, 12000);
 }
 
-static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
+static void vp_video_buffer(struct mixer_context *ctx,
+			    struct exynos_drm_plane *plane)
 {
 	struct mixer_resources *res = &ctx->mixer_res;
+	struct drm_plane_state *state = plane->base.state;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &state->crtc->mode;
 	unsigned long flags;
-	struct exynos_drm_plane *plane;
 	dma_addr_t luma_addr[2], chroma_addr[2];
 	bool tiled_mode = false;
 	bool crcb_mode = false;
 	u32 val;
 
-	plane = &ctx->planes[win];
-
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_NV12:
 		crcb_mode = false;
 		break;
@@ -401,21 +419,21 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
 		break;
 	default:
 		DRM_ERROR("pixel format for vp is wrong [%d].\n",
-				plane->pixel_format);
+				fb->pixel_format);
 		return;
 	}
 
 	luma_addr[0] = plane->dma_addr[0];
 	chroma_addr[0] = plane->dma_addr[1];
 
-	if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		ctx->interlace = true;
 		if (tiled_mode) {
 			luma_addr[1] = luma_addr[0] + 0x40;
 			chroma_addr[1] = chroma_addr[0] + 0x40;
 		} else {
-			luma_addr[1] = luma_addr[0] + plane->pitch;
-			chroma_addr[1] = chroma_addr[0] + plane->pitch;
+			luma_addr[1] = luma_addr[0] + fb->pitches[0];
+			chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
 		}
 	} else {
 		ctx->interlace = false;
@@ -436,25 +454,25 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
 	vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
 
 	/* setting size of input image */
-	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(plane->pitch) |
-		VP_IMG_VSIZE(plane->fb_height));
+	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
+		VP_IMG_VSIZE(fb->height));
 	/* chroma height has to be reduced by 2 to avoid chroma distortions */
-	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(plane->pitch) |
-		VP_IMG_VSIZE(plane->fb_height / 2));
+	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
+		VP_IMG_VSIZE(fb->height / 2));
 
-	vp_reg_write(res, VP_SRC_WIDTH, plane->src_width);
-	vp_reg_write(res, VP_SRC_HEIGHT, plane->src_height);
+	vp_reg_write(res, VP_SRC_WIDTH, plane->src_w);
+	vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h);
 	vp_reg_write(res, VP_SRC_H_POSITION,
 			VP_SRC_H_POSITION_VAL(plane->src_x));
 	vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y);
 
-	vp_reg_write(res, VP_DST_WIDTH, plane->crtc_width);
+	vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w);
 	vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x);
 	if (ctx->interlace) {
-		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height / 2);
+		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2);
 		vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2);
 	} else {
-		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height);
+		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h);
 		vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y);
 	}
 
@@ -469,9 +487,9 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
 	vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
 	vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
 
-	mixer_cfg_scan(ctx, plane->mode_height);
-	mixer_cfg_rgb_fmt(ctx, plane->mode_height);
-	mixer_cfg_layer(ctx, win, true);
+	mixer_cfg_scan(ctx, mode->vdisplay);
+	mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+	mixer_cfg_layer(ctx, plane->zpos, true);
 	mixer_run(ctx);
 
 	mixer_vsync_set_update(ctx, true);
@@ -491,15 +509,15 @@ static void mixer_layer_update(struct mixer_context *ctx)
 static int mixer_setup_scale(const struct exynos_drm_plane *plane,
 		unsigned int *x_ratio, unsigned int *y_ratio)
 {
-	if (plane->crtc_width != plane->src_width) {
-		if (plane->crtc_width == 2 * plane->src_width)
+	if (plane->crtc_w != plane->src_w) {
+		if (plane->crtc_w == 2 * plane->src_w)
 			*x_ratio = 1;
 		else
 			goto fail;
 	}
 
-	if (plane->crtc_height != plane->src_height) {
-		if (plane->crtc_height == 2 * plane->src_height)
+	if (plane->crtc_h != plane->src_h) {
+		if (plane->crtc_h == 2 * plane->src_h)
 			*y_ratio = 1;
 		else
 			goto fail;
@@ -512,20 +530,22 @@ fail:
 	return -ENOTSUPP;
 }
 
-static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
+static void mixer_graph_buffer(struct mixer_context *ctx,
+			       struct exynos_drm_plane *plane)
 {
 	struct mixer_resources *res = &ctx->mixer_res;
+	struct drm_plane_state *state = plane->base.state;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &state->crtc->mode;
 	unsigned long flags;
-	struct exynos_drm_plane *plane;
+	unsigned int win = plane->zpos;
 	unsigned int x_ratio = 0, y_ratio = 0;
 	unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
 	dma_addr_t dma_addr;
 	unsigned int fmt;
 	u32 val;
 
-	plane = &ctx->planes[win];
-
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_XRGB4444:
 		fmt = MXR_FORMAT_ARGB4444;
 		break;
@@ -557,12 +577,12 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
 
 	/* converting dma address base and source offset */
 	dma_addr = plane->dma_addr[0]
-		+ (plane->src_x * plane->bpp >> 3)
-		+ (plane->src_y * plane->pitch);
+		+ (plane->src_x * fb->bits_per_pixel >> 3)
+		+ (plane->src_y * fb->pitches[0]);
 	src_x_offset = 0;
 	src_y_offset = 0;
 
-	if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE)
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 		ctx->interlace = true;
 	else
 		ctx->interlace = false;
@@ -576,18 +596,18 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
 
 	/* setup geometry */
 	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
-			plane->pitch / (plane->bpp >> 3));
+			fb->pitches[0] / (fb->bits_per_pixel >> 3));
 
 	/* setup display size */
 	if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
 		win == MIXER_DEFAULT_WIN) {
-		val  = MXR_MXR_RES_HEIGHT(plane->mode_height);
-		val |= MXR_MXR_RES_WIDTH(plane->mode_width);
+		val  = MXR_MXR_RES_HEIGHT(mode->vdisplay);
+		val |= MXR_MXR_RES_WIDTH(mode->hdisplay);
 		mixer_reg_write(res, MXR_RESOLUTION, val);
 	}
 
-	val  = MXR_GRP_WH_WIDTH(plane->src_width);
-	val |= MXR_GRP_WH_HEIGHT(plane->src_height);
+	val  = MXR_GRP_WH_WIDTH(plane->src_w);
+	val |= MXR_GRP_WH_HEIGHT(plane->src_h);
 	val |= MXR_GRP_WH_H_SCALE(x_ratio);
 	val |= MXR_GRP_WH_V_SCALE(y_ratio);
 	mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -605,8 +625,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
 	/* set buffer address to mixer */
 	mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
 
-	mixer_cfg_scan(ctx, plane->mode_height);
-	mixer_cfg_rgb_fmt(ctx, plane->mode_height);
+	mixer_cfg_scan(ctx, mode->vdisplay);
+	mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
 	mixer_cfg_layer(ctx, win, true);
 
 	/* layer update mandatory for mixer 16.0.33.0 */
@@ -710,6 +730,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 	struct mixer_context *ctx = arg;
 	struct mixer_resources *res = &ctx->mixer_res;
 	u32 val, base, shadow;
+	int win;
 
 	spin_lock(&res->reg_slock);
 
@@ -735,8 +756,15 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 				goto out;
 		}
 
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
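+		/* finish the update on every plane that still has a framebuffer pending */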
+		for (win = 0 ; win < MIXER_WIN_NR ; win++) {
+			struct exynos_drm_plane *plane = &ctx->planes[win];
+
+			if (!plane->pending_fb)
+				continue;
+
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+		}
 
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
@@ -881,8 +909,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
 		}
 	}
 
-	ret = drm_iommu_attach_device_if_possible(mixer_ctx->crtc, drm_dev,
-								mixer_ctx->dev);
+	ret = drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
 	if (ret)
 		priv->pipe--;
 
@@ -891,8 +918,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
 
 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
 {
-	if (is_drm_iommu_supported(mixer_ctx->drm_dev))
-		drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+	drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
 }
 
 static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
@@ -900,10 +926,9 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 
-	if (!mixer_ctx->powered) {
-		mixer_ctx->int_en |= MXR_INT_EN_VSYNC;
+	__set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return 0;
-	}
 
 	/* enable vsync interrupt */
 	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
@@ -917,54 +942,48 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 
-	if (!mixer_ctx->powered) {
-		mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+	__clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
+
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
 
 	/* disable vsync interrupt */
 	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
 	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
-static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void mixer_update_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
 {
 	struct mixer_context *mixer_ctx = crtc->ctx;
 
-	DRM_DEBUG_KMS("win: %d\n", win);
+	DRM_DEBUG_KMS("win: %d\n", plane->zpos);
 
-	mutex_lock(&mixer_ctx->mixer_mutex);
-	if (!mixer_ctx->powered) {
-		mutex_unlock(&mixer_ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
-	mutex_unlock(&mixer_ctx->mixer_mutex);
 
-	if (win > 1 && mixer_ctx->vp_enabled)
-		vp_video_buffer(mixer_ctx, win);
+	if (plane->zpos > 1 && mixer_ctx->vp_enabled)
+		vp_video_buffer(mixer_ctx, plane);
 	else
-		mixer_graph_buffer(mixer_ctx, win);
+		mixer_graph_buffer(mixer_ctx, plane);
 }
 
-static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
 {
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 	unsigned long flags;
 
-	DRM_DEBUG_KMS("win: %d\n", win);
+	DRM_DEBUG_KMS("win: %d\n", plane->zpos);
 
-	mutex_lock(&mixer_ctx->mixer_mutex);
-	if (!mixer_ctx->powered) {
-		mutex_unlock(&mixer_ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
-	mutex_unlock(&mixer_ctx->mixer_mutex);
 
 	spin_lock_irqsave(&res->reg_slock, flags);
 	mixer_vsync_set_update(mixer_ctx, false);
 
-	mixer_cfg_layer(mixer_ctx, win, false);
+	mixer_cfg_layer(mixer_ctx, plane->zpos, false);
 
 	mixer_vsync_set_update(mixer_ctx, true);
 	spin_unlock_irqrestore(&res->reg_slock, flags);
@@ -975,12 +994,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	int err;
 
-	mutex_lock(&mixer_ctx->mixer_mutex);
-	if (!mixer_ctx->powered) {
-		mutex_unlock(&mixer_ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
-	mutex_unlock(&mixer_ctx->mixer_mutex);
 
 	err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
 	if (err < 0) {
@@ -1008,13 +1023,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 	struct mixer_resources *res = &ctx->mixer_res;
 	int ret;
 
-	mutex_lock(&ctx->mixer_mutex);
-	if (ctx->powered) {
-		mutex_unlock(&ctx->mixer_mutex);
+	if (test_bit(MXR_BIT_POWERED, &ctx->flags))
 		return;
-	}
-
-	mutex_unlock(&ctx->mixer_mutex);
 
 	pm_runtime_get_sync(ctx->dev);
 
@@ -1046,15 +1056,14 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 		}
 	}
 
-	mutex_lock(&ctx->mixer_mutex);
-	ctx->powered = true;
-	mutex_unlock(&ctx->mixer_mutex);
+	set_bit(MXR_BIT_POWERED, &ctx->flags);
 
 	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
-	if (ctx->int_en & MXR_INT_EN_VSYNC)
+	if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
 		mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
-	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+		mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
+	}
 	mixer_win_reset(ctx);
 }
 
@@ -1064,24 +1073,16 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
 	struct mixer_resources *res = &ctx->mixer_res;
 	int i;
 
-	mutex_lock(&ctx->mixer_mutex);
-	if (!ctx->powered) {
-		mutex_unlock(&ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
 		return;
-	}
-	mutex_unlock(&ctx->mixer_mutex);
 
 	mixer_stop(ctx);
 	mixer_regs_dump(ctx);
 
 	for (i = 0; i < MIXER_WIN_NR; i++)
-		mixer_win_disable(crtc, i);
-
-	ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+		mixer_disable_plane(crtc, &ctx->planes[i]);
 
-	mutex_lock(&ctx->mixer_mutex);
-	ctx->powered = false;
-	mutex_unlock(&ctx->mixer_mutex);
+	clear_bit(MXR_BIT_POWERED, &ctx->flags);
 
 	clk_disable_unprepare(res->hdmi);
 	clk_disable_unprepare(res->mixer);
@@ -1120,8 +1121,8 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
 	.enable_vblank		= mixer_enable_vblank,
 	.disable_vblank		= mixer_disable_vblank,
 	.wait_for_vblank	= mixer_wait_for_vblank,
-	.win_commit		= mixer_win_commit,
-	.win_disable		= mixer_win_disable,
+	.update_plane		= mixer_update_plane,
+	.disable_plane		= mixer_disable_plane,
 };
 
 static struct mixer_drv_data exynos5420_mxr_drv_data = {
@@ -1184,7 +1185,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
 	struct mixer_context *ctx = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
 	struct exynos_drm_plane *exynos_plane;
-	enum drm_plane_type type;
 	unsigned int zpos;
 	int ret;
 
@@ -1193,10 +1193,23 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
 		return ret;
 
 	for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) {
+		enum drm_plane_type type;
+		const uint32_t *formats;
+		unsigned int fcount;
+
 		type = (zpos == MIXER_DEFAULT_WIN) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
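+		/* windows 0 and 1 are RGB graphics layers; the last window feeds the video processor */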
+		if (zpos < VP_DEFAULT_WIN) {
+			formats = mixer_formats;
+			fcount = ARRAY_SIZE(mixer_formats);
+		} else {
+			formats = vp_formats;
+			fcount = ARRAY_SIZE(vp_formats);
+		}
+
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, formats, fcount,
+					zpos);
 		if (ret)
 			return ret;
 	}
@@ -1243,8 +1256,6 @@ static int mixer_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	mutex_init(&ctx->mixer_mutex);
-
 	if (dev->of_node) {
 		const struct of_device_id *match;
 
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
new file mode 100644
index 000000000000..c78cf3f605d0
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -0,0 +1,18 @@
+config DRM_FSL_DCU
+	tristate "DRM Support for Freescale DCU"
+	depends on DRM && OF && ARM
+	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_KMS_FB_HELPER
+	select DRM_PANEL
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	select REGMAP_MMIO
+	select VIDEOMODE_HELPERS
+	help
+	  Choose this option if you have a Freescale DCU chipset.
+	  If M is selected the module will be called fsl-dcu-drm.
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
new file mode 100644
index 000000000000..6ea1523ae6ec
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -0,0 +1,7 @@
+fsl-dcu-drm-y := fsl_dcu_drm_drv.o \
+		 fsl_dcu_drm_kms.o \
+		 fsl_dcu_drm_rgb.o \
+		 fsl_dcu_drm_plane.o \
+		 fsl_dcu_drm_crtc.o \
+		 fsl_dcu_drm_fbdev.o
+obj-$(CONFIG_DRM_FSL_DCU)	+= fsl-dcu-drm.o
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
new file mode 100644
index 000000000000..82a3d311e164
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+#include "fsl_dcu_drm_plane.h"
+
+static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+					  struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc,
+					 struct drm_crtc_state *state)
+{
+	return 0;
+}
+
+static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+					  struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	int ret;
+
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+	if (ret)
+		dev_err(fsl_dev->dev, "Disable CRTC failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+}
+
+static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	int ret;
+
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+	if (ret)
+		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+}
+
+static bool fsl_dcu_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	struct drm_display_mode *mode = &crtc->state->mode;
+	unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index;
+	unsigned long dcuclk;
+	int ret;
+
+	index = drm_crtc_index(crtc);
+	dcuclk = clk_get_rate(fsl_dev->clk);
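+	/* mode->clock is in kHz, so this yields the pixel-clock divider from the DCU input clock */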
+	div = dcuclk / mode->clock / 1000;
+
+	/* Configure timings: */
+	hbp = mode->htotal - mode->hsync_end;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsw = mode->hsync_end - mode->hsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsw = mode->vsync_end - mode->vsync_start;
+
+	ret = regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
+			   DCU_HSYN_PARA_BP(hbp) |
+			   DCU_HSYN_PARA_PW(hsw) |
+			   DCU_HSYN_PARA_FP(hfp));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
+			   DCU_VSYN_PARA_BP(vbp) |
+			   DCU_VSYN_PARA_PW(vsw) |
+			   DCU_VSYN_PARA_FP(vfp));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
+			   DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
+			   DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
+	if (ret)
+		goto set_failed;
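+	/* sync polarities are fixed to active-low here; mode->flags are not consulted */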
+	ret = regmap_write(fsl_dev->regmap, DCU_SYN_POL,
+			   DCU_SYN_POL_INV_VS_LOW | DCU_SYN_POL_INV_HS_LOW);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
+			   DCU_BGND_G(0) | DCU_BGND_B(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_DCU_MODE,
+			   DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_THRESHOLD,
+			   DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
+			   DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
+			   DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		goto set_failed;
+	return;
+set_failed:
+	dev_err(dev->dev, "set DCU register failed\n");
+}
+
+static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
+	.atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
+	.atomic_check = fsl_dcu_drm_crtc_atomic_check,
+	.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
+	.disable = fsl_dcu_drm_disable_crtc,
+	.enable = fsl_dcu_drm_crtc_enable,
+	.mode_fixup = fsl_dcu_drm_crtc_mode_fixup,
+	.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
+};
+
+static const struct drm_crtc_funcs fsl_dcu_drm_crtc_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.destroy = drm_crtc_cleanup,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = drm_atomic_helper_crtc_reset,
+	.set_config = drm_atomic_helper_set_config,
+};
+
+int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
+{
+	struct drm_plane *primary;
+	struct drm_crtc *crtc = &fsl_dev->crtc;
+	unsigned int i, j, reg_num;
+	int ret;
+
+	primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
+	if (!primary)
+		return -ENOMEM;
+
+	ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
+					&fsl_dcu_drm_crtc_funcs);
+	if (ret < 0)
+		return ret;
+
+	drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
+
+	if (!strcmp(fsl_dev->soc->name, "ls1021a"))
+		reg_num = LS1021A_LAYER_REG_NUM;
+	else
+		reg_num = VF610_LAYER_REG_NUM;
+	for (i = 0; i <= fsl_dev->soc->total_layer; i++) {
+		for (j = 0; j < reg_num; j++) {
+			ret = regmap_write(fsl_dev->regmap,
+					   DCU_CTRLDESCLN(i, j), 0);
+			if (ret)
+				goto init_failed;
+		}
+	}
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+	if (ret)
+		goto init_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		goto init_failed;
+
+	return 0;
+init_failed:
+	dev_err(fsl_dev->dev, "init DCU register failed\n");
+	return ret;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
new file mode 100644
index 000000000000..43d4da2c5fe5
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_CRTC_H__
+#define __FSL_DCU_DRM_CRTC_H__
+
+struct fsl_dcu_drm_device;
+
+int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev);
+
+#endif /* __FSL_DCU_DRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
new file mode 100644
index 000000000000..9a8e2da47158
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -0,0 +1,404 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+
+static const struct regmap_config fsl_dcu_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.cache_type = REGCACHE_RBTREE,
+};
+
+static int fsl_dcu_drm_irq_init(struct drm_device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int value;
+	int ret;
+
+	ret = drm_irq_install(dev, fsl_dev->irq);
+	if (ret < 0)
+		dev_err(dev->dev, "failed to install IRQ handler\n");
+
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+	if (ret)
+		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	value &= DCU_INT_MASK_VBLANK;
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+
+	return ret;
+}
+
+static int fsl_dcu_load(struct drm_device *drm, unsigned long flags)
+{
+	struct device *dev = drm->dev;
+	struct fsl_dcu_drm_device *fsl_dev = drm->dev_private;
+	int ret;
+
+	ret = fsl_dcu_drm_modeset_init(fsl_dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to initialize mode setting\n");
+		return ret;
+	}
+
+	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	if (ret < 0) {
+		dev_err(dev, "failed to initialize vblank\n");
+		goto done;
+	}
+	drm->vblank_disable_allowed = true;
+
+	ret = fsl_dcu_drm_irq_init(drm);
+	if (ret < 0)
+		goto done;
+	drm->irq_enabled = true;
+
+	fsl_dcu_fbdev_init(drm);
+
+	return 0;
+done:
+	if (ret) {
+		drm_mode_config_cleanup(drm);
+		drm_vblank_cleanup(drm);
+		drm_irq_uninstall(drm);
+		drm->dev_private = NULL;
+	}
+
+	return ret;
+}
+
+static int fsl_dcu_unload(struct drm_device *dev)
+{
+	drm_mode_config_cleanup(dev);
+	drm_vblank_cleanup(dev);
+	drm_irq_uninstall(dev);
+
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+static void fsl_dcu_drm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+}
+
+static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int int_status;
+	int ret;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+	if (int_status & DCU_INT_STATUS_VBLANK)
+		drm_handle_vblank(dev, 0);
+
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0xffffffff);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+
+	return IRQ_HANDLED;
+}
+
+static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int value;
+	int ret;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+	if (ret)
+		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	value &= ~DCU_INT_MASK_VBLANK;
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+	return 0;
+}
+
+static void fsl_dcu_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int value;
+	int ret;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+	if (ret)
+		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	value |= DCU_INT_MASK_VBLANK;
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+}
+
+static const struct file_operations fsl_dcu_drm_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.llseek		= no_llseek,
+	.mmap		= drm_gem_cma_mmap,
+};
+
+static struct drm_driver fsl_dcu_drm_driver = {
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
+				| DRIVER_PRIME | DRIVER_ATOMIC,
+	.load			= fsl_dcu_load,
+	.unload			= fsl_dcu_unload,
+	.preclose		= fsl_dcu_drm_preclose,
+	.irq_handler		= fsl_dcu_drm_irq,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= fsl_dcu_drm_enable_vblank,
+	.disable_vblank		= fsl_dcu_drm_disable_vblank,
+	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_vm_ops		= &drm_gem_cma_vm_ops,
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_import	= drm_gem_prime_import,
+	.gem_prime_export	= drm_gem_prime_export,
+	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap		= drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap	= drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap		= drm_gem_cma_prime_mmap,
+	.dumb_create		= drm_gem_cma_dumb_create,
+	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
+	.dumb_destroy		= drm_gem_dumb_destroy,
+	.fops			= &fsl_dcu_drm_fops,
+	.name			= "fsl-dcu-drm",
+	.desc			= "Freescale DCU DRM",
+	.date			= "20150213",
+	.major			= 1,
+	.minor			= 0,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int fsl_dcu_drm_pm_suspend(struct device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
+
+	if (!fsl_dev)
+		return 0;
+
+	drm_kms_helper_poll_disable(fsl_dev->drm);
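+	/* divert register accesses to the regmap cache and mark it dirty so resume can re-sync */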
+	regcache_cache_only(fsl_dev->regmap, true);
+	regcache_mark_dirty(fsl_dev->regmap);
+	clk_disable(fsl_dev->clk);
+	clk_unprepare(fsl_dev->clk);
+
+	return 0;
+}
+
+static int fsl_dcu_drm_pm_resume(struct device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
+	int ret;
+
+	if (!fsl_dev)
+		return 0;
+
+	ret = clk_prepare(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to prepare dcu clk\n");
+		return ret;
+	}
+	ret = clk_enable(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable dcu clk\n");
+		clk_unprepare(fsl_dev->clk);
+		return ret;
+	}
+
+	drm_kms_helper_poll_enable(fsl_dev->drm);
+	regcache_cache_only(fsl_dev->regmap, false);
+	regcache_sync(fsl_dev->regmap);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops fsl_dcu_drm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fsl_dcu_drm_pm_suspend, fsl_dcu_drm_pm_resume)
+};
+
+static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
+	.name = "ls1021a",
+	.total_layer = 16,
+	.max_layer = 4,
+};
+
+static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
+	.name = "vf610",
+	.total_layer = 64,
+	.max_layer = 6,
+};
+
+static const struct of_device_id fsl_dcu_of_match[] = {
+	{
+		.compatible = "fsl,ls1021a-dcu",
+		.data = &fsl_dcu_ls1021a_data,
+	}, {
+		.compatible = "fsl,vf610-dcu",
+		.data = &fsl_dcu_vf610_data,
+	}, {
+	},
+};
+MODULE_DEVICE_TABLE(of, fsl_dcu_of_match);
+
+static int fsl_dcu_drm_probe(struct platform_device *pdev)
+{
+	struct fsl_dcu_drm_device *fsl_dev;
+	struct drm_device *drm;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	void __iomem *base;
+	struct drm_driver *driver = &fsl_dcu_drm_driver;
+	const struct of_device_id *id;
+	int ret;
+
+	fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
+	if (!fsl_dev)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "could not get memory IO resource\n");
+		return -ENODEV;
+	}
+
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base)) {
+		ret = PTR_ERR(base);
+		return ret;
+	}
+
+	fsl_dev->irq = platform_get_irq(pdev, 0);
+	if (fsl_dev->irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		return -ENXIO;
+	}
+
+	fsl_dev->clk = devm_clk_get(dev, "dcu");
+	if (IS_ERR(fsl_dev->clk)) {
+		ret = PTR_ERR(fsl_dev->clk);
+		dev_err(dev, "failed to get dcu clock\n");
+		return ret;
+	}
+	ret = clk_prepare(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to prepare dcu clk\n");
+		return ret;
+	}
+	ret = clk_enable(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable dcu clk\n");
+		clk_unprepare(fsl_dev->clk);
+		return ret;
+	}
+
+	fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
+			&fsl_dcu_regmap_config);
+	if (IS_ERR(fsl_dev->regmap)) {
+		dev_err(dev, "regmap init failed\n");
+		return PTR_ERR(fsl_dev->regmap);
+	}
+
+	id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+	fsl_dev->soc = id->data;
+
+	drm = drm_dev_alloc(driver, dev);
+	if (!drm)
+		return -ENOMEM;
+
+	fsl_dev->dev = dev;
+	fsl_dev->drm = drm;
+	fsl_dev->np = dev->of_node;
+	drm->dev_private = fsl_dev;
+	dev_set_drvdata(dev, fsl_dev);
+	drm_dev_set_unique(drm, dev_name(dev));
+
+	ret = drm_dev_register(drm, 0);
+	if (ret < 0)
+		goto unref;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+		 driver->major, driver->minor, driver->patchlevel,
+		 driver->date, drm->primary->index);
+
+	return 0;
+
+unref:
+	drm_dev_unref(drm);
+	return ret;
+}
+
+static int fsl_dcu_drm_remove(struct platform_device *pdev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
+
+	drm_put_dev(fsl_dev->drm);
+
+	return 0;
+}
+
+static struct platform_driver fsl_dcu_drm_platform_driver = {
+	.probe		= fsl_dcu_drm_probe,
+	.remove		= fsl_dcu_drm_remove,
+	.driver		= {
+		.name	= "fsl-dcu",
+		.pm	= &fsl_dcu_drm_pm_ops,
+		.of_match_table = fsl_dcu_of_match,
+	},
+};
+
+module_platform_driver(fsl_dcu_drm_platform_driver);
+
+MODULE_DESCRIPTION("Freescale DCU DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
new file mode 100644
index 000000000000..579b9e44e764
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_DRV_H__
+#define __FSL_DCU_DRM_DRV_H__
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_output.h"
+#include "fsl_dcu_drm_plane.h"
+
+#define DCU_DCU_MODE			0x0010
+#define DCU_MODE_BLEND_ITER(x)		((x) << 20)
+#define DCU_MODE_RASTER_EN		BIT(14)
+#define DCU_MODE_DCU_MODE(x)		(x)
+#define DCU_MODE_DCU_MODE_MASK		0x03
+#define DCU_MODE_OFF			0
+#define DCU_MODE_NORMAL			1
+#define DCU_MODE_TEST			2
+#define DCU_MODE_COLORBAR		3
+
+#define DCU_BGND			0x0014
+#define DCU_BGND_R(x)			((x) << 16)
+#define DCU_BGND_G(x)			((x) << 8)
+#define DCU_BGND_B(x)			(x)
+
+#define DCU_DISP_SIZE			0x0018
+#define DCU_DISP_SIZE_DELTA_Y(x)	((x) << 16)
+/* register value is 1/16 of the horizontal resolution */
+#define DCU_DISP_SIZE_DELTA_X(x)	((x) >> 4)
+
+#define DCU_HSYN_PARA			0x001c
+#define DCU_HSYN_PARA_BP(x)		((x) << 22)
+#define DCU_HSYN_PARA_PW(x)		((x) << 11)
+#define DCU_HSYN_PARA_FP(x)		(x)
+
+#define DCU_VSYN_PARA			0x0020
+#define DCU_VSYN_PARA_BP(x)		((x) << 22)
+#define DCU_VSYN_PARA_PW(x)		((x) << 11)
+#define DCU_VSYN_PARA_FP(x)		(x)
+
+#define DCU_SYN_POL			0x0024
+#define DCU_SYN_POL_INV_PXCK_FALL	(0 << 6)
+#define DCU_SYN_POL_NEG_REMAIN		(0 << 5)
+#define DCU_SYN_POL_INV_VS_LOW		BIT(1)
+#define DCU_SYN_POL_INV_HS_LOW		BIT(0)
+
+#define DCU_THRESHOLD			0x0028
+#define DCU_THRESHOLD_LS_BF_VS(x)	((x) << 16)
+#define DCU_THRESHOLD_OUT_BUF_HIGH(x)	((x) << 8)
+#define DCU_THRESHOLD_OUT_BUF_LOW(x)	(x)
+#define BF_VS_VAL			0x03
+#define BUF_MAX_VAL			0x78
+#define BUF_MIN_VAL			0x0a
+
+#define DCU_INT_STATUS			0x002C
+#define DCU_INT_STATUS_VSYNC		BIT(0)
+#define DCU_INT_STATUS_UNDRUN		BIT(1)
+#define DCU_INT_STATUS_LSBFVS		BIT(2)
+#define DCU_INT_STATUS_VBLANK		BIT(3)
+#define DCU_INT_STATUS_CRCREADY		BIT(4)
+#define DCU_INT_STATUS_CRCOVERFLOW	BIT(5)
+#define DCU_INT_STATUS_P1FIFOLO		BIT(6)
+#define DCU_INT_STATUS_P1FIFOHI		BIT(7)
+#define DCU_INT_STATUS_P2FIFOLO		BIT(8)
+#define DCU_INT_STATUS_P2FIFOHI		BIT(9)
+#define DCU_INT_STATUS_PROGEND		BIT(10)
+#define DCU_INT_STATUS_IPMERROR		BIT(11)
+#define DCU_INT_STATUS_LYRTRANS		BIT(12)
+#define DCU_INT_STATUS_DMATRANS		BIT(14)
+#define DCU_INT_STATUS_P3FIFOLO		BIT(16)
+#define DCU_INT_STATUS_P3FIFOHI		BIT(17)
+#define DCU_INT_STATUS_P4FIFOLO		BIT(18)
+#define DCU_INT_STATUS_P4FIFOHI		BIT(19)
+#define DCU_INT_STATUS_P1EMPTY		BIT(26)
+#define DCU_INT_STATUS_P2EMPTY		BIT(27)
+#define DCU_INT_STATUS_P3EMPTY		BIT(28)
+#define DCU_INT_STATUS_P4EMPTY		BIT(29)
+
+#define DCU_INT_MASK			0x0030
+#define DCU_INT_MASK_VSYNC		BIT(0)
+#define DCU_INT_MASK_UNDRUN		BIT(1)
+#define DCU_INT_MASK_LSBFVS		BIT(2)
+#define DCU_INT_MASK_VBLANK		BIT(3)
+#define DCU_INT_MASK_CRCREADY		BIT(4)
+#define DCU_INT_MASK_CRCOVERFLOW	BIT(5)
+#define DCU_INT_MASK_P1FIFOLO		BIT(6)
+#define DCU_INT_MASK_P1FIFOHI		BIT(7)
+#define DCU_INT_MASK_P2FIFOLO		BIT(8)
+#define DCU_INT_MASK_P2FIFOHI		BIT(9)
+#define DCU_INT_MASK_PROGEND		BIT(10)
+#define DCU_INT_MASK_IPMERROR		BIT(11)
+#define DCU_INT_MASK_LYRTRANS		BIT(12)
+#define DCU_INT_MASK_DMATRANS		BIT(14)
+#define DCU_INT_MASK_P3FIFOLO		BIT(16)
+#define DCU_INT_MASK_P3FIFOHI		BIT(17)
+#define DCU_INT_MASK_P4FIFOLO		BIT(18)
+#define DCU_INT_MASK_P4FIFOHI		BIT(19)
+#define DCU_INT_MASK_P1EMPTY		BIT(26)
+#define DCU_INT_MASK_P2EMPTY		BIT(27)
+#define DCU_INT_MASK_P3EMPTY		BIT(28)
+#define DCU_INT_MASK_P4EMPTY		BIT(29)
+
+#define DCU_DIV_RATIO			0x0054
+
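+/* writing READREG triggers the DCU to reload the newly programmed register set */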
+#define DCU_UPDATE_MODE			0x00cc
+#define DCU_UPDATE_MODE_MODE		BIT(31)
+#define DCU_UPDATE_MODE_READREG		BIT(30)
+
+#define DCU_DCFB_MAX			0x300
+
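+/* per-layer control descriptors: 32-bit registers numbered from 1, one 0x40-byte block per layer */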
+#define DCU_CTRLDESCLN(layer, reg)	(0x200 + (reg - 1) * 4 + (layer) * 0x40)
+
+#define DCU_LAYER_HEIGHT(x)		((x) << 16)
+#define DCU_LAYER_WIDTH(x)		(x)
+
+#define DCU_LAYER_POSY(x)		((x) << 16)
+#define DCU_LAYER_POSX(x)		(x)
+
+#define DCU_LAYER_EN			BIT(31)
+#define DCU_LAYER_TILE_EN		BIT(30)
+#define DCU_LAYER_DATA_SEL_CLUT		BIT(29)
+#define DCU_LAYER_SAFETY_EN		BIT(28)
+#define DCU_LAYER_TRANS(x)		((x) << 20)
+#define DCU_LAYER_BPP(x)		((x) << 16)
+#define DCU_LAYER_RLE_EN		BIT(15)
+#define DCU_LAYER_LUOFFS(x)		((x) << 4)
+#define DCU_LAYER_BB_ON			BIT(2)
+#define DCU_LAYER_AB(x)			(x)
+
+#define DCU_LAYER_CKMAX_R(x)		((x) << 16)
+#define DCU_LAYER_CKMAX_G(x)		((x) << 8)
+#define DCU_LAYER_CKMAX_B(x)		(x)
+
+#define DCU_LAYER_CKMIN_R(x)		((x) << 16)
+#define DCU_LAYER_CKMIN_G(x)		((x) << 8)
+#define DCU_LAYER_CKMIN_B(x)		(x)
+
+#define DCU_LAYER_TILE_VER(x)		((x) << 16)
+#define DCU_LAYER_TILE_HOR(x)		(x)
+
+#define DCU_LAYER_FG_FCOLOR(x)		(x)
+
+#define DCU_LAYER_BG_BCOLOR(x)		(x)
+
+#define DCU_LAYER_POST_SKIP(x)		((x) << 16)
+#define DCU_LAYER_PRE_SKIP(x)		(x)
+
+#define FSL_DCU_RGB565			4
+#define FSL_DCU_RGB888			5
+#define FSL_DCU_ARGB8888		6
+#define FSL_DCU_ARGB1555		11
+#define FSL_DCU_ARGB4444		12
+#define FSL_DCU_YUV422			14
+
+#define VF610_LAYER_REG_NUM		9
+#define LS1021A_LAYER_REG_NUM		10
+
+struct clk;
+struct device;
+struct drm_device;
+
+struct fsl_dcu_soc_data {
+	const char *name;
+	/* total number of layers provided by the DCU */
+	unsigned int total_layer;
+	/* max layer number supported by the DCU */
+	unsigned int max_layer;
+};
+
+struct fsl_dcu_drm_device {
+	struct device *dev;
+	struct device_node *np;
+	struct regmap *regmap;
+	int irq;
+	struct clk *clk;
+	/* protects hardware registers */
+	spinlock_t irq_lock;
+	struct drm_device *drm;
+	struct drm_fbdev_cma *fbdev;
+	struct drm_crtc crtc;
+	struct drm_encoder encoder;
+	struct fsl_dcu_drm_connector connector;
+	const struct fsl_dcu_soc_data *soc;
+};
+
+void fsl_dcu_fbdev_init(struct drm_device *dev);
+int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev);
+
+#endif /* __FSL_DCU_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
new file mode 100644
index 000000000000..8b8b819ea704
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "fsl_dcu_drm_drv.h"
+
+/* initialize fbdev helper */
+void fsl_dcu_fbdev_init(struct drm_device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev->dev);
+
+	fsl_dev->fbdev = drm_fbdev_cma_init(dev, 24, 1, 1);
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
new file mode 100644
index 000000000000..0ef5959710e7
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+
+static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = {
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+	.fb_create = drm_fb_cma_create,
+};
+
+int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
+{
+	drm_mode_config_init(fsl_dev->drm);
+
+	fsl_dev->drm->mode_config.min_width = 0;
+	fsl_dev->drm->mode_config.min_height = 0;
+	fsl_dev->drm->mode_config.max_width = 2031;
+	fsl_dev->drm->mode_config.max_height = 2047;
+	fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs;
+
+	drm_kms_helper_poll_init(fsl_dev->drm);
+	fsl_dcu_drm_crtc_create(fsl_dev);
+	fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
+	fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+	drm_mode_config_reset(fsl_dev->drm);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
new file mode 100644
index 000000000000..7093109fbc21
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_CONNECTOR_H__
+#define __FSL_DCU_DRM_CONNECTOR_H__
+
+struct fsl_dcu_drm_connector {
+	struct drm_connector base;
+	struct drm_encoder *encoder;
+	struct drm_panel *panel;
+};
+
+static inline struct fsl_dcu_drm_connector *
+to_fsl_dcu_connector(struct drm_connector *con)
+{
+	return con ? container_of(con, struct fsl_dcu_drm_connector, base)
+		     : NULL;
+}
+
+int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
+				 struct drm_encoder *encoder);
+int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
+			       struct drm_crtc *crtc);
+
+#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
new file mode 100644
index 000000000000..82be6b86a168
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "fsl_dcu_drm_drv.h"
+#include "fsl_dcu_drm_plane.h"
+
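+/*
+ * Map a DRM plane index onto a DCU hardware layer; layers are handed out
+ * from the highest-numbered one downwards.
+ */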
+static int fsl_dcu_drm_plane_index(struct drm_plane *plane)
+{
+	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+	unsigned int total_layer = fsl_dev->soc->total_layer;
+	unsigned int index;
+
+	index = drm_plane_index(plane);
+	if (index < total_layer)
+		return total_layer - index - 1;
+
+	dev_err(fsl_dev->dev, "No more layer left\n");
+	return -EINVAL;
+}
+
+static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
+					  struct drm_plane_state *state)
+{
+	struct drm_framebuffer *fb = state->fb;
+
+	if (!fb)
+		return 0;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_YUV422:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
+					     struct drm_plane_state *old_state)
+{
+	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+	unsigned int value;
+	int index, ret;
+
+	index = fsl_dcu_drm_plane_index(plane);
+	if (index < 0)
+		return;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value);
+	if (ret)
+		dev_err(fsl_dev->dev, "read DCU_INT_MASK failed\n");
+	value &= ~DCU_LAYER_EN;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value);
+	if (ret)
+		dev_err(fsl_dev->dev, "set DCU register failed\n");
+}
+
+static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
+					    struct drm_plane_state *old_state)
+
+{
+	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+	struct drm_plane_state *state = plane->state;
+	struct drm_framebuffer *fb = plane->state->fb;
+	struct drm_gem_cma_object *gem;
+	unsigned int alpha, bpp;
+	int index, ret;
+
+	if (!fb)
+		return;
+
+	index = fsl_dcu_drm_plane_index(plane);
+	if (index < 0)
+		return;
+
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+
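+	/* translate the DRM fourcc into the DCU layer BPP encoding and a default alpha */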
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+		bpp = FSL_DCU_RGB565;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_RGB888:
+		bpp = FSL_DCU_RGB888;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		bpp = FSL_DCU_ARGB8888;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_BGRA4444:
+		bpp = FSL_DCU_ARGB4444;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_ARGB1555:
+		bpp = FSL_DCU_ARGB1555;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_YUV422:
+		bpp = FSL_DCU_YUV422;
+		alpha = 0xff;
+		break;
+	default:
+		return;
+	}
+
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
+			   DCU_LAYER_HEIGHT(state->crtc_h) |
+			   DCU_LAYER_WIDTH(state->crtc_w));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
+			   DCU_LAYER_POSY(state->crtc_y) |
+			   DCU_LAYER_POSX(state->crtc_x));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap,
+			   DCU_CTRLDESCLN(index, 3), gem->paddr);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
+			   DCU_LAYER_EN |
+			   DCU_LAYER_TRANS(alpha) |
+			   DCU_LAYER_BPP(bpp) |
+			   DCU_LAYER_AB(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5),
+			   DCU_LAYER_CKMAX_R(0xFF) |
+			   DCU_LAYER_CKMAX_G(0xFF) |
+			   DCU_LAYER_CKMAX_B(0xFF));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6),
+			   DCU_LAYER_CKMIN_R(0) |
+			   DCU_LAYER_CKMIN_G(0) |
+			   DCU_LAYER_CKMIN_B(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8),
+			   DCU_LAYER_FG_FCOLOR(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9),
+			   DCU_LAYER_BG_BCOLOR(0));
+	if (ret)
+		goto set_failed;
+	if (!strcmp(fsl_dev->soc->name, "ls1021a")) {
+		ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10),
+				   DCU_LAYER_POST_SKIP(0) |
+				   DCU_LAYER_PRE_SKIP(0));
+		if (ret)
+			goto set_failed;
+	}
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap,
+			   DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
+	if (ret)
+		goto set_failed;
+	return;
+
+set_failed:
+	dev_err(fsl_dev->dev, "set DCU register failed\n");
+}
+
+static void
+fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
+			     struct drm_framebuffer *fb,
+			     const struct drm_plane_state *new_state)
+{
+}
+
+static int
+fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
+			     struct drm_framebuffer *fb,
+			     const struct drm_plane_state *new_state)
+{
+	return 0;
+}
+
+static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
+	.atomic_check = fsl_dcu_drm_plane_atomic_check,
+	.atomic_disable = fsl_dcu_drm_plane_atomic_disable,
+	.atomic_update = fsl_dcu_drm_plane_atomic_update,
+	.cleanup_fb = fsl_dcu_drm_plane_cleanup_fb,
+	.prepare_fb = fsl_dcu_drm_plane_prepare_fb,
+};
+
+static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
+{
+	drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+	.destroy = fsl_dcu_drm_plane_destroy,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.reset = drm_atomic_helper_plane_reset,
+	.update_plane = drm_atomic_helper_update_plane,
+};
+
+static const u32 fsl_dcu_drm_plane_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ARGB4444,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_YUV422,
+};
+
+struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
+{
+	struct drm_plane *primary;
+	int ret;
+
+	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+	if (!primary) {
+		DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+		return NULL;
+	}
+
+	/* possible_crtcs will be filled in later by crtc_init */
+	ret = drm_universal_plane_init(dev, primary, 0,
+				       &fsl_dcu_drm_plane_funcs,
+				       fsl_dcu_drm_plane_formats,
+				       ARRAY_SIZE(fsl_dcu_drm_plane_formats),
+				       DRM_PLANE_TYPE_PRIMARY);
+	if (ret) {
+		kfree(primary);
+		return NULL;
+	}
+	drm_plane_helper_add(primary, &fsl_dcu_drm_plane_helper_funcs);
+
+	return primary;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
new file mode 100644
index 000000000000..d657f088d859
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_PLANE_H__
+#define __FSL_DCU_DRM_PLANE_H__
+
+struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
+
+#endif /* __FSL_DCU_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
new file mode 100644
index 000000000000..fe8ab5da04fb
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include "fsl_dcu_drm_drv.h"
+
+static int
+fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
+				 struct drm_crtc_state *crtc_state,
+				 struct drm_connector_state *conn_state)
+{
+	return 0;
+}
+
+static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.atomic_check = fsl_dcu_drm_encoder_atomic_check,
+	.disable = fsl_dcu_drm_encoder_disable,
+	.enable = fsl_dcu_drm_encoder_enable,
+};
+
+static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = fsl_dcu_drm_encoder_destroy,
+};
+
+int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
+			       struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder = &fsl_dev->encoder;
+	int ret;
+
+	encoder->possible_crtcs = 1;
+	ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
+			       DRM_MODE_ENCODER_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+	return 0;
+}
+
+static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+fsl_dcu_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.destroy = fsl_dcu_drm_connector_destroy,
+	.detect = fsl_dcu_drm_connector_detect,
+	.dpms = drm_atomic_helper_connector_dpms,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.reset = drm_atomic_helper_connector_reset,
+};
+
+static struct drm_encoder *
+fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
+{
+	struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
+
+	return fsl_con->encoder;
+}
+
+static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
+{
+	struct fsl_dcu_drm_connector *fsl_connector;
+	int (*get_modes)(struct drm_panel *panel);
+	int num_modes = 0;
+
+	fsl_connector = to_fsl_dcu_connector(connector);
+	if (fsl_connector->panel && fsl_connector->panel->funcs &&
+	    fsl_connector->panel->funcs->get_modes) {
+		get_modes = fsl_connector->panel->funcs->get_modes;
+		num_modes = get_modes(fsl_connector->panel);
+	}
+
+	return num_modes;
+}
+
+static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
+					    struct drm_display_mode *mode)
+{
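+	/* DCU_DISP_SIZE stores the width in units of 16 pixels, so reject other widths */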
+	if (mode->hdisplay & 0xf)
+		return MODE_ERROR;
+
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.best_encoder = fsl_dcu_drm_connector_best_encoder,
+	.get_modes = fsl_dcu_drm_connector_get_modes,
+	.mode_valid = fsl_dcu_drm_connector_mode_valid,
+};
+
+int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
+				 struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = &fsl_dev->connector.base;
+	struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
+	struct device_node *panel_node;
+	int ret;
+
+	fsl_dev->connector.encoder = encoder;
+
+	ret = drm_connector_init(fsl_dev->drm, connector,
+				 &fsl_dcu_drm_connector_funcs,
+				 DRM_MODE_CONNECTOR_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_connector_helper_add(connector, &connector_helper_funcs);
+	ret = drm_connector_register(connector);
+	if (ret < 0)
+		goto err_cleanup;
+
+	ret = drm_mode_connector_attach_encoder(connector, encoder);
+	if (ret < 0)
+		goto err_sysfs;
+
+	drm_object_property_set_value(&connector->base,
+				      mode_config->dpms_property,
+				      DRM_MODE_DPMS_OFF);
+
+	panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
+	if (panel_node) {
+		fsl_dev->connector.panel = of_drm_find_panel(panel_node);
+		of_node_put(panel_node);
+		if (!fsl_dev->connector.panel) {
+			ret = -EPROBE_DEFER;
+			goto err_sysfs;
+		}
+	}
+
+	ret = drm_panel_attach(fsl_dev->connector.panel, connector);
+	if (ret) {
+		dev_err(fsl_dev->dev, "failed to attach panel\n");
+		goto err_sysfs;
+	}
+
+	return 0;
+
+err_sysfs:
+	drm_connector_unregister(connector);
+err_cleanup:
+	drm_connector_cleanup(connector);
+	return ret;
+}
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index de6f62a6ceb7..db9f7d011832 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -276,12 +276,12 @@ static void psbfb_copyarea_accel(struct fb_info *info,
 		break;
 	default:
 		/* software fallback */
-		cfb_copyarea(info, a);
+		drm_fb_helper_cfb_copyarea(info, a);
 		return;
 	}
 
 	if (!gma_power_begin(dev, false)) {
-		cfb_copyarea(info, a);
+		drm_fb_helper_cfb_copyarea(info, a);
 		return;
 	}
 	psb_accel_2d_copy(dev_priv,
@@ -308,7 +308,7 @@ void psbfb_copyarea(struct fb_info *info,
 	/* Avoid the 8 pixel erratum */
 	if (region->width == 8 || region->height == 8 ||
 		(info->flags & FBINFO_HWACCEL_DISABLED))
-		return cfb_copyarea(info, region);
+		return drm_fb_helper_cfb_copyarea(info, region);
 
 	psbfb_copyarea_accel(info, region);
 }
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2d42ce6d3757..2eaf1b31c7bd 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -194,9 +194,9 @@ static struct fb_ops psbfb_ops = {
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcolreg = psbfb_setcolreg,
-	.fb_fillrect = cfb_fillrect,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
 	.fb_copyarea = psbfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_mmap = psbfb_mmap,
 	.fb_sync = psbfb_sync,
 	.fb_ioctl = psbfb_ioctl,
@@ -208,9 +208,9 @@ static struct fb_ops psbfb_roll_ops = {
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcolreg = psbfb_setcolreg,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = psbfb_pan,
 	.fb_mmap = psbfb_mmap,
 	.fb_ioctl = psbfb_ioctl,
@@ -222,9 +222,9 @@ static struct fb_ops psbfb_unaccel_ops = {
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcolreg = psbfb_setcolreg,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_mmap = psbfb_mmap,
 	.fb_ioctl = psbfb_ioctl,
 };
@@ -343,7 +343,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 	struct drm_framebuffer *fb;
 	struct psb_framebuffer *psbfb = &fbdev->pfb;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct device *device = &dev->pdev->dev;
 	int size;
 	int ret;
 	struct gtt_range *backing;
@@ -409,9 +408,9 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 
 	mutex_lock(&dev->struct_mutex);
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_err1;
 	}
 	info->par = fbdev;
@@ -426,7 +425,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 	psbfb->fbdev = info;
 
 	fbdev->psb_fb_helper.fb = fb;
-	fbdev->psb_fb_helper.fbdev = info;
 
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	strcpy(info->fix.id, "psbdrmfb");
@@ -440,12 +438,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 	} else	/* Software */
 		info->fbops = &psbfb_unaccel_ops;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
-
 	info->fix.smem_start = dev->mode_config.fb_base;
 	info->fix.smem_len = size;
 	info->fix.ywrapstep = gtt_roll;
@@ -456,11 +448,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
 	info->screen_size = size;
 
 	if (dev_priv->gtt.stolen_size) {
-		info->apertures = alloc_apertures(1);
-		if (!info->apertures) {
-			ret = -ENOMEM;
-			goto out_unref;
-		}
 		info->apertures->ranges[0].base = dev->mode_config.fb_base;
 		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
 	}
@@ -483,6 +470,8 @@ out_unref:
 		psb_gtt_free_range(dev, backing);
 	else
 		drm_gem_object_unreference(&backing->gem);
+
+	drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
 out_err1:
 	mutex_unlock(&dev->struct_mutex);
 	psb_gtt_free_range(dev, backing);
@@ -570,16 +559,11 @@ static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
 
 static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
 {
-	struct fb_info *info;
 	struct psb_framebuffer *psbfb = &fbdev->pfb;
 
-	if (fbdev->psb_fb_helper.fbdev) {
-		info = fbdev->psb_fb_helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
+	drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
+
 	drm_fb_helper_fini(&fbdev->psb_fb_helper);
 	drm_framebuffer_unregister_private(&psbfb->base);
 	drm_framebuffer_cleanup(&psbfb->base);
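
The gma500 hunks above follow the fbdev helperization pattern of this series: drm_fb_helper_alloc_fbi() subsumes the open-coded framebuffer_alloc()/fb_alloc_cmap()/alloc_apertures() sequence (the removed aperture allocation with the kept apertures usage implies the helper provides it), and teardown shrinks to two helper calls. A hedged sketch of the resulting lifecycle, using only the helper names visible in the diff:

/* Sketch: the setup half pairs with the teardown half below. */
static struct fb_info *example_fbdev_setup(struct drm_fb_helper *helper)
{
	/* Allocates the fb_info, its cmap and the aperture list. */
	return drm_fb_helper_alloc_fbi(helper);	/* ERR_PTR() on failure */
}

static void example_fbdev_teardown(struct drm_fb_helper *helper)
{
	drm_fb_helper_unregister_fbi(helper);	/* was unregister_framebuffer() */
	drm_fb_helper_release_fbi(helper);	/* frees cmap and fb_info */
	drm_fb_helper_fini(helper);
}
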
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 74acca9bcd9d..051eab33e4c7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -36,30 +36,6 @@ config DRM_I915
 	  i810 driver instead, and the Atom z5xx series has an entirely
 	  different implementation.
 
-config DRM_I915_KMS
-	bool "Enable modesetting on intel by default"
-	depends on DRM_I915
-	default y
-	help
-	  Choose this option if you want kernel modesetting enabled by default.
-
-	  If in doubt, say "Y".
-
-config DRM_I915_FBDEV
-	bool "Enable legacy fbdev support for the modesetting intel driver"
-	depends on DRM_I915
-	select DRM_KMS_FB_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	default y
-	help
-	  Choose this option if you have a need for the legacy fbdev
-	  support. Note that this support also provide the linux console
-	  support on top of the intel modesetting driver.
-
-	  If in doubt, say "Y".
-
 config DRM_I915_PRELIMINARY_HW_SUPPORT
 	bool "Enable preliminary support for prerelease Intel hardware by default"
 	depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b7ddf48e1d75..998b4643109f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -6,12 +6,13 @@
 
 # core driver code
 i915-y := i915_drv.o \
+	  i915_irq.o \
 	  i915_params.o \
           i915_suspend.o \
 	  i915_sysfs.o \
+	  intel_csr.o \
 	  intel_pm.o \
-	  intel_runtime_pm.o \
-	  intel_csr.o
+	  intel_runtime_pm.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
@@ -20,21 +21,22 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 i915-y += i915_cmd_parser.o \
 	  i915_gem_batch_pool.o \
 	  i915_gem_context.o \
-	  i915_gem_render_state.o \
 	  i915_gem_debug.o \
 	  i915_gem_dmabuf.o \
 	  i915_gem_evict.o \
 	  i915_gem_execbuffer.o \
+	  i915_gem_fence.o \
 	  i915_gem_gtt.o \
 	  i915_gem.o \
+	  i915_gem_render_state.o \
 	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
 	  i915_gem_userptr.o \
 	  i915_gpu_error.o \
-	  i915_irq.o \
 	  i915_trace_points.o \
 	  intel_lrc.o \
+	  intel_mocs.o \
 	  intel_ringbuffer.o \
 	  intel_uncore.o
 
@@ -46,18 +48,21 @@ i915-y += intel_renderstate_gen6.o \
 
 # modesetting core code
 i915-y += intel_audio.o \
+	  intel_atomic.o \
+	  intel_atomic_plane.o \
 	  intel_bios.o \
 	  intel_display.o \
 	  intel_fbc.o \
 	  intel_fifo_underrun.o \
 	  intel_frontbuffer.o \
+	  intel_hotplug.o \
 	  intel_modes.o \
 	  intel_overlay.o \
 	  intel_psr.o \
 	  intel_sideband.o \
 	  intel_sprite.o
 i915-$(CONFIG_ACPI)		+= intel_acpi.o intel_opregion.o
-i915-$(CONFIG_DRM_I915_FBDEV)	+= intel_fbdev.o
+i915-$(CONFIG_DRM_FBDEV_EMULATION)	+= intel_fbdev.o
 
 # modesetting output/encoder code
 i915-y += dvo_ch7017.o \
@@ -66,15 +71,13 @@ i915-y += dvo_ch7017.o \
 	  dvo_ns2501.o \
 	  dvo_sil164.o \
 	  dvo_tfp410.o \
-	  intel_atomic.o \
-	  intel_atomic_plane.o \
 	  intel_crt.o \
 	  intel_ddi.o \
-	  intel_dp.o \
 	  intel_dp_mst.o \
+	  intel_dp.o \
 	  intel_dsi.o \
-	  intel_dsi_pll.o \
 	  intel_dsi_panel_vbt.o \
+	  intel_dsi_pll.o \
 	  intel_dvo.o \
 	  intel_hdmi.o \
 	  intel_i2c.o \
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 89b08a896d20..732ce8785945 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -22,6 +22,7 @@
  *
  * Authors:
  *    Eric Anholt <eric@anholt.net>
+ *    Thomas Richter <thor@math.tu-berlin.de>
  *
  * Minor modifications (Dithering enable):
  *    Thomas Richter <thor@math.tu-berlin.de>
@@ -90,7 +91,7 @@
 /*
  * LCD Vertical Display Size
  */
-#define VR21	0x20
+#define VR21	0x21
 
 /*
  * Panel power down status
@@ -155,16 +156,33 @@
 # define VR8F_POWER_MASK		(0x3c)
 # define VR8F_POWER_POS			(2)
 
+/* Some BIOS implementations do not restore the DVO state upon
+ * resume from standby, so this driver has to handle it
+ * instead. The following list contains all registers that
+ * require saving.
+ */
+static const uint16_t backup_addresses[] = {
+	0x11, 0x12,
+	0x18, 0x19, 0x1a, 0x1f,
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+	0x8e, 0x8f,
+	0x10		/* this must come last */
+};
+
 
 struct ivch_priv {
 	bool quiet;
 
 	uint16_t width, height;
+
+	/* Register backup */
+
+	uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
 };
 
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo);
-
 /**
  * Reads a register on the ivch.
  *
@@ -246,6 +264,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
 {
 	struct ivch_priv *priv;
 	uint16_t temp;
+	int i;
 
 	priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
 	if (priv == NULL)
@@ -273,6 +292,14 @@ static bool ivch_init(struct intel_dvo_device *dvo,
 	ivch_read(dvo, VR20, &priv->width);
 	ivch_read(dvo, VR21, &priv->height);
 
+	/* Make a backup of the registers to be able to restore them
+	 * upon suspend.
+	 */
+	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+		ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);
+
+	ivch_dump_regs(dvo);
+
 	return true;
 
 out:
@@ -294,12 +321,31 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
 	return MODE_OK;
 }
 
+/* Restore the DVO registers after a resume
+ * from RAM. The registers were saved during
+ * initialization.
+ */
+static void ivch_reset(struct intel_dvo_device *dvo)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+	int i;
+
+	DRM_DEBUG_KMS("Resetting the IVCH registers\n");
+
+	ivch_write(dvo, VR10, 0x0000);
+
+	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+		ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
+}
+
 /** Sets the power state of the panel connected to the ivch */
 static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	int i;
 	uint16_t vr01, vr30, backlight;
 
+	ivch_reset(dvo);
+
 	/* Set the new power state of the panel. */
 	if (!ivch_read(dvo, VR01, &vr01))
 		return;
@@ -308,6 +354,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 		backlight = 1;
 	else
 		backlight = 0;
+
 	ivch_write(dvo, VR80, backlight);
 
 	if (enable)
@@ -334,6 +381,8 @@ static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
 {
 	uint16_t vr01;
 
+	ivch_reset(dvo);
+
 	/* Set the new power state of the panel. */
 	if (!ivch_read(dvo, VR01, &vr01))
 		return false;
@@ -348,11 +397,15 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
 			  struct drm_display_mode *mode,
 			  struct drm_display_mode *adjusted_mode)
 {
+	struct ivch_priv *priv = dvo->dev_priv;
 	uint16_t vr40 = 0;
 	uint16_t vr01 = 0;
 	uint16_t vr10;
 
-	ivch_read(dvo, VR10, &vr10);
+	ivch_reset(dvo);
+
+	vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];
+
 	/* Enable dithering for 18 bpp pipelines */
 	vr10 &= VR10_INTERFACE_DEPTH_MASK;
 	if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
@@ -366,7 +419,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
 		uint16_t x_ratio, y_ratio;
 
 		vr01 |= VR01_PANEL_FIT_ENABLE;
-		vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING;
+		vr40 |= VR40_CLOCK_GATING_ENABLE;
 		x_ratio = (((mode->hdisplay - 1) << 16) /
 			   (adjusted_mode->hdisplay - 1)) >> 2;
 		y_ratio = (((mode->vdisplay - 1) << 16) /
@@ -381,8 +434,6 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
 
 	ivch_write(dvo, VR01, vr01);
 	ivch_write(dvo, VR40, vr40);
-
-	ivch_dump_regs(dvo);
 }
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo)
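
The ivch changes above implement a table-driven save/restore: every volatile register is read into reg_backup[] once at init, and ivch_reset() replays them before each DPMS, mode-set and hardware-state query, since some BIOSes lose the DVO state across standby. Restated as a self-contained sketch using the same table and accessors; the write order matters, which is why 0x10 (VR10) is kept last in backup_addresses[]:

/* Sketch: generic save/restore over a fixed register table. */
static void ivch_save_regs(struct intel_dvo_device *dvo, uint16_t *backup)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
		ivch_read(dvo, backup_addresses[i], &backup[i]);
}

static void ivch_restore_regs(struct intel_dvo_device *dvo,
			      const uint16_t *backup)
{
	int i;

	/* Replay in table order; VR10 deliberately comes last. */
	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
		ivch_write(dvo, backup_addresses[i], backup[i]);
}
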
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 306d9e4e5cf3..237ff6884a22 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -131,7 +131,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
 			.mask = MI_GLOBAL_GTT,
 			.expected = 0,
 	      }},						       ),
-	CMD(  MI_LOAD_REGISTER_MEM,             SMI,   !F,  0xFF,   W | B,
+	CMD(  MI_LOAD_REGISTER_MEM(1),             SMI,   !F,  0xFF,   W | B,
 	      .reg = { .offset = 1, .mask = 0x007FFFFC },
 	      .bits = {{
 			.offset = 0,
@@ -151,8 +151,8 @@ static const struct drm_i915_cmd_descriptor render_cmds[] = {
 	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
 	CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
 	CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
-	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
 	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
@@ -564,7 +564,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
 
 		for (j = 0; j < table->count; j++) {
 			const struct drm_i915_cmd_descriptor *desc =
-				&table->table[i];
+				&table->table[j];
 			u32 curr = desc->cmd.value & desc->cmd.mask;
 
 			if (curr < previous) {
@@ -1021,7 +1021,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 			 * only MI_LOAD_REGISTER_IMM commands.
 			 */
 			if (reg_addr == OACONTROL) {
-				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+				if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
 					DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
 					return false;
 				}
@@ -1035,7 +1035,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 			 * allowed mask/value pair given in the whitelist entry.
 			 */
 			if (reg->mask) {
-				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+				if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
 					DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
 							 reg_addr);
 					return false;
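
The validate_cmds_sorted() hunk above is a one-character fix: indexing the table with the outer-loop variable i meant the inner loop compared the same descriptor against itself on every iteration, so the monotonicity check could never fire. The intended invariant, as a standalone sketch over the struct names used in this file:

static bool cmd_table_is_sorted(const struct drm_i915_cmd_table *table)
{
	u32 previous = 0;
	int j;

	for (j = 0; j < table->count; j++) {
		const struct drm_i915_cmd_descriptor *desc = &table->table[j];
		u32 curr = desc->cmd.value & desc->cmd.mask;

		if (curr < previous)
			return false;	/* not sorted by masked opcode */

		previous = curr;
	}

	return true;
}
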
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 82bbe3f2a7e1..e3ec9049081f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,6 +117,20 @@ static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
 }
 
+static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
+{
+	u64 size = 0;
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    drm_mm_node_allocated(&vma->node))
+			size += vma->node.size;
+	}
+
+	return size;
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
@@ -156,13 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (!i915_is_ggtt(vma->vm))
-			seq_puts(m, " (pp");
+		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+			   i915_is_ggtt(vma->vm) ? "g" : "pp",
+			   vma->node.start, vma->node.size);
+		if (i915_is_ggtt(vma->vm))
+			seq_printf(m, ", type: %u)", vma->ggtt_view.type);
 		else
-			seq_puts(m, " (g");
-		seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
-			   vma->node.start, vma->node.size,
-			   vma->ggtt_view.type);
+			seq_puts(m, ")");
 	}
 	if (obj->stolen)
 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -198,7 +212,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct i915_vma *vma;
-	size_t total_obj_size, total_gtt_size;
+	u64 total_obj_size, total_gtt_size;
 	int count, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -231,7 +245,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
 	return 0;
 }
@@ -253,7 +267,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	size_t total_obj_size, total_gtt_size;
+	u64 total_obj_size, total_gtt_size;
 	LIST_HEAD(stolen);
 	int count, ret;
 
@@ -269,7 +283,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		list_add(&obj->obj_exec_link, &stolen);
 
 		total_obj_size += obj->base.size;
-		total_gtt_size += i915_gem_obj_ggtt_size(obj);
+		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 		count++;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
@@ -292,14 +306,14 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
 	return 0;
 }
 
 #define count_objects(list, member) do { \
 	list_for_each_entry(obj, list, member) { \
-		size += i915_gem_obj_ggtt_size(obj); \
+		size += i915_gem_obj_total_ggtt_size(obj); \
 		++count; \
 		if (obj->map_and_fenceable) { \
 			mappable_size += i915_gem_obj_ggtt_size(obj); \
@@ -310,10 +324,10 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 
 struct file_stats {
 	struct drm_i915_file_private *file_priv;
-	int count;
-	size_t total, unbound;
-	size_t global, shared;
-	size_t active, inactive;
+	unsigned long count;
+	u64 total, unbound;
+	u64 global, shared;
+	u64 active, inactive;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
@@ -370,7 +384,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 
 #define print_file_stats(m, name, stats) do { \
 	if (stats.count) \
-		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
 			   name, \
 			   stats.count, \
 			   stats.total, \
@@ -405,7 +419,7 @@ static void print_batch_pool_stats(struct seq_file *m,
 
 #define count_vmas(list, member) do { \
 	list_for_each_entry(vma, list, member) { \
-		size += i915_gem_obj_ggtt_size(vma->obj); \
+		size += i915_gem_obj_total_ggtt_size(vma->obj); \
 		++count; \
 		if (vma->obj->map_and_fenceable) { \
 			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -420,7 +434,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 count, mappable_count, purgeable_count;
-	size_t size, mappable_size, purgeable_size;
+	u64 size, mappable_size, purgeable_size;
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_file *file;
@@ -437,17 +451,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 
 	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.bound_list, global_list);
-	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
 	count_vmas(&vm->active_list, mm_list);
-	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
+	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
 	count_vmas(&vm->inactive_list, mm_list);
-	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
+	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = purgeable_size = purgeable_count = 0;
@@ -456,7 +470,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		if (obj->madv == I915_MADV_DONTNEED)
 			purgeable_size += obj->base.size, ++purgeable_count;
 	}
-	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
 	size = count = mappable_size = mappable_count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -473,16 +487,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 			++purgeable_count;
 		}
 	}
-	seq_printf(m, "%u purgeable objects, %zu bytes\n",
+	seq_printf(m, "%u purgeable objects, %llu bytes\n",
 		   purgeable_count, purgeable_size);
-	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
 		   mappable_count, mappable_size);
-	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
 		   count, size);
 
-	seq_printf(m, "%zu [%lu] gtt total\n",
+	seq_printf(m, "%llu [%llu] gtt total\n",
 		   dev_priv->gtt.base.total,
-		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
 
 	seq_putc(m, '\n');
 	print_batch_pool_stats(m, dev_priv);
@@ -519,7 +533,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	size_t total_obj_size, total_gtt_size;
+	u64 total_obj_size, total_gtt_size;
 	int count, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -535,13 +549,13 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 		describe_obj(m, obj);
 		seq_putc(m, '\n');
 		total_obj_size += obj->base.size;
-		total_gtt_size += i915_gem_obj_ggtt_size(obj);
+		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 		count++;
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
 
 	return 0;
@@ -1132,9 +1146,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
 		   IS_BROADWELL(dev) || IS_GEN9(dev)) {
-		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
-		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
-		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		u32 rp_state_limits;
+		u32 gt_perf_status;
+		u32 rp_state_cap;
 		u32 rpmodectl, rpinclimit, rpdeclimit;
 		u32 rpstat, cagf, reqf;
 		u32 rpupei, rpcurup, rpprevup;
@@ -1142,6 +1156,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
 		int max_freq;
 
+		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+		if (IS_BROXTON(dev)) {
+			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+		} else {
+			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+		}
+
 		/* RPSTAT1 is in the GT power well */
 		ret = mutex_lock_interruptible(&dev->struct_mutex);
 		if (ret)
@@ -1229,7 +1252,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "Down threshold: %d%%\n",
 			   dev_priv->rps.down_threshold);
 
-		max_freq = (rp_state_cap & 0xff0000) >> 16;
+		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+			    rp_state_cap >> 16) & 0xff;
 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
@@ -1239,7 +1263,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
-		max_freq = rp_state_cap & 0xff;
+		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+			    rp_state_cap >> 0) & 0xff;
 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
@@ -1581,6 +1606,21 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 		return ironlake_drpc_info(m);
 }
 
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+		   dev_priv->fb_tracking.busy_bits);
+
+	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+		   dev_priv->fb_tracking.flip_bits);
+
+	return 0;
+}
+
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1593,51 +1633,20 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	}
 
 	intel_runtime_pm_get(dev_priv);
+	mutex_lock(&dev_priv->fbc.lock);
 
-	if (intel_fbc_enabled(dev)) {
+	if (intel_fbc_enabled(dev_priv))
 		seq_puts(m, "FBC enabled\n");
-	} else {
-		seq_puts(m, "FBC disabled: ");
-		switch (dev_priv->fbc.no_fbc_reason) {
-		case FBC_OK:
-			seq_puts(m, "FBC actived, but currently disabled in hardware");
-			break;
-		case FBC_UNSUPPORTED:
-			seq_puts(m, "unsupported by this chipset");
-			break;
-		case FBC_NO_OUTPUT:
-			seq_puts(m, "no outputs");
-			break;
-		case FBC_STOLEN_TOO_SMALL:
-			seq_puts(m, "not enough stolen memory");
-			break;
-		case FBC_UNSUPPORTED_MODE:
-			seq_puts(m, "mode not supported");
-			break;
-		case FBC_MODE_TOO_LARGE:
-			seq_puts(m, "mode too large");
-			break;
-		case FBC_BAD_PLANE:
-			seq_puts(m, "FBC unsupported on plane");
-			break;
-		case FBC_NOT_TILED:
-			seq_puts(m, "scanout buffer not tiled");
-			break;
-		case FBC_MULTIPLE_PIPES:
-			seq_puts(m, "multiple pipes are enabled");
-			break;
-		case FBC_MODULE_PARAM:
-			seq_puts(m, "disabled per module param (default off)");
-			break;
-		case FBC_CHIP_DEFAULT:
-			seq_puts(m, "disabled per chip default");
-			break;
-		default:
-			seq_puts(m, "unknown reason");
-		}
-		seq_putc(m, '\n');
-	}
+	else
+		seq_printf(m, "FBC disabled: %s\n",
+			  intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
+
+	if (INTEL_INFO(dev_priv)->gen >= 7)
+		seq_printf(m, "Compressing: %s\n",
+			   yesno(I915_READ(FBC_STATUS2) &
+				 FBC_COMPRESSION_MASK));
 
+	mutex_unlock(&dev_priv->fbc.lock);
 	intel_runtime_pm_put(dev_priv);
 
 	return 0;
@@ -1651,9 +1660,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
 		return -ENODEV;
 
-	drm_modeset_lock_all(dev);
 	*val = dev_priv->fbc.false_color;
-	drm_modeset_unlock_all(dev);
 
 	return 0;
 }
@@ -1667,7 +1674,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
 		return -ENODEV;
 
-	drm_modeset_lock_all(dev);
+	mutex_lock(&dev_priv->fbc.lock);
 
 	reg = I915_READ(ILK_DPFC_CONTROL);
 	dev_priv->fbc.false_color = val;
@@ -1676,7 +1683,7 @@ static int i915_fbc_fc_set(void *data, u64 val)
 		   (reg | FBC_CTL_FALSE_COLOR) :
 		   (reg & ~FBC_CTL_FALSE_COLOR));
 
-	drm_modeset_unlock_all(dev);
+	mutex_unlock(&dev_priv->fbc.lock);
 	return 0;
 }
 
@@ -1778,8 +1785,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 	int gpu_freq, ia_freq;
+	unsigned int max_gpu_freq, min_gpu_freq;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+	if (!HAS_CORE_RING_FREQ(dev)) {
 		seq_puts(m, "unsupported on this chipset\n");
 		return 0;
 	}
@@ -1792,17 +1800,27 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	if (ret)
 		goto out;
 
+	if (IS_SKYLAKE(dev)) {
+		/* Convert GT frequency to 50 HZ units */
+		min_gpu_freq =
+			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
+		max_gpu_freq =
+			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+	} else {
+		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
+		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+	}
+
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
-	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
-	     gpu_freq++) {
+	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
 		ia_freq = gpu_freq;
 		sandybridge_pcode_read(dev_priv,
 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
 				       &ia_freq);
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
-			   intel_gpu_freq(dev_priv, gpu_freq),
+			   intel_gpu_freq(dev_priv, (gpu_freq *
+				(IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}
@@ -1848,8 +1866,9 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct intel_fbdev *ifbdev = NULL;
 	struct intel_framebuffer *fb;
+	struct drm_framebuffer *drm_fb;
 
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ifbdev = dev_priv->fbdev;
@@ -1867,7 +1886,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 #endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
-	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+	drm_for_each_fb(drm_fb, dev) {
+		fb = to_intel_framebuffer(drm_fb);
 		if (ifbdev && &fb->base == ifbdev->helper.fb)
 			continue;
 
@@ -2248,7 +2268,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		seq_puts(m, "aliasing PPGTT:\n");
-		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
+		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
 
 		ppgtt->debug_dump(ppgtt, m);
 	}
@@ -2479,13 +2499,13 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int i915_pc8_status(struct seq_file *m, void *unused)
+static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+	if (!HAS_RUNTIME_PM(dev)) {
 		seq_puts(m, "not supported\n");
 		return 0;
 	}
@@ -2493,6 +2513,12 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
 	seq_printf(m, "IRQs disabled: %s\n",
 		   yesno(!intel_irqs_enabled(dev_priv)));
+#ifdef CONFIG_PM
+	seq_printf(m, "Usage count: %d\n",
+		   atomic_read(&dev->dev->power.usage_count));
+#else
+	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
+#endif
 
 	return 0;
 }
@@ -2536,6 +2562,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 		return "PORT_DDI_D_2_LANES";
 	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
 		return "PORT_DDI_D_4_LANES";
+	case POWER_DOMAIN_PORT_DDI_E_2_LANES:
+		return "PORT_DDI_E_2_LANES";
 	case POWER_DOMAIN_PORT_DSI:
 		return "PORT_DSI";
 	case POWER_DOMAIN_PORT_CRT:
@@ -2780,13 +2808,16 @@ static int i915_display_info(struct seq_file *m, void *unused)
 	seq_printf(m, "---------\n");
 	for_each_intel_crtc(dev, crtc) {
 		bool active;
+		struct intel_crtc_state *pipe_config;
 		int x, y;
 
+		pipe_config = to_intel_crtc_state(crtc->base.state);
+
 		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
 			   crtc->base.base.id, pipe_name(crtc->pipe),
-			   yesno(crtc->active), crtc->config->pipe_src_w,
-			   crtc->config->pipe_src_h);
-		if (crtc->active) {
+			   yesno(pipe_config->base.active),
+			   pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+		if (pipe_config->base.active) {
 			intel_crtc_info(m, crtc);
 
 			active = cursor_position(dev, crtc->pipe, &x, &y);
@@ -3027,7 +3058,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
 
 	seq_puts(m, "\n\n");
 
-	if (intel_crtc->config->has_drrs) {
+	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
 		struct intel_panel *panel;
 
 		mutex_lock(&drrs->mutex);
@@ -3079,7 +3110,7 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
 	for_each_intel_crtc(dev, intel_crtc) {
 		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
 
-		if (intel_crtc->active) {
+		if (intel_crtc->base.state->active) {
 			active_crtc_cnt++;
 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
 
@@ -3616,53 +3647,40 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 	return 0;
 }
 
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+	struct intel_crtc_state *pipe_config;
+	struct drm_atomic_state *state;
+	int ret = 0;
 
 	drm_modeset_lock_all(dev);
-	/*
-	 * If we use the eDP transcoder we need to make sure that we don't
-	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
-	 * relevant on hsw with pipe A when using the always-on power well
-	 * routing.
-	 */
-	if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
-	    !crtc->config->pch_pfit.enabled) {
-		crtc->config->pch_pfit.force_thru = true;
-
-		intel_display_power_get(dev_priv,
-					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
-
-		intel_crtc_reset(crtc);
+	state = drm_atomic_state_alloc(dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out;
 	}
-	drm_modeset_unlock_all(dev);
-}
-
-static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc =
-		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
 
-	drm_modeset_lock_all(dev);
-	/*
-	 * If we use the eDP transcoder we need to make sure that we don't
-	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
-	 * relevant on hsw with pipe A when using the always-on power well
-	 * routing.
-	 */
-	if (crtc->config->pch_pfit.force_thru) {
-		crtc->config->pch_pfit.force_thru = false;
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+	pipe_config = intel_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(pipe_config)) {
+		ret = PTR_ERR(pipe_config);
+		goto out;
+	}
 
-		intel_crtc_reset(crtc);
+	pipe_config->pch_pfit.force_thru = enable;
+	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+	    pipe_config->pch_pfit.enabled != enable)
+		pipe_config->base.connectors_changed = true;
 
-		intel_display_power_put(dev_priv,
-					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
-	}
+	ret = drm_atomic_commit(state);
+out:
 	drm_modeset_unlock_all(dev);
+	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+	if (ret)
+		drm_atomic_state_free(state);
 }
 
 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
@@ -3682,7 +3700,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
 		if (IS_HASWELL(dev) && pipe == PIPE_A)
-			hsw_trans_edp_pipe_A_crc_wa(dev);
+			hsw_trans_edp_pipe_A_crc_wa(dev, true);
 
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
@@ -3776,7 +3794,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 				 pipe_name(pipe));
 
 		drm_modeset_lock(&crtc->base.mutex, NULL);
-		if (crtc->active)
+		if (crtc->base.state->active)
 			intel_wait_for_vblank(dev, pipe);
 		drm_modeset_unlock(&crtc->base.mutex);
 
@@ -3794,7 +3812,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 		else if (IS_VALLEYVIEW(dev))
 			vlv_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_HASWELL(dev) && pipe == PIPE_A)
-			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+			hsw_trans_edp_pipe_A_crc_wa(dev, false);
 
 		hsw_enable_ips(crtc);
 	}
@@ -3980,24 +3998,14 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 {
 	char *input_buffer;
 	int status = 0;
-	struct seq_file *m;
 	struct drm_device *dev;
 	struct drm_connector *connector;
 	struct list_head *connector_list;
 	struct intel_dp *intel_dp;
 	int val = 0;
 
-	m = file->private_data;
-	if (!m) {
-		status = -ENODEV;
-		return status;
-	}
-	dev = m->private;
+	dev = ((struct seq_file *)file->private_data)->private;
 
-	if (!dev) {
-		status = -ENODEV;
-		return status;
-	}
 	connector_list = &dev->mode_config.connector_list;
 
 	if (len == 0)
@@ -4021,9 +4029,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
 
-		if (connector->connector_type ==
-		    DRM_MODE_CONNECTOR_DisplayPort &&
-		    connector->status == connector_status_connected &&
+		if (connector->status == connector_status_connected &&
 		    connector->encoder != NULL) {
 			intel_dp = enc_to_intel_dp(connector->encoder);
 			status = kstrtoint(input_buffer, 10, &val);
@@ -4055,9 +4061,6 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
 	struct list_head *connector_list = &dev->mode_config.connector_list;
 	struct intel_dp *intel_dp;
 
-	if (!dev)
-		return -ENODEV;
-
 	list_for_each_entry(connector, connector_list, head) {
 
 		if (connector->connector_type !=
@@ -4102,9 +4105,6 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
 	struct list_head *connector_list = &dev->mode_config.connector_list;
 	struct intel_dp *intel_dp;
 
-	if (!dev)
-		return -ENODEV;
-
 	list_for_each_entry(connector, connector_list, head) {
 
 		if (connector->connector_type !=
@@ -4144,9 +4144,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
 	struct list_head *connector_list = &dev->mode_config.connector_list;
 	struct intel_dp *intel_dp;
 
-	if (!dev)
-		return -ENODEV;
-
 	list_for_each_entry(connector, connector_list, head) {
 
 		if (connector->connector_type !=
@@ -4183,8 +4180,15 @@ static const struct file_operations i915_displayport_test_type_fops = {
 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
 	struct drm_device *dev = m->private;
-	int num_levels = ilk_wm_max_level(dev) + 1;
 	int level;
+	int num_levels;
+
+	if (IS_CHERRYVIEW(dev))
+		num_levels = 3;
+	else if (IS_VALLEYVIEW(dev))
+		num_levels = 1;
+	else
+		num_levels = ilk_wm_max_level(dev) + 1;
 
 	drm_modeset_lock_all(dev);
 
@@ -4193,9 +4197,9 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 
 		/*
 		 * - WM1+ latency values in 0.5us units
-		 * - latencies are in us on gen9
+		 * - latencies are in us on gen9/vlv/chv
 		 */
-		if (INTEL_INFO(dev)->gen >= 9)
+		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
 			latency *= 10;
 		else if (level > 0)
 			latency *= 5;
@@ -4259,7 +4263,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (HAS_GMCH_DISPLAY(dev))
+	if (INTEL_INFO(dev)->gen < 5)
 		return -ENODEV;
 
 	return single_open(file, pri_wm_latency_show, dev);
@@ -4291,11 +4295,18 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
 	struct seq_file *m = file->private_data;
 	struct drm_device *dev = m->private;
 	uint16_t new[8] = { 0 };
-	int num_levels = ilk_wm_max_level(dev) + 1;
+	int num_levels;
 	int level;
 	int ret;
 	char tmp[32];
 
+	if (IS_CHERRYVIEW(dev))
+		num_levels = 3;
+	else if (IS_VALLEYVIEW(dev))
+		num_levels = 1;
+	else
+		num_levels = ilk_wm_max_level(dev) + 1;
+
 	if (len >= sizeof(tmp))
 		return -EINVAL;
 
@@ -5027,6 +5038,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
+	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_ips_status", i915_ips_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
@@ -5042,7 +5054,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
-	{"i915_pc8_status", i915_pc8_status, 0},
+	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
 	{"i915_display_info", i915_display_info, 0},
 	{"i915_semaphore_status", i915_semaphore_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d2df321ba634..ab37d1121be8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -163,6 +163,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		if (!value)
 			return -ENODEV;
 		break;
+	case I915_PARAM_HAS_GPU_RESET:
+		value = i915.enable_hangcheck &&
+			intel_has_gpu_reset(dev);
+		break;
+	case I915_PARAM_HAS_RESOURCE_STREAMER:
+		value = HAS_RESOURCE_STREAMER(dev);
+		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
@@ -719,11 +726,19 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 
 	info = (struct intel_device_info *)&dev_priv->info;
 
+	/*
+	 * Skylake and Broxton currently don't expose the topmost plane as its
+	 * use is exclusive with the legacy cursor and we only want to expose
+	 * one of those, not both. Until we can safely expose the topmost plane
+	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+	 * we don't expose the topmost plane at all to prevent ABI breakage
+	 * down the line.
+	 */
 	if (IS_BROXTON(dev)) {
-		info->num_sprites[PIPE_A] = 3;
-		info->num_sprites[PIPE_B] = 3;
-		info->num_sprites[PIPE_C] = 2;
-	} else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+		info->num_sprites[PIPE_A] = 2;
+		info->num_sprites[PIPE_B] = 2;
+		info->num_sprites[PIPE_C] = 1;
+	} else if (IS_VALLEYVIEW(dev))
 		for_each_pipe(dev_priv, pipe)
 			info->num_sprites[pipe] = 2;
 	else
@@ -933,8 +948,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_mtrrfree;
 	}
 
-	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-	if (dev_priv->dp_wq == NULL) {
+	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+	if (dev_priv->hotplug.dp_wq == NULL) {
 		DRM_ERROR("Failed to create our dp workqueue.\n");
 		ret = -ENOMEM;
 		goto out_freewq;
@@ -1029,7 +1044,7 @@ out_gem_unload:
 	pm_qos_remove_request(&dev_priv->pm_qos);
 	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 out_freedpwq:
-	destroy_workqueue(dev_priv->dp_wq);
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
 out_freewq:
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
@@ -1116,6 +1131,7 @@ int i915_driver_unload(struct drm_device *dev)
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
+	intel_fbc_cleanup_cfb(dev_priv);
 	i915_gem_cleanup_stolen(dev);
 
 	intel_csr_ucode_fini(dev);
@@ -1123,7 +1139,7 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
-	destroy_workqueue(dev_priv->dp_wq);
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
 	destroy_workqueue(dev_priv->wq);
 	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1258,13 +1274,3 @@ const struct drm_ioctl_desc i915_ioctls[] = {
 };
 
 int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
-
-/*
- * This is really ugly: Because old userspace abused the linux agp interface to
- * manage the gtt, we need to claim that all intel devices are agp.  For
- * otherwise the drm core refuses to initialize the agp support code.
- */
-int i915_driver_device_is_agp(struct drm_device *dev)
-{
-	return 1;
-}
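
The two new GETPARAM cases above are reachable from userspace through the standard DRM_IOCTL_I915_GETPARAM ioctl; a kernel without this change answers -EINVAL for unknown parameters, which callers should treat as "not supported". A hedged userspace-side sketch (header path varies between kernel uapi and libdrm):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* or libdrm's i915_drm.h */

/* Returns 1 if the kernel reports working GPU reset, 0 otherwise. */
static int i915_has_gpu_reset(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_GPU_RESET;
	gp.value = &value;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;	/* older kernel: parameter unknown (-EINVAL) */

	return value;
}
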
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8917c98ff121..ab64d68388f2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,7 +356,6 @@ static const struct intel_device_info intel_cherryview_info = {
 };
 
 static const struct intel_device_info intel_skylake_info = {
-	.is_preliminary = 1,
 	.is_skylake = 1,
 	.gen = 9, .num_pipes = 3,
 	.need_gfx_hws = 1, .has_hotplug = 1,
@@ -369,7 +368,6 @@ static const struct intel_device_info intel_skylake_info = {
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
-	.is_preliminary = 1,
 	.is_skylake = 1,
 	.gen = 9, .num_pipes = 3,
 	.need_gfx_hws = 1, .has_hotplug = 1,
@@ -440,9 +438,7 @@ static const struct pci_device_id pciidlist[] = {		/* aka */
 	{0, 0, 0}
 };
 
-#if defined(CONFIG_DRM_I915_KMS)
 MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
 
 void intel_detect_pch(struct drm_device *dev)
 {
@@ -541,21 +537,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	return true;
 }
 
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
-{
-	spin_lock_irq(&dev_priv->irq_lock);
-
-	dev_priv->long_hpd_port_mask = 0;
-	dev_priv->short_hpd_port_mask = 0;
-	dev_priv->hpd_event_bits = 0;
-
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	cancel_work_sync(&dev_priv->dig_port_work);
-	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
-}
-
 void i915_firmware_load_error_print(const char *fw_path, int err)
 {
 	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
@@ -601,7 +582,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
 static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
 	pci_power_t opregion_target_state;
 	int error;
 
@@ -632,8 +612,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 	 * for _thaw. Also, power gate the CRTC power wells.
 	 */
 	drm_modeset_lock_all(dev);
-	for_each_crtc(dev, crtc)
-		intel_crtc_control(crtc, false);
+	intel_display_suspend(dev);
 	drm_modeset_unlock_all(dev);
 
 	intel_dp_mst_suspend(dev);
@@ -683,15 +662,18 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 
 	pci_disable_device(drm_dev->pdev);
 	/*
-	 * During hibernation on some GEN4 platforms the BIOS may try to access
+	 * During hibernation on some platforms the BIOS may try to access
 	 * the device even though it's already in D3 and hang the machine. So
 	 * leave the device in D0 on those platforms and hope the BIOS will
-	 * power down the device properly. Platforms where this was seen:
-	 * Lenovo Thinkpad X301, X61s
+	 * power down the device properly. The issue was seen on multiple old
+	 * GENs with different BIOS vendors, so having an explicit blacklist
+	 * is impractical; apply the workaround on everything pre-GEN6. The
+	 * platforms where the issue was seen:
+	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
+	 * Fujitsu FSC S7110
+	 * Acer Aspire 1830T
 	 */
-	if (!(hibernation &&
-	      drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
-	      INTEL_INFO(dev_priv)->gen == 4))
+	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
 		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
 	return 0;
@@ -760,7 +742,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	drm_modeset_lock_all(dev);
-	intel_modeset_setup_hw_state(dev, true);
+	intel_display_resume(dev);
 	drm_modeset_unlock_all(dev);
 
 	intel_dp_mst_resume(dev);
@@ -865,9 +847,6 @@ int i915_reset(struct drm_device *dev)
 	bool simulated;
 	int ret;
 
-	if (!i915.reset)
-		return 0;
-
 	intel_reset_gt_powersave(dev);
 
 	mutex_lock(&dev->struct_mutex);
@@ -959,8 +938,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (PCI_FUNC(pdev->devfn))
 		return -ENODEV;
 
-	driver.driver_features &= ~(DRIVER_USE_AGP);
-
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -1515,7 +1492,15 @@ static int intel_runtime_suspend(struct device *device)
 	 * FIXME: We really should find a document that references the arguments
 	 * used below!
 	 */
-	if (IS_HASWELL(dev)) {
+	if (IS_BROADWELL(dev)) {
+		/*
+		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+		 * being detected, and the call we do at intel_runtime_resume()
+		 * won't be able to restore them. Since PCI_D3hot matches the
+		 * actual specification and appears to be working, use it.
+		 */
+		intel_opregion_notify_adapter(dev, PCI_D3hot);
+	} else {
 		/*
 		 * current versions of firmware which depend on this opregion
 		 * notification have repurposed the D1 definition to mean
@@ -1524,16 +1509,6 @@ static int intel_runtime_suspend(struct device *device)
 		 * the suspend path.
 		 */
 		intel_opregion_notify_adapter(dev, PCI_D1);
-	} else {
-		/*
-		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
-		 * being detected, and the call we do at intel_runtime_resume()
-		 * won't be able to restore them. Since PCI_D3hot matches the
-		 * actual specification and appears to be working, use it. Let's
-		 * assume the other non-Haswell platforms will stay the same as
-		 * Broadwell.
-		 */
-		intel_opregion_notify_adapter(dev, PCI_D3hot);
 	}
 
 	assert_forcewakes_inactive(dev_priv);
@@ -1673,7 +1648,6 @@ static struct drm_driver driver = {
 	 * deal with them for Intel hardware.
 	 */
 	.driver_features =
-	    DRIVER_USE_AGP |
 	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
 	    DRIVER_RENDER,
 	.load = i915_driver_load,
@@ -1688,7 +1662,6 @@ static struct drm_driver driver = {
 	.suspend = i915_suspend_legacy,
 	.resume = i915_resume_legacy,
 
-	.device_is_agp = i915_driver_device_is_agp,
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = i915_debugfs_init,
 	.debugfs_cleanup = i915_debugfs_cleanup,
@@ -1727,20 +1700,14 @@ static int __init i915_init(void)
 	driver.num_ioctls = i915_max_ioctl;
 
 	/*
-	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
-	 * explicitly disabled with the module pararmeter.
-	 *
-	 * Otherwise, just follow the parameter (defaulting to off).
-	 *
-	 * Allow optional vga_text_mode_force boot option to override
-	 * the default behavior.
+	 * Enable KMS by default, unless explicitly overridden by
+	 * either the i915.modeset parameter or the
+	 * vga_text_mode_force boot option.
 	 */
-#if defined(CONFIG_DRM_I915_KMS)
-	if (i915.modeset != 0)
-		driver.driver_features |= DRIVER_MODESET;
-#endif
-	if (i915.modeset == 1)
-		driver.driver_features |= DRIVER_MODESET;
+	driver.driver_features |= DRIVER_MODESET;
+
+	if (i915.modeset == 0)
+		driver.driver_features &= ~DRIVER_MODESET;
 
 #ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force() && i915.modeset == -1)
@@ -1759,7 +1726,7 @@ static int __init i915_init(void)
 	 * to the atomic ioctl and the atomic properties.  Only plane operations on
 	 * a single CRTC will actually work.
 	 */
-	if (i915.nuclear_pageflip)
+	if (driver.driver_features & DRIVER_MODESET)
 		driver.driver_features |= DRIVER_ATOMIC;
 
 	return drm_pci_init(&driver, &i915_pci_driver);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1fc327d3421e..81adf89b92f1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150522"
+#define DRIVER_DATE		"20150731"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -182,6 +182,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_PORT_DDI_C_4_LANES,
 	POWER_DOMAIN_PORT_DDI_D_2_LANES,
 	POWER_DOMAIN_PORT_DDI_D_4_LANES,
+	POWER_DOMAIN_PORT_DDI_E_2_LANES,
 	POWER_DOMAIN_PORT_DSI,
 	POWER_DOMAIN_PORT_CRT,
 	POWER_DOMAIN_PORT_OTHER,
@@ -206,17 +207,51 @@ enum intel_display_power_domain {
 
 enum hpd_pin {
 	HPD_NONE = 0,
-	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
 	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
 	HPD_CRT,
 	HPD_SDVO_B,
 	HPD_SDVO_C,
+	HPD_PORT_A,
 	HPD_PORT_B,
 	HPD_PORT_C,
 	HPD_PORT_D,
+	HPD_PORT_E,
 	HPD_NUM_PINS
 };
 
+#define for_each_hpd_pin(__pin) \
+	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+
+struct i915_hotplug {
+	struct work_struct hotplug_work;
+
+	struct {
+		unsigned long last_jiffies;
+		int count;
+		enum {
+			HPD_ENABLED = 0,
+			HPD_DISABLED = 1,
+			HPD_MARK_DISABLED = 2
+		} state;
+	} stats[HPD_NUM_PINS];
+	u32 event_bits;
+	struct delayed_work reenable_work;
+
+	struct intel_digital_port *irq_port[I915_MAX_PORTS];
+	u32 long_port_mask;
+	u32 short_port_mask;
+	struct work_struct dig_port_work;
+
+	/*
+	 * If we get an HPD irq from DP and an HPD irq from non-DP,
+	 * the non-DP HPD could block the workqueue on acquiring a
+	 * mode config mutex that userspace may have taken. However,
+	 * userspace is waiting on the DP workqueue to run, which is
+	 * blocked behind the non-DP one.
+	 */
+	struct workqueue_struct *dp_wq;
+};
+
 #define I915_GEM_GPU_DOMAINS \
 	(I915_GEM_DOMAIN_RENDER | \
 	 I915_GEM_DOMAIN_SAMPLER | \
@@ -243,6 +278,12 @@ enum hpd_pin {
 			    &dev->mode_config.plane_list,	\
 			    base.head)
 
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
+	list_for_each_entry(intel_plane,				\
+			    &(dev)->mode_config.plane_list,		\
+			    base.head)					\
+		if ((intel_plane)->pipe == (intel_crtc)->pipe)
+
 #define for_each_intel_crtc(dev, intel_crtc) \
 	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
@@ -333,7 +374,8 @@ struct intel_dpll_hw_state {
 	uint32_t cfgcr1, cfgcr2;
 
 	/* bxt */
-	uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
+	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+		 pcsdw12;
 };
 
 struct intel_shared_dpll_config {
@@ -343,7 +385,6 @@ struct intel_shared_dpll_config {
 
 struct intel_shared_dpll {
 	struct intel_shared_dpll_config config;
-	struct intel_shared_dpll_config *new_config;
 
 	int active; /* count of number of active CRTCs (i.e. DPMS on) */
 	bool on; /* is the PLL actually active? Disabled during modeset */
@@ -445,6 +486,7 @@ struct drm_i915_error_state {
 	struct timeval time;
 
 	char error_msg[128];
+	int iommu;
 	u32 reset_count;
 	u32 suspend_count;
 
@@ -559,9 +601,6 @@ struct intel_limit;
 struct dpll;
 
 struct drm_i915_display_funcs {
-	bool (*fbc_enabled)(struct drm_device *dev);
-	void (*enable_fbc)(struct drm_crtc *crtc);
-	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
 	/**
@@ -587,7 +626,8 @@ struct drm_i915_display_funcs {
 				 struct drm_crtc *crtc,
 				 uint32_t sprite_width, uint32_t sprite_height,
 				 int pixel_size, bool enable, bool scaled);
-	void (*modeset_global_resources)(struct drm_atomic_state *state);
+	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
@@ -598,7 +638,6 @@ struct drm_i915_display_funcs {
 				  struct intel_crtc_state *crtc_state);
 	void (*crtc_enable)(struct drm_crtc *crtc);
 	void (*crtc_disable)(struct drm_crtc *crtc);
-	void (*off)(struct drm_crtc *crtc);
 	void (*audio_codec_enable)(struct drm_connector *connector,
 				   struct intel_encoder *encoder,
 				   struct drm_display_mode *mode);
@@ -608,7 +647,7 @@ struct drm_i915_display_funcs {
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj,
-			  struct intel_engine_cs *ring,
+			  struct drm_i915_gem_request *req,
 			  uint32_t flags);
 	void (*update_primary_plane)(struct drm_crtc *crtc,
 				     struct drm_framebuffer *fb,
@@ -706,7 +745,7 @@ enum csr_state {
 
 struct intel_csr {
 	const char *fw_path;
-	__be32 *dmc_payload;
+	uint32_t *dmc_payload;
 	uint32_t dmc_fw_size;
 	uint32_t mmio_count;
 	uint32_t mmioaddr[8];
@@ -805,11 +844,15 @@ struct i915_ctx_hang_stats {
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0
+
+#define CONTEXT_NO_ZEROMAP (1<<0)
 /**
  * struct intel_context - as the name implies, represents a context.
  * @ref: reference count.
  * @user_handle: userspace tracking identity for this context.
  * @remap_slice: l3 row remapping information.
+ * @flags: context specific flags:
+ *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
  * @file_priv: filp associated with this context (NULL for global default
  *	       context).
  * @hang_stats: information about the role of this context in possible GPU
@@ -827,6 +870,7 @@ struct intel_context {
 	int user_handle;
 	uint8_t remap_slice;
 	struct drm_i915_private *i915;
+	int flags;
 	struct drm_i915_file_private *file_priv;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_hw_ppgtt *ppgtt;
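
CONTEXT_NO_ZEROMAP is consumed later on the execbuffer path. A hedged sketch
of the intent, not the exact call site (the PIN_OFFSET_BIAS usage and the
4096-byte bias here are illustrative):

	/* Keep GTT allocations for this context off page 0. */
	if (ctx->flags & CONTEXT_NO_ZEROMAP)
		flags |= PIN_OFFSET_BIAS | 4096;
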
@@ -853,9 +897,13 @@ enum fb_op_origin {
 	ORIGIN_CPU,
 	ORIGIN_CS,
 	ORIGIN_FLIP,
+	ORIGIN_DIRTYFB,
 };
 
 struct i915_fbc {
+	/* This is always the inner lock when overlapping with struct_mutex,
+	 * and the outer lock when overlapping with stolen_lock. */
+	struct mutex lock;
 	unsigned long uncompressed_size;
 	unsigned threshold;
 	unsigned int fb_id;
@@ -875,7 +923,7 @@ struct i915_fbc {
 
 	struct intel_fbc_work {
 		struct delayed_work work;
-		struct drm_crtc *crtc;
+		struct intel_crtc *crtc;
 		struct drm_framebuffer *fb;
 	} *fbc_work;
 
@@ -891,7 +939,13 @@ struct i915_fbc {
 		FBC_MULTIPLE_PIPES, /* more than one pipe active */
 		FBC_MODULE_PARAM,
 		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+		FBC_ROTATION, /* rotation is not supported */
+		FBC_IN_DBG_MASTER, /* kernel debugger is active */
 	} no_fbc_reason;
+
+	bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
+	void (*enable_fbc)(struct intel_crtc *crtc);
+	void (*disable_fbc)(struct drm_i915_private *dev_priv);
 };
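
The lock-ordering comments in this struct (and on stolen_lock below) pin
down a single legal nesting across the three locks involved. A sketch using
only the fields introduced in this patch (the helper itself is
hypothetical):

static void fbc_locking_sketch(struct drm_i915_private *dev_priv)
{
	/* struct_mutex first, fbc.lock nests inside it ... */
	mutex_lock(&dev_priv->dev->struct_mutex);
	mutex_lock(&dev_priv->fbc.lock);
	/* ... and stolen_lock nests innermost of the three. */
	mutex_lock(&dev_priv->mm.stolen_lock);

	/* ... touch compressed-fb state backed by stolen memory ... */

	mutex_unlock(&dev_priv->mm.stolen_lock);
	mutex_unlock(&dev_priv->fbc.lock);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}
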
 
 /**
@@ -1201,6 +1255,10 @@ struct intel_l3_parity {
 struct i915_gem_mm {
 	/** Memory allocator for GTT stolen memory */
 	struct drm_mm stolen;
+	/** Protects the usage of the GTT stolen memory allocator. This is
+	 * always the inner lock when overlapping with struct_mutex. */
+	struct mutex stolen_lock;
+
 	/** List of all objects in gtt_space. Used to restore gtt
 	 * mappings on resume */
 	struct list_head bound_list;
@@ -1354,6 +1412,15 @@ enum modeset_restore {
 	MODESET_SUSPENDED,
 };
 
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+
+#define DDC_PIN_B  0x05
+#define DDC_PIN_C  0x04
+#define DDC_PIN_D  0x06
+
 struct ddi_vbt_port_info {
 	/*
 	 * This is an index in the HDMI/DVI DDI buffer translation table.
@@ -1366,6 +1433,12 @@ struct ddi_vbt_port_info {
 	uint8_t supports_dvi:1;
 	uint8_t supports_hdmi:1;
 	uint8_t supports_dp:1;
+
+	uint8_t alternate_aux_channel;
+	uint8_t alternate_ddc_pin;
+
+	uint8_t dp_boost_level;
+	uint8_t hdmi_boost_level;
 };
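
The DP_AUX_* values above are the raw VBT encodings stored in
alternate_aux_channel. A decode along these lines turns them into driver
ports (a sketch: the helper name and the WARN fallback are invented here):

static enum port vbt_aux_ch_to_port(uint8_t aux_ch)
{
	switch (aux_ch) {
	case DP_AUX_A: return PORT_A;
	case DP_AUX_B: return PORT_B;
	case DP_AUX_C: return PORT_C;
	case DP_AUX_D: return PORT_D;
	default:
		WARN_ON(1);
		return PORT_A;
	}
}
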
 
 enum psr_lines_to_wait {
@@ -1461,23 +1534,27 @@ struct ilk_wm_values {
 	enum intel_ddb_partitioning partitioning;
 };
 
-struct vlv_wm_values {
-	struct {
-		uint16_t primary;
-		uint16_t sprite[2];
-		uint8_t cursor;
-	} pipe[3];
+struct vlv_pipe_wm {
+	uint16_t primary;
+	uint16_t sprite[2];
+	uint8_t cursor;
+};
 
-	struct {
-		uint16_t plane;
-		uint8_t cursor;
-	} sr;
+struct vlv_sr_wm {
+	uint16_t plane;
+	uint8_t cursor;
+};
 
+struct vlv_wm_values {
+	struct vlv_pipe_wm pipe[3];
+	struct vlv_sr_wm sr;
 	struct {
 		uint8_t cursor;
 		uint8_t sprite[2];
 		uint8_t primary;
 	} ddl[3];
+	uint8_t level;
+	bool cxsr;
 };
 
 struct skl_ddb_entry {
@@ -1611,6 +1688,18 @@ struct i915_virtual_gpu {
 	bool active;
 };
 
+struct i915_execbuffer_params {
+	struct drm_device               *dev;
+	struct drm_file                 *file;
+	uint32_t                        dispatch_flags;
+	uint32_t                        args_batch_start_offset;
+	uint32_t                        batch_obj_vm_offset;
+	struct intel_engine_cs          *ring;
+	struct drm_i915_gem_object      *batch_obj;
+	struct intel_context            *ctx;
+	struct drm_i915_gem_request     *request;
+};
+
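This parameter block replaces the long execbuf_submit() argument list; a
caller fills it once and hands it down. A sketch, loosely modelled on the
execbuffer path (the local names and the vma list are illustrative):

	struct i915_execbuffer_params params_master, *params = &params_master;

	memset(params, 0, sizeof(*params));
	params->dev = dev;
	params->file = file;
	params->ring = ring;
	params->ctx = ctx;
	params->batch_obj = batch_obj;
	params->dispatch_flags = dispatch_flags;

	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
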
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *objects;
@@ -1680,19 +1769,7 @@ struct drm_i915_private {
 	u32 pm_rps_events;
 	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
-	struct work_struct hotplug_work;
-	struct {
-		unsigned long hpd_last_jiffies;
-		int hpd_cnt;
-		enum {
-			HPD_ENABLED = 0,
-			HPD_DISABLED = 1,
-			HPD_MARK_DISABLED = 2
-		} hpd_mark;
-	} hpd_stats[HPD_NUM_PINS];
-	u32 hpd_event_bits;
-	struct delayed_work hotplug_reenable_work;
-
+	struct i915_hotplug hotplug;
 	struct i915_fbc fbc;
 	struct i915_drrs drrs;
 	struct intel_opregion opregion;
@@ -1718,7 +1795,7 @@ struct drm_i915_private {
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 	unsigned int skl_boot_cdclk;
-	unsigned int cdclk_freq;
+	unsigned int cdclk_freq, max_cdclk_freq;
 	unsigned int hpll_freq;
 
 	/**
@@ -1769,9 +1846,6 @@ struct drm_i915_private {
 
 	/* Reclocking support */
 	bool render_reclock_avail;
-	bool lvds_downclock_avail;
-	/* indicates the reduced downclock for LVDS*/
-	int lvds_downclock;
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
@@ -1799,7 +1873,7 @@ struct drm_i915_private {
 
 	struct drm_i915_gem_object *vlv_pctx;
 
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 	struct work_struct fbdev_suspend_work;
@@ -1859,29 +1933,11 @@ struct drm_i915_private {
 
 	struct i915_runtime_pm pm;
 
-	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
-	u32 long_hpd_port_mask;
-	u32 short_hpd_port_mask;
-	struct work_struct dig_port_work;
-
-	/*
-	 * if we get a HPD irq from DP and a HPD irq from non-DP
-	 * the non-DP HPD could block the workqueue on a mode config
-	 * mutex getting, that userspace may have taken. However
-	 * userspace is waiting on the DP workqueue to run which is
-	 * blocked behind the non-DP one.
-	 */
-	struct workqueue_struct *dp_wq;
-
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
-		int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
-				      struct intel_engine_cs *ring,
-				      struct intel_context *ctx,
+		int (*execbuf_submit)(struct i915_execbuffer_params *params,
 				      struct drm_i915_gem_execbuffer2 *args,
-				      struct list_head *vmas,
-				      struct drm_i915_gem_object *batch_obj,
-				      u64 exec_start, u32 flags);
+				      struct list_head *vmas);
 		int (*init_rings)(struct drm_device *dev);
 		void (*cleanup_ring)(struct intel_engine_cs *ring);
 		void (*stop_ring)(struct intel_engine_cs *ring);
@@ -2149,7 +2205,8 @@ struct drm_i915_gem_request {
 	struct intel_context *ctx;
 	struct intel_ringbuffer *ringbuf;
 
-	/** Batch buffer related to this request if any */
+	/** Batch buffer related to this request if any (used for
+	 * error state dump only) */
 	struct drm_i915_gem_object *batch_obj;
 
 	/** Time at which this request was emitted, in jiffies. */
@@ -2187,8 +2244,12 @@ struct drm_i915_gem_request {
 };
 
 int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx);
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file);
 
 static inline uint32_t
 i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
@@ -2392,6 +2453,9 @@ struct drm_i915_cmd_table {
 				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
 				 (INTEL_DEVID(dev) & 0xf) == 0xb ||	\
 				 (INTEL_DEVID(dev) & 0xf) == 0xe))
+/* ULX machines are also considered ULT. */
+#define IS_BDW_ULX(dev)		(IS_BROADWELL(dev) && \
+				 (INTEL_DEVID(dev) & 0xf) == 0xe)
 #define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 #define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
@@ -2401,6 +2465,14 @@ struct drm_i915_cmd_table {
 /* ULX machines are also considered ULT. */
 #define IS_HSW_ULX(dev)		(INTEL_DEVID(dev) == 0x0A0E || \
 				 INTEL_DEVID(dev) == 0x0A1E)
+#define IS_SKL_ULT(dev)		(INTEL_DEVID(dev) == 0x1906 || \
+				 INTEL_DEVID(dev) == 0x1913 || \
+				 INTEL_DEVID(dev) == 0x1916 || \
+				 INTEL_DEVID(dev) == 0x1921 || \
+				 INTEL_DEVID(dev) == 0x1926)
+#define IS_SKL_ULX(dev)		(INTEL_DEVID(dev) == 0x190E || \
+				 INTEL_DEVID(dev) == 0x1915 || \
+				 INTEL_DEVID(dev) == 0x191E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 #define SKL_REVID_A0		(0x0)
@@ -2467,9 +2539,6 @@ struct drm_i915_cmd_table {
  */
 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
 						      IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
 
@@ -2495,6 +2564,12 @@ struct drm_i915_cmd_table {
 
 #define HAS_CSR(dev)	(IS_SKYLAKE(dev))
 
+#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
+				    INTEL_INFO(dev)->gen >= 8)
+
+#define HAS_CORE_RING_FREQ(dev)	(INTEL_INFO(dev)->gen >= 6 && \
+				 !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
+
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
@@ -2534,7 +2609,6 @@ struct i915_params {
 	int modeset;
 	int panel_ignore_lid;
 	int semaphores;
-	unsigned int lvds_downclock;
 	int lvds_channel_mode;
 	int panel_use_ssc;
 	int vbt_sdvo_panel_type;
@@ -2556,10 +2630,11 @@ struct i915_params {
 	bool reset;
 	bool disable_display;
 	bool disable_vtd_wa;
+	bool enable_guc_submission;
+	int guc_log_level;
 	int use_mmio_flip;
 	int mmio_debug;
 	bool verbose_state_checks;
-	bool nuclear_pageflip;
 	int edp_vswing;
 };
 extern struct i915_params i915 __read_mostly;
@@ -2573,21 +2648,27 @@ extern void i915_driver_preclose(struct drm_device *dev,
 				 struct drm_file *file);
 extern void i915_driver_postclose(struct drm_device *dev,
 				  struct drm_file *file);
-extern int i915_driver_device_is_agp(struct drm_device * dev);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 #endif
 extern int intel_gpu_reset(struct drm_device *dev);
+extern bool intel_has_gpu_reset(struct drm_device *dev);
 extern int i915_reset(struct drm_device *dev);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 void i915_firmware_load_error_print(const char *fw_path, int err);
 
+/* intel_hotplug.c */
+void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+
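intel_hpd_pin_to_port() reports whether a HPD pin maps onto a digital port.
A sketch of typical IRQ-time use, assuming the port_name() helper from this
header:

	enum port port;

	if (intel_hpd_pin_to_port(pin, &port))
		DRM_DEBUG_KMS("hotplug event on port %c\n", port_name(port));
	else
		DRM_DEBUG_KMS("hotplug on pin %d has no port mapping\n", pin);
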
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
 __printf(3, 4)
@@ -2595,7 +2676,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
 		       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_hpd_init(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
@@ -2662,19 +2742,11 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-					struct intel_engine_cs *ring);
-void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-					 struct drm_file *file,
-					 struct intel_engine_cs *ring,
-					 struct drm_i915_gem_object *obj);
-int i915_gem_ringbuffer_submission(struct drm_device *dev,
-				   struct drm_file *file,
-				   struct intel_engine_cs *ring,
-				   struct intel_context *ctx,
+					struct drm_i915_gem_request *req);
+void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
+int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 				   struct drm_i915_gem_execbuffer2 *args,
-				   struct list_head *vmas,
-				   struct drm_i915_gem_object *batch_obj,
-				   u64 exec_start, u32 flags);
+				   struct list_head *vmas);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2707,6 +2779,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
+struct drm_i915_gem_object *i915_gem_object_create_from_data(
+		struct drm_device *dev, const void *data, size_t size);
 void i915_init_vm(struct drm_i915_private *dev_priv,
 		  struct i915_address_space *vm);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -2781,9 +2855,10 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct intel_engine_cs *to);
+			 struct intel_engine_cs *to,
+			 struct drm_i915_gem_request **to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring);
+			     struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
@@ -2812,11 +2887,6 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 
 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring);
@@ -2825,7 +2895,6 @@ bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
-int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
@@ -2860,16 +2929,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_engine_cs *ring,
-		       struct drm_file *file,
-		       struct drm_i915_gem_object *batch_obj);
-#define i915_add_request(ring) \
-	__i915_add_request(ring, NULL, NULL)
+void __i915_add_request(struct drm_i915_gem_request *req,
+			struct drm_i915_gem_object *batch_obj,
+			bool flush_caches);
+#define i915_add_request(req) \
+	__i915_add_request(req, NULL, true)
+#define i915_add_request_no_flush(req) \
+	__i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,
@@ -2889,6 +2960,7 @@ int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_engine_cs *pipelined,
+				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
 					      const struct i915_ggtt_view *view);
@@ -2912,8 +2984,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				struct drm_gem_object *gem_obj, int flags);
 
-void i915_gem_restore_fences(struct drm_device *dev);
-
 unsigned long
 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 			      const struct i915_ggtt_view *view);
@@ -3008,15 +3078,27 @@ i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
 	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 }
 
+/* i915_gem_fence.c */
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_private *dev_priv);
+int i915_gem_context_enable(struct drm_i915_gem_request *req);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_engine_cs *ring,
-			struct intel_context *to);
+int i915_switch_context(struct drm_i915_gem_request *req);
 struct intel_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
@@ -3066,9 +3148,12 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
 }
 
 /* i915_gem_stolen.c */
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+				struct drm_mm_node *node, u64 size,
+				unsigned alignment);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+				 struct drm_mm_node *node);
 int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
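
The insert/remove pair above wraps the drm_mm allocator under the new
stolen_lock. A usage sketch (the 4096-byte alignment and the kzalloc'd node
are illustrative):

	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ret = i915_gem_stolen_insert_node(dev_priv, node, size, 4096);
	if (ret) {
		kfree(node);
		return ret;
	}

	/* ... use the stolen range starting at node->start ... */

	i915_gem_stolen_remove_node(dev_priv, node);
	kfree(node);
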
@@ -3098,10 +3183,6 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
-
 /* i915_gem_debug.c */
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
@@ -3223,8 +3304,7 @@ extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern void intel_connector_unregister(struct intel_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev,
-					 bool force_restore);
+extern void intel_display_resume(struct drm_device *dev);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7a918d1c12ba..4d631a946481 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,11 +46,6 @@ static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
 static void
 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj);
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-					 struct drm_i915_fence_reg *fence,
-					 bool enable);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
@@ -66,18 +61,6 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 	return obj->pin_display;
 }
 
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
-{
-	if (obj->tiling_mode)
-		i915_gem_release_mmap(obj);
-
-	/* As we do not have an associated fence register, we will force
-	 * a tiling change if we ever need to acquire one.
-	 */
-	obj->fence_dirty = false;
-	obj->fence_reg = I915_FENCE_REG_NONE;
-}
-
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
 				  size_t size)
@@ -149,14 +132,18 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_get_aperture *args = data;
-	struct drm_i915_gem_object *obj;
+	struct i915_gtt *ggtt = &dev_priv->gtt;
+	struct i915_vma *vma;
 	size_t pinned;
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-		if (i915_gem_obj_is_pinned(obj))
-			pinned += i915_gem_obj_ggtt_size(obj);
+	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+		if (vma->pin_count)
+			pinned += vma->node.size;
+	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+		if (vma->pin_count)
+			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
 
 	args->aper_size = dev_priv->gtt.base.total;
@@ -347,7 +334,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
 		unsigned long unwritten;
 
@@ -368,7 +355,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	i915_gem_chipset_flush(dev);
 
 out:
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 	return ret;
 }
 
@@ -801,7 +788,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 
 	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
-	intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -832,7 +819,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	}
 
 out_flush:
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
 	i915_gem_object_ggtt_unpin(obj);
 out:
@@ -945,7 +932,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
 	i915_gem_object_pin_pages(obj);
 
@@ -1025,7 +1012,7 @@ out:
 	if (needs_clflush_after)
 		i915_gem_chipset_flush(dev);
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 	return ret;
 }
 
@@ -1146,23 +1133,6 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
 	return 0;
 }
 
-/*
- * Compare arbitrary request against outstanding lazy request. Emit on match.
- */
-int
-i915_gem_check_olr(struct drm_i915_gem_request *req)
-{
-	int ret;
-
-	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-
-	ret = 0;
-	if (req == req->ring->outstanding_lazy_request)
-		ret = i915_add_request(req->ring);
-
-	return ret;
-}
-
 static void fake_irq(unsigned long data)
 {
 	wake_up_process((struct task_struct *)data);
@@ -1334,6 +1304,31 @@ out:
 	return ret;
 }
 
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv;
+
+	WARN_ON(!req || !file || req->file_priv);
+
+	if (!req || !file)
+		return -EINVAL;
+
+	if (req->file_priv)
+		return -EINVAL;
+
+	file_priv = file->driver_priv;
+
+	spin_lock(&file_priv->mm.lock);
+	req->file_priv = file_priv;
+	list_add_tail(&req->client_list, &file_priv->mm.request_list);
+	spin_unlock(&file_priv->mm.lock);
+
+	req->pid = get_pid(task_pid(current));
+
+	return 0;
+}
+
 static inline void
 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
@@ -1346,6 +1343,9 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 	list_del(&request->client_list);
 	request->file_priv = NULL;
 	spin_unlock(&file_priv->mm.lock);
+
+	put_pid(request->pid);
+	request->pid = NULL;
 }
 
 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
@@ -1365,8 +1365,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	list_del_init(&request->list);
 	i915_gem_request_remove_from_client(request);
 
-	put_pid(request->pid);
-
 	i915_gem_request_unreference(request);
 }
 
@@ -1415,10 +1413,6 @@ i915_wait_request(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = i915_gem_check_olr(req);
-	if (ret)
-		return ret;
-
 	ret = __i915_wait_request(req,
 				  atomic_read(&dev_priv->gpu_error.reset_counter),
 				  interruptible, NULL, NULL);
@@ -1518,10 +1512,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 		if (req == NULL)
 			return 0;
 
-		ret = i915_gem_check_olr(req);
-		if (ret)
-			goto err;
-
 		requests[n++] = i915_gem_request_reference(req);
 	} else {
 		for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -1531,10 +1521,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 			if (req == NULL)
 				continue;
 
-			ret = i915_gem_check_olr(req);
-			if (ret)
-				goto err;
-
 			requests[n++] = i915_gem_request_reference(req);
 		}
 	}
@@ -1545,7 +1531,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 					  NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
-err:
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
 			i915_gem_object_retire_request(obj, requests[i]);
@@ -1613,6 +1598,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	else
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 
+	if (write_domain != 0)
+		intel_fb_obj_invalidate(obj,
+					write_domain == I915_GEM_DOMAIN_GTT ?
+					ORIGIN_GTT : ORIGIN_CPU);
+
 unref:
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -2349,9 +2339,12 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring)
+			     struct drm_i915_gem_request *req)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct intel_engine_cs *ring;
+
+	ring = i915_gem_request_get_ring(req);
 
 	/* Add a reference if we're newly entering the active list. */
 	if (obj->active == 0)
@@ -2359,8 +2352,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	obj->active |= intel_ring_flag(ring);
 
 	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-	i915_gem_request_assign(&obj->last_read_req[ring->id],
-				intel_ring_get_request(ring));
+	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
 	list_move_tail(&vma->mm_list, &vma->vm->active_list);
 }
@@ -2372,7 +2364,7 @@ i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
 
 	i915_gem_request_assign(&obj->last_write_req, NULL);
-	intel_fb_obj_flush(obj, true);
+	intel_fb_obj_flush(obj, true, ORIGIN_CS);
 }
 
 static void
@@ -2393,6 +2385,13 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	if (obj->active)
 		return;
 
+	/* Bump our place on the bound list to keep it roughly in LRU order
+	 * so that we don't steal from recently used but inactive objects
+	 * (unless we are forced to, of course!)
+	 */
+	list_move_tail(&obj->global_list,
+		       &to_i915(obj->base.dev)->mm.bound_list);
+
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
 		if (!list_empty(&vma->mm_list))
 			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
@@ -2472,24 +2471,34 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 	return 0;
 }
 
-int __i915_add_request(struct intel_engine_cs *ring,
-		       struct drm_file *file,
-		       struct drm_i915_gem_object *obj)
+/*
+ * NB: This function is not allowed to fail. Doing so would mean that the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request,
+			struct drm_i915_gem_object *obj,
+			bool flush_caches)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct drm_i915_gem_request *request;
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
 	int ret;
 
-	request = ring->outstanding_lazy_request;
 	if (WARN_ON(request == NULL))
-		return -ENOMEM;
+		return;
 
-	if (i915.enable_execlists) {
-		ringbuf = request->ctx->engine[ring->id].ringbuf;
-	} else
-		ringbuf = ring->buffer;
+	ring = request->ring;
+	dev_priv = ring->dev->dev_private;
+	ringbuf = request->ringbuf;
+
+	/*
+	 * To ensure that this call will not fail, space for its emissions
+	 * should already have been reserved in the ring buffer. Let the ring
+	 * know that it is time to use that space up.
+	 */
+	intel_ring_reserved_space_use(ringbuf);
 
 	request_start = intel_ring_get_tail(ringbuf);
 	/*
@@ -2499,14 +2508,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (i915.enable_execlists) {
-		ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-		if (ret)
-			return ret;
-	} else {
-		ret = intel_ring_flush_all_caches(ring);
-		if (ret)
-			return ret;
+	if (flush_caches) {
+		if (i915.enable_execlists)
+			ret = logical_ring_flush_all_caches(request);
+		else
+			ret = intel_ring_flush_all_caches(request);
+		/* Not allowed to fail! */
+		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
 	}
 
 	/* Record the position of the start of the request so that
@@ -2516,17 +2524,15 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 */
 	request->postfix = intel_ring_get_tail(ringbuf);
 
-	if (i915.enable_execlists) {
-		ret = ring->emit_request(ringbuf, request);
-		if (ret)
-			return ret;
-	} else {
-		ret = ring->add_request(ring);
-		if (ret)
-			return ret;
+	if (i915.enable_execlists) {
+		ret = ring->emit_request(request);
+	} else {
+		ret = ring->add_request(request);
 
 		request->tail = intel_ring_get_tail(ringbuf);
 	}
+	/* Not allowed to fail! */
+	WARN(ret, "emit|add_request failed: %d!\n", ret);
 
 	request->head = request_start;
 
@@ -2538,34 +2544,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 */
 	request->batch_obj = obj;
 
-	if (!i915.enable_execlists) {
-		/* Hold a reference to the current context so that we can inspect
-		 * it later in case a hangcheck error event fires.
-		 */
-		request->ctx = ring->last_context;
-		if (request->ctx)
-			i915_gem_context_reference(request->ctx);
-	}
-
 	request->emitted_jiffies = jiffies;
 	ring->last_submitted_seqno = request->seqno;
 	list_add_tail(&request->list, &ring->request_list);
-	request->file_priv = NULL;
-
-	if (file) {
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-
-		spin_lock(&file_priv->mm.lock);
-		request->file_priv = file_priv;
-		list_add_tail(&request->client_list,
-			      &file_priv->mm.request_list);
-		spin_unlock(&file_priv->mm.lock);
-
-		request->pid = get_pid(task_pid(current));
-	}
 
 	trace_i915_gem_request_add(request);
-	ring->outstanding_lazy_request = NULL;
 
 	i915_queue_hangcheck(ring->dev);
 
@@ -2574,7 +2557,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
 			   round_jiffies_up_relative(HZ));
 	intel_mark_busy(dev_priv->dev);
 
-	return 0;
+	/* Sanity check that the reserved size was large enough. */
+	intel_ring_reserved_space_end(ringbuf);
 }
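
The reserved_space_use/_end calls bracket a contract with
i915_gem_request_alloc(): ring space for the request postamble is reserved
at allocation time, so the emission above cannot run out. A condensed sketch
of the lifecycle (error handling elided):

	struct drm_i915_gem_request *req;

	/* Allocation also reserves ring space for the final emission ... */
	ret = i915_gem_request_alloc(ring, ctx, &req);

	/* ... commands are then written against that reservation ... */
	ret = intel_ring_begin(req, 4);

	/* ... and __i915_add_request() consumes it, then verifies the
	 * reservation was large enough. It is not allowed to fail.
	 */
	i915_add_request(req);
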
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2628,12 +2612,13 @@ void i915_gem_request_free(struct kref *req_ref)
 						 typeof(*req), ref);
 	struct intel_context *ctx = req->ctx;
 
+	if (req->file_priv)
+		i915_gem_request_remove_from_client(req);
+
 	if (ctx) {
 		if (i915.enable_execlists) {
-			struct intel_engine_cs *ring = req->ring;
-
-			if (ctx != ring->default_context)
-				intel_lr_context_unpin(ring, ctx);
+			if (ctx != req->ring->default_context)
+				intel_lr_context_unpin(req);
 		}
 
 		i915_gem_context_unreference(ctx);
@@ -2643,36 +2628,63 @@ void i915_gem_request_free(struct kref *req_ref)
 }
 
 int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx)
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 	struct drm_i915_gem_request *req;
 	int ret;
 
-	if (ring->outstanding_lazy_request)
-		return 0;
+	if (!req_out)
+		return -EINVAL;
+
+	*req_out = NULL;
 
 	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
 	if (req == NULL)
 		return -ENOMEM;
 
-	kref_init(&req->ref);
-	req->i915 = dev_priv;
-
 	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
 	if (ret)
 		goto err;
 
+	kref_init(&req->ref);
+	req->i915 = dev_priv;
 	req->ring = ring;
+	req->ctx  = ctx;
+	i915_gem_context_reference(req->ctx);
 
 	if (i915.enable_execlists)
-		ret = intel_logical_ring_alloc_request_extras(req, ctx);
+		ret = intel_logical_ring_alloc_request_extras(req);
 	else
 		ret = intel_ring_alloc_request_extras(req);
-	if (ret)
+	if (ret) {
+		i915_gem_context_unreference(req->ctx);
 		goto err;
+	}
+
+	/*
+	 * Reserve space in the ring buffer for all the commands required to
+	 * eventually emit this request. This is to guarantee that the
+	 * i915_add_request() call can't fail. Note that the reserve may need
+	 * to be redone if the request is not actually submitted straight
+	 * away, e.g. because a GPU scheduler has deferred it.
+	 */
+	if (i915.enable_execlists)
+		ret = intel_logical_ring_reserve_space(req);
+	else
+		ret = intel_ring_reserve_space(req);
+	if (ret) {
+		/*
+		 * At this point, the request is fully allocated even if not
+		 * fully prepared. Thus it can be cleaned up using the proper
+		 * free code.
+		 */
+		i915_gem_request_cancel(req);
+		return ret;
+	}
 
-	ring->outstanding_lazy_request = req;
+	*req_out = req;
 	return 0;
 
 err:
@@ -2680,6 +2692,13 @@ err:
 	return ret;
 }
 
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+	intel_ring_reserved_space_cancel(req->ringbuf);
+
+	i915_gem_request_unreference(req);
+}
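
i915_gem_request_cancel() is the error-path counterpart to
i915_gem_request_alloc(): it hands back the reserved ring space and drops
the reference. The pattern, matching what the reworked i915_gpu_idle() below
does (the emit step here is a hypothetical stand-in for real work):

	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (ret)
		return ret;

	ret = emit_some_commands(req);	/* hypothetical */
	if (ret) {
		i915_gem_request_cancel(req);
		return ret;
	}

	i915_add_request_no_flush(req);
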
+
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
@@ -2741,7 +2760,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 		list_del(&submit_req->execlist_link);
 
 		if (submit_req->ctx != ring->default_context)
-			intel_lr_context_unpin(ring, submit_req->ctx);
+			intel_lr_context_unpin(submit_req);
 
 		i915_gem_request_unreference(submit_req);
 	}
@@ -2762,30 +2781,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
 		i915_gem_request_retire(request);
 	}
-
-	/* This may not have been flushed before the reset, so clean it now */
-	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
-}
-
-void i915_gem_restore_fences(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
-		/*
-		 * Commit delayed tiling changes if we have an object still
-		 * attached to the fence, otherwise just clear the fence.
-		 */
-		if (reg->obj) {
-			i915_gem_object_update_fence(reg->obj, reg,
-						     reg->obj->tiling_mode);
-		} else {
-			i915_gem_write_fence(dev, i, NULL);
-		}
-	}
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2947,7 +2942,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 {
-	int ret, i;
+	int i;
 
 	if (!obj->active)
 		return 0;
@@ -2962,10 +2957,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 		if (list_empty(&req->list))
 			goto retire;
 
-		ret = i915_gem_check_olr(req);
-		if (ret)
-			return ret;
-
 		if (i915_gem_request_completed(req, true)) {
 			__i915_gem_request_retire__upto(req);
 retire:
@@ -3068,25 +3059,22 @@ out:
 static int
 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		       struct intel_engine_cs *to,
-		       struct drm_i915_gem_request *req)
+		       struct drm_i915_gem_request *from_req,
+		       struct drm_i915_gem_request **to_req)
 {
 	struct intel_engine_cs *from;
 	int ret;
 
-	from = i915_gem_request_get_ring(req);
+	from = i915_gem_request_get_ring(from_req);
 	if (to == from)
 		return 0;
 
-	if (i915_gem_request_completed(req, true))
+	if (i915_gem_request_completed(from_req, true))
 		return 0;
 
-	ret = i915_gem_check_olr(req);
-	if (ret)
-		return ret;
-
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-		ret = __i915_wait_request(req,
+		ret = __i915_wait_request(from_req,
 					  atomic_read(&i915->gpu_error.reset_counter),
 					  i915->mm.interruptible,
 					  NULL,
@@ -3094,16 +3082,24 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		if (ret)
 			return ret;
 
-		i915_gem_object_retire_request(obj, req);
+		i915_gem_object_retire_request(obj, from_req);
 	} else {
 		int idx = intel_ring_sync_index(from, to);
-		u32 seqno = i915_gem_request_get_seqno(req);
+		u32 seqno = i915_gem_request_get_seqno(from_req);
+
+		WARN_ON(!to_req);
 
 		if (seqno <= from->semaphore.sync_seqno[idx])
 			return 0;
 
-		trace_i915_gem_ring_sync_to(from, to, req);
-		ret = to->semaphore.sync_to(to, from, seqno);
+		if (*to_req == NULL) {
+			ret = i915_gem_request_alloc(to, to->default_context, to_req);
+			if (ret)
+				return ret;
+		}
+
+		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
+		ret = to->semaphore.sync_to(*to_req, from, seqno);
 		if (ret)
 			return ret;
 
@@ -3123,11 +3119,14 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  *
  * @obj: object which may be in use on another ring.
  * @to: ring we wish to use the object on. May be NULL.
+ * @to_req: request we wish to use the object for. See below.
+ *          This will be allocated and returned if a request is
+ *          required but not passed in.
  *
  * This code is meant to abstract object synchronization with the GPU.
  * Calling with NULL implies synchronizing the object with the CPU
  * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow on engine to write
+ * between engines inside the GPU. We only allow one engine to write
  * into a buffer at any time, but multiple readers. To ensure each has
  * a coherent view of memory, we must:
  *
@@ -3138,11 +3137,22 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  * - If we are a write request (pending_write_domain is set), the new
  *   request must wait for outstanding read requests to complete.
  *
+ * For CPU synchronisation (NULL to) no request is required. For syncing with
+ * rings, to_req must be non-NULL. However, a request does not have to be
+ * pre-allocated. If *to_req is NULL and sync commands will be emitted, then a
+ * request will be allocated automatically and returned through *to_req. Note
+ * that it is not guaranteed that commands will be emitted (because the system
+ * might already be idle). Hence there is no need to create a request that
+ * might never have any work submitted. Note further that if a request is
+ * returned in *to_req, it is the responsibility of the caller to submit
+ * that request (after potentially adding more work to it).
+ *
  * Returns 0 if successful, else propagates up the lower layer error.
  */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		     struct intel_engine_cs *to)
+		     struct intel_engine_cs *to,
+		     struct drm_i915_gem_request **to_req)
 {
 	const bool readonly = obj->base.pending_write_domain == 0;
 	struct drm_i915_gem_request *req[I915_NUM_RINGS];
@@ -3164,7 +3174,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 				req[n++] = obj->last_read_req[i];
 	}
 	for (i = 0; i < n; i++) {
-		ret = __i915_gem_object_sync(obj, to, req[i]);
+		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
 		if (ret)
 			return ret;
 	}
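
From the caller's side, the to_req contract documented above reduces to this
sketch (obj and ring assumed in scope):

	struct drm_i915_gem_request *to_req = NULL;

	ret = i915_gem_object_sync(obj, ring, &to_req);
	if (ret)
		return ret;

	/* A request comes back only if sync commands were emitted;
	 * submitting it is then the caller's job.
	 */
	if (to_req)
		i915_add_request_no_flush(to_req);
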
@@ -3275,354 +3285,27 @@ int i915_gpu_idle(struct drm_device *dev)
 	/* Flush everything onto the inactive list. */
 	for_each_ring(ring, dev_priv, i) {
 		if (!i915.enable_execlists) {
-			ret = i915_switch_context(ring, ring->default_context);
+			struct drm_i915_gem_request *req;
+
+			ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 			if (ret)
 				return ret;
-		}
 
-		ret = intel_ring_idle(ring);
-		if (ret)
-			return ret;
-	}
-
-	WARN_ON(i915_verify_lists(dev));
-	return 0;
-}
-
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fence_reg;
-	int fence_pitch_shift;
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		fence_reg = FENCE_REG_SANDYBRIDGE_0;
-		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
-	} else {
-		fence_reg = FENCE_REG_965_0;
-		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
-	}
-
-	fence_reg += reg * 8;
+			ret = i915_switch_context(req);
+			if (ret) {
+				i915_gem_request_cancel(req);
+				return ret;
+			}
 
-	/* To w/a incoherency with non-atomic 64-bit register updates,
-	 * we split the 64-bit update into two 32-bit writes. In order
-	 * for a partial fence not to be evaluated between writes, we
-	 * precede the update with write to turn off the fence register,
-	 * and only enable the fence as the last step.
-	 *
-	 * For extra levels of paranoia, we make sure each step lands
-	 * before applying the next step.
-	 */
-	I915_WRITE(fence_reg, 0);
-	POSTING_READ(fence_reg);
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint64_t val;
-
-		/* Adjust fence size to match tiled area */
-		if (obj->tiling_mode != I915_TILING_NONE) {
-			uint32_t row_size = obj->stride *
-				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
-			size = (size / row_size) * row_size;
+			i915_add_request_no_flush(req);
 		}
 
-		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
-				 0xfffff000) << 32;
-		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
-		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
-		val |= I965_FENCE_REG_VALID;
-
-		I915_WRITE(fence_reg + 4, val >> 32);
-		POSTING_READ(fence_reg + 4);
-
-		I915_WRITE(fence_reg + 0, val);
-		POSTING_READ(fence_reg);
-	} else {
-		I915_WRITE(fence_reg + 4, 0);
-		POSTING_READ(fence_reg + 4);
-	}
-}
-
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val;
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		int pitch_val;
-		int tile_width;
-
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
-
-		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
-			tile_width = 128;
-		else
-			tile_width = 512;
-
-		/* Note: pitch better be a power of two tile widths */
-		pitch_val = obj->stride / tile_width;
-		pitch_val = ffs(pitch_val) - 1;
-
-		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I915_FENCE_SIZE_BITS(size);
-		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-		val |= I830_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	if (reg < 8)
-		reg = FENCE_REG_830_0 + reg * 4;
-	else
-		reg = FENCE_REG_945_8 + (reg - 8) * 4;
-
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
-}
-
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
-				struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val;
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint32_t pitch_val;
-
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), size);
-
-		pitch_val = obj->stride / 128;
-		pitch_val = ffs(pitch_val) - 1;
-
-		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I830_FENCE_SIZE_BITS(size);
-		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-		val |= I830_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
-	POSTING_READ(FENCE_REG_830_0 + reg * 4);
-}
-
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
-	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
-}
-
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* Ensure that all CPU reads are completed before installing a fence
-	 * and all writes before removing the fence.
-	 */
-	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
-		mb();
-
-	WARN(obj && (!obj->stride || !obj->tiling_mode),
-	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-	     obj->stride, obj->tiling_mode);
-
-	if (IS_GEN2(dev))
-		i830_write_fence_reg(dev, reg, obj);
-	else if (IS_GEN3(dev))
-		i915_write_fence_reg(dev, reg, obj);
-	else if (INTEL_INFO(dev)->gen >= 4)
-		i965_write_fence_reg(dev, reg, obj);
-
-	/* And similarly be paranoid that no direct access to this region
-	 * is reordered to before the fence is installed.
-	 */
-	if (i915_gem_object_needs_mb(obj))
-		mb();
-}
-
-static inline int fence_number(struct drm_i915_private *dev_priv,
-			       struct drm_i915_fence_reg *fence)
-{
-	return fence - dev_priv->fence_regs;
-}
-
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-					 struct drm_i915_fence_reg *fence,
-					 bool enable)
-{
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	int reg = fence_number(dev_priv, fence);
-
-	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
-
-	if (enable) {
-		obj->fence_reg = reg;
-		fence->obj = obj;
-		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
-	} else {
-		obj->fence_reg = I915_FENCE_REG_NONE;
-		fence->obj = NULL;
-		list_del_init(&fence->lru_list);
-	}
-	obj->fence_dirty = false;
-}
-
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->last_fenced_req) {
-		int ret = i915_wait_request(obj->last_fenced_req);
-		if (ret)
-			return ret;
-
-		i915_gem_request_assign(&obj->last_fenced_req, NULL);
-	}
-
-	return 0;
-}
-
-int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct drm_i915_fence_reg *fence;
-	int ret;
-
-	ret = i915_gem_object_wait_fence(obj);
-	if (ret)
-		return ret;
-
-	if (obj->fence_reg == I915_FENCE_REG_NONE)
-		return 0;
-
-	fence = &dev_priv->fence_regs[obj->fence_reg];
-
-	if (WARN_ON(fence->pin_count))
-		return -EBUSY;
-
-	i915_gem_object_fence_lost(obj);
-	i915_gem_object_update_fence(obj, fence, false);
-
-	return 0;
-}
-
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_fence_reg *reg, *avail;
-	int i;
-
-	/* First try to find a free reg */
-	avail = NULL;
-	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
-		reg = &dev_priv->fence_regs[i];
-		if (!reg->obj)
-			return reg;
-
-		if (!reg->pin_count)
-			avail = reg;
-	}
-
-	if (avail == NULL)
-		goto deadlock;
-
-	/* None available, try to steal one or wait for a user to finish */
-	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
-		if (reg->pin_count)
-			continue;
-
-		return reg;
-	}
-
-deadlock:
-	/* Wait for completion of pending flips which consume fences */
-	if (intel_has_pending_fb_unpin(dev))
-		return ERR_PTR(-EAGAIN);
-
-	return ERR_PTR(-EDEADLK);
-}
-
-/**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
- *
- * When mapping objects through the GTT, userspace wants to be able to write
- * to them without having to worry about swizzling if the object is tiled.
- * This function walks the fence regs looking for a free one for @obj,
- * stealing one if it can't find any.
- *
- * It then sets up the reg based on the object's properties: address, pitch
- * and tiling format.
- *
- * For an untiled surface, this removes any existing fence.
- */
-int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	bool enable = obj->tiling_mode != I915_TILING_NONE;
-	struct drm_i915_fence_reg *reg;
-	int ret;
-
-	/* Have we updated the tiling parameters upon the object and so
-	 * will need to serialise the write to the associated fence register?
-	 */
-	if (obj->fence_dirty) {
-		ret = i915_gem_object_wait_fence(obj);
+		ret = intel_ring_idle(ring);
 		if (ret)
 			return ret;
 	}
 
-	/* Just update our place in the LRU if our fence is getting reused. */
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		reg = &dev_priv->fence_regs[obj->fence_reg];
-		if (!obj->fence_dirty) {
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
-			return 0;
-		}
-	} else if (enable) {
-		if (WARN_ON(!obj->map_and_fenceable))
-			return -EINVAL;
-
-		reg = i915_find_fence_reg(dev);
-		if (IS_ERR(reg))
-			return PTR_ERR(reg);
-
-		if (reg->obj) {
-			struct drm_i915_gem_object *old = reg->obj;
-
-			ret = i915_gem_object_wait_fence(old);
-			if (ret)
-				return ret;
-
-			i915_gem_object_fence_lost(old);
-		}
-	} else
-		return 0;
-
-	i915_gem_object_update_fence(obj, reg, enable);
-
+	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
@@ -3673,9 +3356,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	unsigned long start =
+	u64 start =
 		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-	unsigned long end =
+	u64 end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
@@ -3731,7 +3414,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
 			  ggtt_view ? ggtt_view->type : 0,
 			  size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -3853,7 +3536,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
@@ -3875,7 +3558,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
@@ -3937,9 +3620,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 		obj->dirty = 1;
 	}
 
-	if (write)
-		intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
-
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
@@ -4094,12 +3774,13 @@ int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_engine_cs *pipelined,
+				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view)
 {
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_sync(obj, pipelined);
+	ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
 	if (ret)
 		return ret;
 
@@ -4210,9 +3891,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
-	if (write)
-		intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
-
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
@@ -4253,6 +3931,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
 			break;
 
+		/*
+		 * Note that the request might not have been submitted yet,
+		 * in which case emitted_jiffies will be zero.
+		 */
+		if (!request->emitted_jiffies)
+			continue;
+
 		target = request;
 	}
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -4423,32 +4108,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 	--vma->pin_count;
 }
 
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
-		WARN_ON(!ggtt_vma ||
-			dev_priv->fence_regs[obj->fence_reg].pin_count >
-			ggtt_vma->pin_count);
-		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-		return true;
-	} else
-		return false;
-}
-
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
-		dev_priv->fence_regs[obj->fence_reg].pin_count--;
-	}
-}
-
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file)
@@ -4810,8 +4469,9 @@ err:
 	return ret;
 }
 
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
@@ -4821,7 +4481,7 @@ int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
 	if (!HAS_L3_DPF(dev) || !remap_info)
 		return 0;
 
-	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
 	if (ret)
 		return ret;
 
@@ -4967,7 +4627,7 @@ i915_gem_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int ret, i;
+	int ret, i, j;
 
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
@@ -5004,27 +4664,55 @@ i915_gem_init_hw(struct drm_device *dev)
 	 */
 	init_unused_rings(dev);
 
+	BUG_ON(!dev_priv->ring[RCS].default_context);
+
+	ret = i915_ppgtt_init_hw(dev);
+	if (ret) {
+		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
+		goto out;
+	}
+
+	/* Need to do basic initialisation of all rings first: */
 	for_each_ring(ring, dev_priv, i) {
 		ret = ring->init_hw(ring);
 		if (ret)
 			goto out;
 	}
 
-	for (i = 0; i < NUM_L3_SLICES(dev); i++)
-		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+	/* Now it is safe to go back round and do everything else: */
+	for_each_ring(ring, dev_priv, i) {
+		struct drm_i915_gem_request *req;
 
-	ret = i915_ppgtt_init_hw(dev);
-	if (ret && ret != -EIO) {
-		DRM_ERROR("PPGTT enable failed %d\n", ret);
-		i915_gem_cleanup_ringbuffer(dev);
-	}
+		WARN_ON(!ring->default_context);
+
+		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+		if (ret) {
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
 
-	ret = i915_gem_context_enable(dev_priv);
-	if (ret && ret != -EIO) {
-		DRM_ERROR("Context enable failed %d\n", ret);
-		i915_gem_cleanup_ringbuffer(dev);
+		if (ring->id == RCS) {
+			for (j = 0; j < NUM_L3_SLICES(dev); j++)
+				i915_gem_l3_remap(req, j);
+		}
 
-		goto out;
+		ret = i915_ppgtt_init_ring(req);
+		if (ret && ret != -EIO) {
+			DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+			i915_gem_request_cancel(req);
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
+
+		ret = i915_gem_context_enable(req);
+		if (ret && ret != -EIO) {
+			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+			i915_gem_request_cancel(req);
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
+
+		i915_add_request_no_flush(req);
 	}
 
 out:
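The hunk above is the first user of the explicit request lifecycle this series introduces. A minimal sketch of the pattern, using only the helpers visible in this diff (the emission step is a hypothetical placeholder):

	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (ret)
		return ret;

	ret = emit_some_commands(req);		/* hypothetical work step */
	if (ret) {
		i915_gem_request_cancel(req);	/* never submitted: free it */
		return ret;
	}

	i915_add_request_no_flush(req);		/* submit without forcing a flush */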
@@ -5111,6 +4799,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 
 	for_each_ring(ring, dev_priv, i)
 		dev_priv->gt.cleanup_ring(ring);
+
+	if (i915.enable_execlists)
+		/*
+		 * Neither the BIOS, ourselves nor any other kernel
+		 * expects the system to be in execlists mode on startup,
+		 * so we need to reset the GPU back to legacy mode.
+		 */
+		intel_gpu_reset(dev);
 }
 
 static void
@@ -5388,3 +5084,42 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 	return false;
 }
 
+/* Allocate a new GEM object and fill it with the supplied data */
+struct drm_i915_gem_object *
+i915_gem_object_create_from_data(struct drm_device *dev,
+			         const void *data, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	struct sg_table *sg;
+	size_t bytes;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
+	if (IS_ERR_OR_NULL(obj))
+		return obj;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret)
+		goto fail;
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		goto fail;
+
+	i915_gem_object_pin_pages(obj);
+	sg = obj->pages;
+	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
+	i915_gem_object_unpin_pages(obj);
+
+	if (WARN_ON(bytes != size)) {
+		DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	return obj;
+
+fail:
+	drm_gem_object_unreference(&obj->base);
+	return ERR_PTR(ret);
+}
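A hedged caller sketch for the new helper, assuming a firmware-style blob as input (upload_fw_blob is hypothetical; firmware loaders are the expected real users):

	/* Hypothetical caller: wrap a firmware blob in a GEM object.
	 * Assumes <linux/firmware.h> and that dev->struct_mutex is held,
	 * since the helper above changes the object's CPU/GPU domains.
	 */
	static struct drm_i915_gem_object *
	upload_fw_blob(struct drm_device *dev, const struct firmware *fw)
	{
		return i915_gem_object_create_from_data(dev, fw->data, fw->size);
	}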
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 48afa777e94a..8e893b354bcc 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -287,6 +287,7 @@ err_unpin:
 	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
 		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
+	idr_remove(&file_priv->context_idr, ctx->user_handle);
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
 }
@@ -407,32 +408,23 @@ void i915_gem_context_fini(struct drm_device *dev)
 	i915_gem_context_unreference(dctx);
 }
 
-int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring;
-	int ret, i;
-
-	BUG_ON(!dev_priv->ring[RCS].default_context);
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
 
 	if (i915.enable_execlists) {
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->init_context) {
-				ret = ring->init_context(ring,
-						ring->default_context);
-				if (ret) {
-					DRM_ERROR("ring init context: %d\n",
-							ret);
-					return ret;
-				}
-			}
-		}
+		if (ring->init_context == NULL)
+			return 0;
 
+		ret = ring->init_context(req);
 	} else
-		for_each_ring(ring, dev_priv, i) {
-			ret = i915_switch_context(ring, ring->default_context);
-			if (ret)
-				return ret;
-		}
+		ret = i915_switch_context(req);
+
+	if (ret) {
+		DRM_ERROR("ring init context: %d\n", ret);
+		return ret;
+	}
 
 	return 0;
 }
@@ -485,10 +477,9 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 }
 
 static inline int
-mi_set_context(struct intel_engine_cs *ring,
-	       struct intel_context *new_context,
-	       u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -503,13 +494,15 @@ mi_set_context(struct intel_engine_cs *ring,
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(ring->dev)) {
-		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
+	else if (INTEL_INFO(ring->dev)->gen < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
@@ -517,7 +510,7 @@ mi_set_context(struct intel_engine_cs *ring,
 	if (INTEL_INFO(ring->dev)->gen >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
-	ret = intel_ring_begin(ring, len);
+	ret = intel_ring_begin(req, len);
 	if (ret)
 		return ret;
 
@@ -540,7 +533,7 @@ mi_set_context(struct intel_engine_cs *ring,
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
 			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -621,9 +614,10 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
 	return false;
 }
 
-static int do_switch(struct intel_engine_cs *ring,
-		     struct intel_context *to)
+static int do_switch(struct drm_i915_gem_request *req)
 {
+	struct intel_context *to = req->ctx;
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct intel_context *from = ring->last_context;
 	u32 hw_flags = 0;
@@ -659,7 +653,7 @@ static int do_switch(struct intel_engine_cs *ring,
 		 * Register Immediate commands in Ring Buffer before submitting
 		 * a context."*/
 		trace_switch_mm(ring, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		if (ret)
 			goto unpin_out;
 
@@ -701,7 +695,7 @@ static int do_switch(struct intel_engine_cs *ring,
 	WARN_ON(needs_pd_load_pre(ring, to) &&
 		needs_pd_load_post(ring, to, hw_flags));
 
-	ret = mi_set_context(ring, to, hw_flags);
+	ret = mi_set_context(req, hw_flags);
 	if (ret)
 		goto unpin_out;
 
@@ -710,7 +704,7 @@ static int do_switch(struct intel_engine_cs *ring,
 	 */
 	if (needs_pd_load_post(ring, to, hw_flags)) {
 		trace_switch_mm(ring, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		/* The hardware context switch is emitted, but we haven't
 		 * actually changed the state - so it's probably safe to bail
 		 * here. Still, let the user know something dangerous has
@@ -726,7 +720,7 @@ static int do_switch(struct intel_engine_cs *ring,
 		if (!(to->remap_slice & (1<<i)))
 			continue;
 
-		ret = i915_gem_l3_remap(ring, i);
+		ret = i915_gem_l3_remap(req, i);
 		/* If it failed, try again next round */
 		if (ret)
 			DRM_DEBUG_DRIVER("L3 remapping failed\n");
@@ -742,7 +736,7 @@ static int do_switch(struct intel_engine_cs *ring,
 	 */
 	if (from != NULL) {
 		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -766,7 +760,7 @@ done:
 
 	if (uninitialized) {
 		if (ring->init_context) {
-			ret = ring->init_context(ring, to);
+			ret = ring->init_context(req);
 			if (ret)
 				DRM_ERROR("ring init context: %d\n", ret);
 		}
@@ -782,8 +776,7 @@ unpin_out:
 
 /**
  * i915_switch_context() - perform a GPU context switch.
- * @ring: ring for which we'll execute the context switch
- * @to: the context to switch to
+ * @req: request for which we'll execute the context switch
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -794,25 +787,25 @@ unpin_out:
  * switched by writing to the ELSP and requests keep a reference to their
  * context.
  */
-int i915_switch_context(struct intel_engine_cs *ring,
-			struct intel_context *to)
+int i915_switch_context(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-		if (to != ring->last_context) {
-			i915_gem_context_reference(to);
+	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
+		if (req->ctx != ring->last_context) {
+			i915_gem_context_reference(req->ctx);
 			if (ring->last_context)
 				i915_gem_context_unreference(ring->last_context);
-			ring->last_context = to;
+			ring->last_context = req->ctx;
 		}
 		return 0;
 	}
 
-	return do_switch(ring, to);
+	return do_switch(req);
 }
 
 static bool contexts_enabled(struct drm_device *dev)
@@ -898,6 +891,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 	case I915_CONTEXT_PARAM_BAN_PERIOD:
 		args->value = ctx->hang_stats.ban_period_seconds;
 		break;
+	case I915_CONTEXT_PARAM_NO_ZEROMAP:
+		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -935,6 +931,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 		else
 			ctx->hang_stats.ban_period_seconds = args->value;
 		break;
+	case I915_CONTEXT_PARAM_NO_ZEROMAP:
+		if (args->size) {
+			ret = -EINVAL;
+		} else {
+			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
+			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
+		}
+		break;
 	default:
 		ret = -EINVAL;
 		break;
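For completeness, a userspace-side sketch of driving the new NO_ZEROMAP parameter through the setparam ioctl; the struct layout follows the i915 uapi header, and error handling is elided:

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int context_set_no_zeromap(int drm_fd, __u32 ctx_id)
	{
		struct drm_i915_gem_context_param p = {
			.ctx_id = ctx_id,
			.size = 0,	/* must be zero, see the setparam check above */
			.param = I915_CONTEXT_PARAM_NO_ZEROMAP,
			.value = 1,
		};

		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
	}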
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a7fa14516cda..923a3c4bf0b7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -677,6 +677,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 static int
 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			    struct list_head *vmas,
+			    struct intel_context *ctx,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
@@ -699,6 +700,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 		obj = vma->obj;
 		entry = vma->exec_entry;
 
+		if (ctx->flags & CONTEXT_NO_ZEROMAP)
+			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
 		if (!has_fenced_gpu_access)
 			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
@@ -776,7 +780,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_file *file,
 				  struct intel_engine_cs *ring,
 				  struct eb_vmas *eb,
-				  struct drm_i915_gem_exec_object2 *exec)
+				  struct drm_i915_gem_exec_object2 *exec,
+				  struct intel_context *ctx)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct i915_address_space *vm;
@@ -862,7 +867,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -887,10 +892,10 @@ err:
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -900,7 +905,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring, &req);
 			if (ret)
 				return ret;
 		}
@@ -912,7 +917,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(ring->dev);
+		i915_gem_chipset_flush(req->ring->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -920,7 +925,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(ring);
+	return intel_ring_invalidate_all_caches(req);
 }
 
 static bool
@@ -953,6 +958,9 @@ validate_exec_list(struct drm_device *dev,
 		if (exec[i].flags & invalid_flags)
 			return -EINVAL;
 
+		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
+			return -EINVAL;
+
 		/* First check for malicious input causing overflow in
 		 * the worst case where we need to allocate the entire
 		 * relocation tree as a single array.
@@ -1013,9 +1021,9 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 
 void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-				   struct intel_engine_cs *ring)
+				   struct drm_i915_gem_request *req)
 {
-	struct drm_i915_gem_request *req = intel_ring_get_request(ring);
+	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1029,12 +1037,12 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, ring);
+		i915_vma_move_to_active(vma, req);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			i915_gem_request_assign(&obj->last_write_req, req);
 
-			intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
+			intel_fb_obj_invalidate(obj, ORIGIN_CS);
 
 			/* update for the implicit flush after a batch */
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1053,22 +1061,20 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 }
 
 void
-i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-				    struct drm_file *file,
-				    struct intel_engine_cs *ring,
-				    struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
 	/* Unconditionally force add_request to emit a full flush. */
-	ring->gpu_caches_dirty = true;
+	params->ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	(void)__i915_add_request(ring, file, obj);
+	__i915_add_request(params->request, params->batch_obj, true);
 }
 
 static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
-			    struct intel_engine_cs *ring)
+			    struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;
 
@@ -1077,7 +1083,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(ring, 4 * 3);
+	ret = intel_ring_begin(req, 4 * 3);
 	if (ret)
 		return ret;
 
@@ -1093,10 +1099,11 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 }
 
 static int
-i915_emit_box(struct intel_engine_cs *ring,
+i915_emit_box(struct drm_i915_gem_request *req,
 	      struct drm_clip_rect *box,
 	      int DR1, int DR4)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
@@ -1107,7 +1114,7 @@ i915_emit_box(struct intel_engine_cs *ring,
 	}
 
 	if (INTEL_INFO(ring->dev)->gen >= 4) {
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(req, 4);
 		if (ret)
 			return ret;
 
@@ -1116,7 +1123,7 @@ i915_emit_box(struct intel_engine_cs *ring,
 		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
 		intel_ring_emit(ring, DR4);
 	} else {
-		ret = intel_ring_begin(ring, 6);
+		ret = intel_ring_begin(req, 6);
 		if (ret)
 			return ret;
 
@@ -1186,17 +1193,15 @@ err:
 }
 
 int
-i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
-			       struct intel_context *ctx,
+i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas,
-			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 dispatch_flags)
+			       struct list_head *vmas)
 {
 	struct drm_clip_rect *cliprects = NULL;
+	struct drm_device *dev = params->dev;
+	struct intel_engine_cs *ring = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 exec_len;
+	u64 exec_start, exec_len;
 	int instp_mode;
 	u32 instp_mask;
 	int i, ret = 0;
@@ -1244,15 +1249,15 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 		}
 	}
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
 	if (ret)
 		goto error;
 
-	ret = i915_switch_context(ring, ctx);
+	ret = i915_switch_context(params->request);
 	if (ret)
 		goto error;
 
-	WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
 	     "%s didn't clear reload\n", ring->name);
 
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
@@ -1294,7 +1299,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 
 	if (ring == &dev_priv->ring[RCS] &&
 			instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			goto error;
 
@@ -1308,37 +1313,40 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 	}
 
 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, ring);
+		ret = i915_reset_gen7_sol_offsets(dev, params->request);
 		if (ret)
 			goto error;
 	}
 
-	exec_len = args->batch_len;
+	exec_len   = args->batch_len;
+	exec_start = params->batch_obj_vm_offset +
+		     params->args_batch_start_offset;
+
 	if (cliprects) {
 		for (i = 0; i < args->num_cliprects; i++) {
-			ret = i915_emit_box(ring, &cliprects[i],
+			ret = i915_emit_box(params->request, &cliprects[i],
 					    args->DR1, args->DR4);
 			if (ret)
 				goto error;
 
-			ret = ring->dispatch_execbuffer(ring,
+			ret = ring->dispatch_execbuffer(params->request,
 							exec_start, exec_len,
-							dispatch_flags);
+							params->dispatch_flags);
 			if (ret)
 				goto error;
 		}
 	} else {
-		ret = ring->dispatch_execbuffer(ring,
+		ret = ring->dispatch_execbuffer(params->request,
 						exec_start, exec_len,
-						dispatch_flags);
+						params->dispatch_flags);
 		if (ret)
 			return ret;
 	}
 
-	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
-	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+	i915_gem_execbuffer_move_to_active(vmas, params->request);
+	i915_gem_execbuffer_retire_commands(params);
 
 error:
 	kfree(cliprects);
@@ -1408,8 +1416,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
+	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
+	struct i915_execbuffer_params *params = &params_master;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u64 exec_start = args->batch_start_offset;
 	u32 dispatch_flags;
 	int ret;
 	bool need_relocs;
@@ -1482,6 +1491,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
+		if (!HAS_RESOURCE_STREAMER(dev)) {
+			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
+			return -EINVAL;
+		}
+		if (ring->id != RCS) {
+			DRM_DEBUG("RS is not available on %s\n",
+				 ring->name);
+			return -EINVAL;
+		}
+
+		dispatch_flags |= I915_DISPATCH_RS;
+	}
+
 	intel_runtime_pm_get(dev_priv);
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -1502,6 +1525,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	else
 		vm = &dev_priv->gtt.base;
 
+	memset(&params_master, 0x00, sizeof(params_master));
+
 	eb = eb_create(args);
 	if (eb == NULL) {
 		i915_gem_context_unreference(ctx);
@@ -1520,7 +1545,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1530,7 +1555,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-								eb, exec);
+								eb, exec, ctx);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
 		if (ret)
@@ -1544,6 +1569,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
+	params->args_batch_start_offset = args->batch_start_offset;
 	if (i915_needs_cmd_parser(ring) && args->batch_len) {
 		struct drm_i915_gem_object *parsed_batch_obj;
 
@@ -1575,7 +1601,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			 * command parser has accepted.
 			 */
 			dispatch_flags |= I915_DISPATCH_SECURE;
-			exec_start = 0;
+			params->args_batch_start_offset = 0;
 			batch_obj = parsed_batch_obj;
 		}
 	}
@@ -1600,15 +1626,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (ret)
 			goto err;
 
-		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
 	} else
-		exec_start += i915_gem_obj_offset(batch_obj, vm);
+		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
-	ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
-					  &eb->vmas, batch_obj, exec_start,
-					  dispatch_flags);
+	/* Allocate a request for this batch buffer nice and early. */
+	ret = i915_gem_request_alloc(ring, ctx, &params->request);
+	if (ret)
+		goto err_batch_unpin;
+
+	ret = i915_gem_request_add_to_client(params->request, file);
+	if (ret)
+		goto err_batch_unpin;
 
 	/*
+	 * Save assorted stuff away to pass through to *_submission().
+	 * NB: This data should be 'persistent' and not local as it will
+	 * be kept around beyond the duration of the IOCTL once the GPU
+	 * scheduler arrives.
+	 */
+	params->dev                     = dev;
+	params->file                    = file;
+	params->ring                    = ring;
+	params->dispatch_flags          = dispatch_flags;
+	params->batch_obj               = batch_obj;
+	params->ctx                     = ctx;
+
+	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+
+err_batch_unpin:
+	/*
 	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
 	 * batch vma for correctness. For less ugly and less fragility this
 	 * needs to be adjusted to also track the ggtt batch vma properly as
@@ -1616,11 +1663,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 */
 	if (dispatch_flags & I915_DISPATCH_SECURE)
 		i915_gem_object_ggtt_unpin(batch_obj);
+
 err:
 	/* the request owns the ref now */
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);
 
+	/*
+	 * If the request was created but not successfully submitted then it
+	 * must be freed again. If it was submitted then it is being tracked
+	 * on the active request list and no clean up is required here.
+	 */
+	if (ret && params->request)
+		i915_gem_request_cancel(params->request);
+
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
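The submission path above reads a number of fields from i915_execbuffer_params. Reconstructed from those uses alone (the real declaration lives in i915_drv.h and may differ in ordering or spelling), the struct is roughly:

	struct i915_execbuffer_params {
		struct drm_device		*dev;
		struct drm_file			*file;
		struct intel_engine_cs		*ring;
		struct intel_context		*ctx;
		struct drm_i915_gem_request	*request;
		struct drm_i915_gem_object	*batch_obj;
		uint32_t			dispatch_flags;
		uint32_t			args_batch_start_offset;
		uint64_t			batch_obj_vm_offset;
	};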
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
new file mode 100644
index 000000000000..af1f8c461060
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -0,0 +1,787 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/**
+ * DOC: fence register handling
+ *
+ * Important to avoid confusion: "fences" in the i915 driver are not execution
+ * fences used to track command completion but hardware detiler objects which
+ * wrap a given range of the global GTT. Each platform has only a fairly limited
+ * set of these objects.
+ *
+ * Fences are used to detile GTT memory mappings. They're also connected to the
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore on older platforms fences are required for tiled
+ * objects used by the display engine. They can also be used by the render
+ * engine - they're required for blitter commands and are optional for render
+ * commands. But on gen4+ both display (with the exception of fbc) and rendering
+ * have their own tiling state bits and don't need fences.
+ *
+ * Also note that fences only support X and Y tiling and hence can't be used for
+ * the fancier new tiling formats like W, Ys and Yf.
+ *
+ * Finally note that because fences are such a restricted resource they're
+ * dynamically associated with objects. Furthermore fence state is committed to
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
+ * for cpu access. Also note that some code wants an unfenced view, for those
+ * cases the fence can be removed forcefully with i915_gem_object_put_fence().
+ *
+ * Internally these functions will synchronize with userspace access by removing
+ * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
+ */
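+
+/*
+ * Illustrative usage sketch only (no real call site here, error
+ * handling elided): synchronize fence state first, then pin it for
+ * the duration of the fenced access:
+ *
+ *	ret = i915_gem_object_get_fence(obj);
+ *	if (ret)
+ *		return ret;
+ *	if (i915_gem_object_pin_fence(obj)) {
+ *		... fenced GTT access or scanout setup ...
+ *		i915_gem_object_unpin_fence(obj);
+ *	}
+ */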
+
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int fence_reg;
+	int fence_pitch_shift;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		fence_reg = FENCE_REG_SANDYBRIDGE_0;
+		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+	} else {
+		fence_reg = FENCE_REG_965_0;
+		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+	}
+
+	fence_reg += reg * 8;
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg, 0);
+	POSTING_READ(fence_reg);
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		uint64_t val;
+
+		/* Adjust fence size to match tiled area */
+		if (obj->tiling_mode != I915_TILING_NONE) {
+			uint32_t row_size = obj->stride *
+				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+			size = (size / row_size) * row_size;
+		}
+
+		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+				 0xfffff000) << 32;
+		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+		val |= I965_FENCE_REG_VALID;
+
+		I915_WRITE(fence_reg + 4, val >> 32);
+		POSTING_READ(fence_reg + 4);
+
+		I915_WRITE(fence_reg + 0, val);
+		POSTING_READ(fence_reg);
+	} else {
+		I915_WRITE(fence_reg + 4, 0);
+		POSTING_READ(fence_reg + 4);
+	}
+}
+
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		int pitch_val;
+		int tile_width;
+
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+
+		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+			tile_width = 128;
+		else
+			tile_width = 512;
+
+		/* Note: pitch better be a power of two tile widths */
+		pitch_val = obj->stride / tile_width;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = i915_gem_obj_ggtt_offset(obj);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I915_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	if (reg < 8)
+		reg = FENCE_REG_830_0 + reg * 4;
+	else
+		reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+				struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val;
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		uint32_t pitch_val;
+
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), size);
+
+		pitch_val = obj->stride / 128;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = i915_gem_obj_ggtt_offset(obj);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I830_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+	POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
+
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+		mb();
+
+	WARN(obj && (!obj->stride || !obj->tiling_mode),
+	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+	     obj->stride, obj->tiling_mode);
+
+	if (IS_GEN2(dev))
+		i830_write_fence_reg(dev, reg, obj);
+	else if (IS_GEN3(dev))
+		i915_write_fence_reg(dev, reg, obj);
+	else if (INTEL_INFO(dev)->gen >= 4)
+		i965_write_fence_reg(dev, reg, obj);
+
+	/* And similarly be paranoid that no direct access to this region
+	 * is reordered to before the fence is installed.
+	 */
+	if (i915_gem_object_needs_mb(obj))
+		mb();
+}
+
+static inline int fence_number(struct drm_i915_private *dev_priv,
+			       struct drm_i915_fence_reg *fence)
+{
+	return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+					 struct drm_i915_fence_reg *fence,
+					 bool enable)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int reg = fence_number(dev_priv, fence);
+
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+	if (enable) {
+		obj->fence_reg = reg;
+		fence->obj = obj;
+		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+	} else {
+		obj->fence_reg = I915_FENCE_REG_NONE;
+		fence->obj = NULL;
+		list_del_init(&fence->lru_list);
+	}
+	obj->fence_dirty = false;
+}
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+	if (obj->tiling_mode)
+		i915_gem_release_mmap(obj);
+
+	/* As we do not have an associated fence register, we will force
+	 * a tiling change if we ever need to acquire one.
+	 */
+	obj->fence_dirty = false;
+	obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->last_fenced_req) {
+		int ret = i915_wait_request(obj->last_fenced_req);
+		if (ret)
+			return ret;
+
+		i915_gem_request_assign(&obj->last_fenced_req, NULL);
+	}
+
+	return 0;
+}
+
+/**
+ * i915_gem_object_put_fence - force-remove fence for an object
+ * @obj: object to map through a fence reg
+ *
+ * This function force-removes any fence from the given object, which is useful
+ * if the kernel wants to do untiled GTT access.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_fence_reg *fence;
+	int ret;
+
+	ret = i915_gem_object_wait_fence(obj);
+	if (ret)
+		return ret;
+
+	if (obj->fence_reg == I915_FENCE_REG_NONE)
+		return 0;
+
+	fence = &dev_priv->fence_regs[obj->fence_reg];
+
+	if (WARN_ON(fence->pin_count))
+		return -EBUSY;
+
+	i915_gem_object_fence_lost(obj);
+	i915_gem_object_update_fence(obj, fence, false);
+
+	return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_fence_reg *reg, *avail;
+	int i;
+
+	/* First try to find a free reg */
+	avail = NULL;
+	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			return reg;
+
+		if (!reg->pin_count)
+			avail = reg;
+	}
+
+	if (avail == NULL)
+		goto deadlock;
+
+	/* None available, try to steal one or wait for a user to finish */
+	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+		if (reg->pin_count)
+			continue;
+
+		return reg;
+	}
+
+deadlock:
+	/* Wait for completion of pending flips which consume fences */
+	if (intel_has_pending_fb_unpin(dev))
+		return ERR_PTR(-EAGAIN);
+
+	return ERR_PTR(-EDEADLK);
+}
+
+/**
+ * i915_gem_object_get_fence - set up fencing for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable = obj->tiling_mode != I915_TILING_NONE;
+	struct drm_i915_fence_reg *reg;
+	int ret;
+
+	/* Have we updated the tiling parameters upon the object and so
+	 * will need to serialise the write to the associated fence register?
+	 */
+	if (obj->fence_dirty) {
+		ret = i915_gem_object_wait_fence(obj);
+		if (ret)
+			return ret;
+	}
+
+	/* Just update our place in the LRU if our fence is getting reused. */
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		reg = &dev_priv->fence_regs[obj->fence_reg];
+		if (!obj->fence_dirty) {
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+			return 0;
+		}
+	} else if (enable) {
+		if (WARN_ON(!obj->map_and_fenceable))
+			return -EINVAL;
+
+		reg = i915_find_fence_reg(dev);
+		if (IS_ERR(reg))
+			return PTR_ERR(reg);
+
+		if (reg->obj) {
+			struct drm_i915_gem_object *old = reg->obj;
+
+			ret = i915_gem_object_wait_fence(old);
+			if (ret)
+				return ret;
+
+			i915_gem_object_fence_lost(old);
+		}
+	} else
+		return 0;
+
+	i915_gem_object_update_fence(obj, reg, enable);
+
+	return 0;
+}
+
+/**
+ * i915_gem_object_pin_fence - pin fencing state
+ * @obj: object to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * object is ready to be used as a scanout target. Fencing status must be
+ * synchronized first by calling i915_gem_object_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_gem_object_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the object has a fence, false otherwise.
+ */
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+		WARN_ON(!ggtt_vma ||
+			dev_priv->fence_regs[obj->fence_reg].pin_count >
+			ggtt_vma->pin_count);
+		dev_priv->fence_regs[obj->fence_reg].pin_count++;
+		return true;
+	} else
+		return false;
+}
+
+/**
+ * i915_gem_object_unpin_fence - unpin fencing state
+ * @obj: object to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_gem_object_pin_fence. It will handle both objects with and without an
+ * attached fence correctly, callers do not need to distinguish this.
+ */
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+		dev_priv->fence_regs[obj->fence_reg].pin_count--;
+	}
+}
+
+/**
+ * i915_gem_restore_fences - restore fence state
+ * @dev: DRM device
+ *
+ * Restore the hw fence state to match the software tracking again, to be called
+ * after a gpu reset and on resume.
+ */
+void i915_gem_restore_fences(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+		/*
+		 * Commit delayed tiling changes if we have an object still
+		 * attached to the fence, otherwise just clear the fence.
+		 */
+		if (reg->obj) {
+			i915_gem_object_update_fence(reg->obj, reg,
+						     reg->obj->tiling_mode);
+		} else {
+			i915_gem_write_fence(dev, i, NULL);
+		}
+	}
+}
+
+/**
+ * DOC: tiling swizzling details
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvement from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled.  However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y.  So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip --  Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics.  This
+ * is called "Channel XOR Randomization" in the MCH documentation.  The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
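+
+/*
+ * Illustrative only: for I915_BIT_6_SWIZZLE_9_10 the CPU view of an
+ * address maps to the GPU view by XORing bits 9 and 10 into bit 6,
+ * e.g.:
+ *
+ *	swizzled = addr ^ ((((addr >> 9) ^ (addr >> 10)) & 1) << 6);
+ *
+ * The _9_10_11 and _9_10_17 variants additionally fold bit 11 or
+ * bit 17 into the XOR.
+ */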
+
+/**
+ * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @dev: DRM device
+ *
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+		/*
+		 * On BDW+, swizzling is not used. We leave the CPU memory
+		 * controller in charge of optimizing memory accesses without
+		 * the extra address manipulation GPU side.
+		 *
+		 * VLV and CHV don't have GPU swizzling.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		if (dev_priv->preserve_bios_swizzle) {
+			if (I915_READ(DISP_ARB_CTL) &
+			    DISP_TILE_SURFACE_SWIZZLING) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
+		} else {
+			uint32_t dimm_c0, dimm_c1;
+			dimm_c0 = I915_READ(MAD_DIMM_C0);
+			dimm_c1 = I915_READ(MAD_DIMM_C1);
+			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			/* Enable swizzling when the channels are populated
+			 * with identically sized dimms. We don't need to check
+			 * the 3rd channel because no cpu with gpu attached
+			 * ships in that configuration. Also, swizzling only
+			 * makes sense for 2 channels anyway. */
+			if (dimm_c0 == dimm_c1) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
+		}
+	} else if (IS_GEN5(dev)) {
+		/* On Ironlake, whatever the DRAM config, the GPU always
+		 * does the same swizzling setup.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+		swizzle_y = I915_BIT_6_SWIZZLE_9;
+	} else if (IS_GEN2(dev)) {
+		/* As far as we know, the 865 doesn't have these bit 6
+		 * swizzling issues.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+		uint32_t dcc;
+
+		/* On 9xx chipsets, channel interleave by the CPU is
+		 * determined by DCC.  For single-channel, neither the CPU
+		 * nor the GPU do swizzling.  For dual channel interleaved,
+		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+		 * 9 for Y tiled.  The CPU's interleave is independent, and
+		 * can be based on either bit 11 (haven't seen this yet) or
+		 * bit 17 (common).
+		 */
+		dcc = I915_READ(DCC);
+		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			break;
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+				/* This is the base swizzling by the GPU for
+				 * tiled buffers.
+				 */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+				/* Bit 11 swizzling by the CPU in addition. */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+			} else {
+				/* Bit 17 swizzling by the CPU in addition. */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+			}
+			break;
+		}
+
+		/* check for L-shaped memory aka modified enhanced addressing */
+		if (IS_GEN4(dev)) {
+			uint32_t dcc2 = I915_READ(DCC2);
+
+			if (!(dcc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+		}
+
+		if (dcc == 0xffffffff) {
+			DRM_ERROR("Couldn't read from MCHBAR.  "
+				  "Disabling tiling.\n");
+			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+		}
+	} else {
+		/* The 965, G33, and newer have a very flexible memory
+		 * configuration.  It will enable dual-channel mode
+		 * (interleaving) on as much memory as it can, and the GPU
+		 * will additionally sometimes enable different bit 6
+		 * swizzling for tiled objects from the CPU.
+		 *
+		 * Here's what I found on the G965:
+		 *    slot fill         memory size  swizzling
+		 * 0A   0B   1A   1B    1-ch   2-ch
+		 * 512  0    0    0     512    0     O
+		 * 512  0    512  0     16     1008  X
+		 * 512  0    0    512   16     1008  X
+		 * 0    512  0    512   16     1008  X
+		 * 1024 1024 1024 0     2048   1024  O
+		 *
+		 * We could probably detect this based on either the DRB
+		 * matching, which was the case for the swizzling required in
+		 * the table above, or from the 1-ch value being less than
+		 * the minimum size of a rank.
+		 */
+		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+		} else {
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		}
+	}
+
+	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
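+
+/*
+ * Illustrative note: userspace observes the detected values through
+ * the get_tiling ioctl, roughly:
+ *
+ *	struct drm_i915_gem_get_tiling arg = { .handle = handle };
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &arg);
+ *	// arg.swizzle_mode is one of the I915_BIT_6_SWIZZLE_* values
+ */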
+
+/*
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void
+i915_gem_swizzle_page(struct page *page)
+{
+	char temp[64];
+	char *vaddr;
+	int i;
+
+	vaddr = kmap(page);
+
+	for (i = 0; i < PAGE_SIZE; i += 128) {
+		memcpy(temp, &vaddr[i], 64);
+		memcpy(&vaddr[i], &vaddr[i + 64], 64);
+		memcpy(&vaddr[i + 64], temp, 64);
+	}
+
+	kunmap(page);
+}
+
+/**
+ * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function fixes up the swizzling in case any page frame number for this
+ * object has changed in bit 17 since that state has been saved with
+ * i915_gem_object_save_bit_17_swizzle().
+ *
+ * This is called when pinning backing storage again, since the kernel is free
+ * to move unpinned backing storage around (either by directly moving pages or
+ * by swapping them out and back in again).
+ */
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int i;
+
+	if (obj->bit_17 == NULL)
+		return;
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		char new_bit_17 = page_to_phys(page) >> 17;
+		if ((new_bit_17 & 0x1) !=
+		    (test_bit(i, obj->bit_17) != 0)) {
+			i915_gem_swizzle_page(page);
+			set_page_dirty(page);
+		}
+		i++;
+	}
+}
+
+/**
+ * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function saves the bit 17 of each page frame number so that swizzling
+ * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
+ * be called before the backing storage can be unpinned.
+ */
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int page_count = obj->base.size >> PAGE_SHIFT;
+	int i;
+
+	if (obj->bit_17 == NULL) {
+		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+				      sizeof(long), GFP_KERNEL);
+		if (obj->bit_17 == NULL) {
+			DRM_ERROR("Failed to allocate memory for bit 17 "
+				  "record\n");
+			return;
+		}
+	}
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+			__set_bit(i, obj->bit_17);
+		else
+			__clear_bit(i, obj->bit_17);
+		i++;
+	}
+}
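The two bit-17 helpers above are intended to be used as a pair around backing-storage pin/unpin. A hedged sketch of that pairing (the wrapper names are hypothetical; the real call sites are the get_pages/put_pages paths for swizzled objects):

	static int pin_backing_store(struct drm_i915_gem_object *obj)
	{
		int ret = i915_gem_object_get_pages(obj);

		if (ret)
			return ret;

		/* Pages may have moved while unpinned: fix up bit 17. */
		i915_gem_object_do_bit_17_swizzle(obj);
		return 0;
	}

	static void unpin_backing_store(struct drm_i915_gem_object *obj)
	{
		/* Record bit 17 of each PFN before the pages can move. */
		i915_gem_object_save_bit_17_swizzle(obj);
		i915_gem_object_put_pages(obj);
	}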
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 31e8269e6e3d..96054a560f4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -192,9 +192,8 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen8_pde_t gen8_pde_encode(struct drm_device *dev,
-				  dma_addr_t addr,
-				  enum i915_cache_level level)
+static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
+				  const enum i915_cache_level level)
 {
 	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
 	pde |= addr;
@@ -301,75 +300,120 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-#define i915_dma_unmap_single(px, dev) \
-	__i915_dma_unmap_single((px)->daddr, dev)
-
-static void __i915_dma_unmap_single(dma_addr_t daddr,
-				    struct drm_device *dev)
+static int __setup_page_dma(struct drm_device *dev,
+			    struct i915_page_dma *p, gfp_t flags)
 {
 	struct device *device = &dev->pdev->dev;
 
-	dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+	p->page = alloc_page(flags);
+	if (!p->page)
+		return -ENOMEM;
+
+	p->daddr = dma_map_page(device,
+				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(device, p->daddr)) {
+		__free_page(p->page);
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
-/**
- * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
- * @px:	Page table/dir/etc to get a DMA map for
- * @dev:	drm device
- *
- * Page table allocations are unified across all gens. They always require a
- * single 4k allocation, as well as a DMA mapping. If we keep the structs
- * symmetric here, the simple macro covers us for every page table type.
- *
- * Return: 0 if success.
- */
-#define i915_dma_map_single(px, dev) \
-	i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
+static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+{
+	return __setup_page_dma(dev, p, GFP_KERNEL);
+}
 
-static int i915_dma_map_page_single(struct page *page,
-				    struct drm_device *dev,
-				    dma_addr_t *daddr)
+static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
-	struct device *device = &dev->pdev->dev;
+	if (WARN_ON(!p->page))
+		return;
 
-	*daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(device, *daddr))
-		return -ENOMEM;
+	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+	__free_page(p->page);
+	memset(p, 0, sizeof(*p));
+}
 
-	return 0;
+static void *kmap_page_dma(struct i915_page_dma *p)
+{
+	return kmap_atomic(p->page);
 }
 
-static void unmap_and_free_pt(struct i915_page_table *pt,
-			       struct drm_device *dev)
+/* We use the flushing unmap only with ppgtt structures:
+ * page directories, page tables and scratch pages.
+ */
+static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
 {
-	if (WARN_ON(!pt->page))
-		return;
+	/* There are only a few exceptions for gen >= 6: chv and bxt.
+	 * And we are not sure about the latter, so play safe for now.
+	 */
+	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+		drm_clflush_virt_range(vaddr, PAGE_SIZE);
 
-	i915_dma_unmap_single(pt, dev);
-	__free_page(pt->page);
-	kfree(pt->used_ptes);
-	kfree(pt);
+	kunmap_atomic(vaddr);
 }
 
-static void gen8_initialize_pt(struct i915_address_space *vm,
-			       struct i915_page_table *pt)
+#define kmap_px(px) kmap_page_dma(px_base(px))
+#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+
+#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
+#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
+#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
+#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+
+static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
+			  const uint64_t val)
 {
-	gen8_pte_t *pt_vaddr, scratch_pte;
 	int i;
+	uint64_t * const vaddr = kmap_page_dma(p);
 
-	pt_vaddr = kmap_atomic(pt->page);
-	scratch_pte = gen8_pte_encode(vm->scratch.addr,
-				      I915_CACHE_LLC, true);
+	for (i = 0; i < 512; i++)
+		vaddr[i] = val;
+
+	kunmap_page_dma(dev, vaddr);
+}
+
+static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
+			     const uint32_t val32)
+{
+	uint64_t v = val32;
 
-	for (i = 0; i < GEN8_PTES; i++)
-		pt_vaddr[i] = scratch_pte;
+	v = v << 32 | val32;
 
-	if (!HAS_LLC(vm->dev))
-		drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-	kunmap_atomic(pt_vaddr);
+	fill_page_dma(dev, p, v);
 }
 
-static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
+static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+{
+	struct i915_page_scratch *sp;
+	int ret;
+
+	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+	if (sp == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
+	if (ret) {
+		kfree(sp);
+		return ERR_PTR(ret);
+	}
+
+	set_pages_uc(px_page(sp), 1);
+
+	return sp;
+}
+
+static void free_scratch_page(struct drm_device *dev,
+			      struct i915_page_scratch *sp)
+{
+	set_pages_wb(px_page(sp), 1);
+
+	cleanup_px(dev, sp);
+	kfree(sp);
+}
+
+static struct i915_page_table *alloc_pt(struct drm_device *dev)
 {
 	struct i915_page_table *pt;
 	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
@@ -386,19 +430,13 @@ static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
 	if (!pt->used_ptes)
 		goto fail_bitmap;
 
-	pt->page = alloc_page(GFP_KERNEL);
-	if (!pt->page)
-		goto fail_page;
-
-	ret = i915_dma_map_single(pt, dev);
+	ret = setup_px(dev, pt);
 	if (ret)
-		goto fail_dma;
+		goto fail_page_m;
 
 	return pt;
 
-fail_dma:
-	__free_page(pt->page);
-fail_page:
+fail_page_m:
 	kfree(pt->used_ptes);
 fail_bitmap:
 	kfree(pt);
@@ -406,18 +444,38 @@ fail_bitmap:
 	return ERR_PTR(ret);
 }
 
-static void unmap_and_free_pd(struct i915_page_directory *pd,
-			      struct drm_device *dev)
+static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
 {
-	if (pd->page) {
-		i915_dma_unmap_single(pd, dev);
-		__free_page(pd->page);
-		kfree(pd->used_pdes);
-		kfree(pd);
-	}
+	cleanup_px(dev, pt);
+	kfree(pt->used_ptes);
+	kfree(pt);
 }
 
-static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
+static void gen8_initialize_pt(struct i915_address_space *vm,
+			       struct i915_page_table *pt)
+{
+	gen8_pte_t scratch_pte;
+
+	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+				      I915_CACHE_LLC, true);
+
+	fill_px(vm->dev, pt, scratch_pte);
+}
+
+static void gen6_initialize_pt(struct i915_address_space *vm,
+			       struct i915_page_table *pt)
+{
+	gen6_pte_t scratch_pte;
+
+	WARN_ON(px_dma(vm->scratch_page) == 0);
+
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
+
+	fill32_px(vm->dev, pt, scratch_pte);
+}
+
+static struct i915_page_directory *alloc_pd(struct drm_device *dev)
 {
 	struct i915_page_directory *pd;
 	int ret = -ENOMEM;
@@ -429,38 +487,52 @@ static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
 	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
 				sizeof(*pd->used_pdes), GFP_KERNEL);
 	if (!pd->used_pdes)
-		goto free_pd;
-
-	pd->page = alloc_page(GFP_KERNEL);
-	if (!pd->page)
-		goto free_bitmap;
+		goto fail_bitmap;
 
-	ret = i915_dma_map_single(pd, dev);
+	ret = setup_px(dev, pd);
 	if (ret)
-		goto free_page;
+		goto fail_page_m;
 
 	return pd;
 
-free_page:
-	__free_page(pd->page);
-free_bitmap:
+fail_page_m:
 	kfree(pd->used_pdes);
-free_pd:
+fail_bitmap:
 	kfree(pd);
 
 	return ERR_PTR(ret);
 }
 
+static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+{
+	if (px_page(pd)) {
+		cleanup_px(dev, pd);
+		kfree(pd->used_pdes);
+		kfree(pd);
+	}
+}
+
+static void gen8_initialize_pd(struct i915_address_space *vm,
+			       struct i915_page_directory *pd)
+{
+	gen8_pde_t scratch_pde;
+
+	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
+
+	fill_px(vm->dev, pd, scratch_pde);
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_engine_cs *ring,
+static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	BUG_ON(entry >= 4);
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -476,16 +548,14 @@ static int gen8_write_pdp(struct intel_engine_cs *ring,
 }
 
 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
 	int i, ret;
 
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
-		struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
-		dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;
-		/* The page directory might be NULL, but we need to clear out
-		 * whatever the previous context might have used. */
-		ret = gen8_write_pdp(ring, i, pd_daddr);
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+		ret = gen8_write_pdp(req, i, pd_daddr);
 		if (ret)
 			return ret;
 	}
@@ -507,13 +577,12 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 	unsigned num_entries = length >> PAGE_SHIFT;
 	unsigned last_pte, i;
 
-	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+	scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
 				      I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
 		struct i915_page_directory *pd;
 		struct i915_page_table *pt;
-		struct page *page_table;
 
 		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
 			break;
@@ -525,25 +594,21 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 
 		pt = pd->page_table[pde];
 
-		if (WARN_ON(!pt->page))
+		if (WARN_ON(!px_page(pt)))
 			break;
 
-		page_table = pt->page;
-
 		last_pte = pte + num_entries;
 		if (last_pte > GEN8_PTES)
 			last_pte = GEN8_PTES;
 
-		pt_vaddr = kmap_atomic(page_table);
+		pt_vaddr = kmap_px(pt);
 
 		for (i = pte; i < last_pte; i++) {
 			pt_vaddr[i] = scratch_pte;
 			num_entries--;
 		}
 
-		if (!HAS_LLC(ppgtt->base.dev))
-			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-		kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt);
 
 		pte = 0;
 		if (++pde == I915_PDES) {
@@ -575,18 +640,14 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 		if (pt_vaddr == NULL) {
 			struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
 			struct i915_page_table *pt = pd->page_table[pde];
-			struct page *page_table = pt->page;
-
-			pt_vaddr = kmap_atomic(page_table);
+			pt_vaddr = kmap_px(pt);
 		}
 
 		pt_vaddr[pte] =
 			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
 					cache_level, true);
 		if (++pte == GEN8_PTES) {
-			if (!HAS_LLC(ppgtt->base.dev))
-				drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-			kunmap_atomic(pt_vaddr);
+			kunmap_px(ppgtt, pt_vaddr);
 			pt_vaddr = NULL;
 			if (++pde == I915_PDES) {
 				pdpe++;
@@ -595,58 +656,64 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 			pte = 0;
 		}
 	}
-	if (pt_vaddr) {
-		if (!HAS_LLC(ppgtt->base.dev))
-			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-		kunmap_atomic(pt_vaddr);
-	}
-}
-
-static void __gen8_do_map_pt(gen8_pde_t * const pde,
-			     struct i915_page_table *pt,
-			     struct drm_device *dev)
-{
-	gen8_pde_t entry =
-		gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
-	*pde = entry;
-}
 
-static void gen8_initialize_pd(struct i915_address_space *vm,
-			       struct i915_page_directory *pd)
-{
-	struct i915_hw_ppgtt *ppgtt =
-			container_of(vm, struct i915_hw_ppgtt, base);
-	gen8_pde_t *page_directory;
-	struct i915_page_table *pt;
-	int i;
-
-	page_directory = kmap_atomic(pd->page);
-	pt = ppgtt->scratch_pt;
-	for (i = 0; i < I915_PDES; i++)
-		/* Map the PDE to the page table */
-		__gen8_do_map_pt(page_directory + i, pt, vm->dev);
-
-	if (!HAS_LLC(vm->dev))
-		drm_clflush_virt_range(page_directory, PAGE_SIZE);
-	kunmap_atomic(page_directory);
+	if (pt_vaddr)
+		kunmap_px(ppgtt, pt_vaddr);
 }
 
-static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
+static void gen8_free_page_tables(struct drm_device *dev,
+				  struct i915_page_directory *pd)
 {
 	int i;
 
-	if (!pd->page)
+	if (!px_page(pd))
 		return;
 
 	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
 		if (WARN_ON(!pd->page_table[i]))
 			continue;
 
-		unmap_and_free_pt(pd->page_table[i], dev);
+		free_pt(dev, pd->page_table[i]);
 		pd->page_table[i] = NULL;
 	}
 }
 
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	vm->scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(vm->scratch_page))
+		return PTR_ERR(vm->scratch_page);
+
+	vm->scratch_pt = alloc_pt(dev);
+	if (IS_ERR(vm->scratch_pt)) {
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pt);
+	}
+
+	vm->scratch_pd = alloc_pd(dev);
+	if (IS_ERR(vm->scratch_pd)) {
+		free_pt(dev, vm->scratch_pt);
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pd);
+	}
+
+	gen8_initialize_pt(vm, vm->scratch_pt);
+	gen8_initialize_pd(vm, vm->scratch_pd);
+
+	return 0;
+}
+
+static void gen8_free_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	free_pd(dev, vm->scratch_pd);
+	free_pt(dev, vm->scratch_pt);
+	free_scratch_page(dev, vm->scratch_page);
+}
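
Taken together, the scratch trio gives every never-allocated address a safe
destination; a summary of the resolution chain set up by the two initialize
helpers above:

	/*
	 *   unused PDP entry        --> scratch_pd
	 *   every PDE of scratch_pd --> scratch_pt   (gen8_initialize_pd)
	 *   every PTE of scratch_pt --> scratch_page (gen8_initialize_pt)
	 *
	 * so a stray GPU access lands on one known, uncached page instead
	 * of faulting or hitting unrelated memory.
	 */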
+
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
@@ -657,12 +724,12 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
 			continue;
 
-		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
-		unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+		gen8_free_page_tables(ppgtt->base.dev,
+				      ppgtt->pdp.page_directory[i]);
+		free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
 	}
 
-	unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev);
-	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+	gen8_free_scratch(vm);
 }
 
 /**
@@ -698,24 +765,24 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
 		/* Don't reallocate page tables */
 		if (pt) {
 			/* Scratch is never allocated this way */
-			WARN_ON(pt == ppgtt->scratch_pt);
+			WARN_ON(pt == ppgtt->base.scratch_pt);
 			continue;
 		}
 
-		pt = alloc_pt_single(dev);
+		pt = alloc_pt(dev);
 		if (IS_ERR(pt))
 			goto unwind_out;
 
 		gen8_initialize_pt(&ppgtt->base, pt);
 		pd->page_table[pde] = pt;
-		set_bit(pde, new_pts);
+		__set_bit(pde, new_pts);
 	}
 
 	return 0;
 
 unwind_out:
 	for_each_set_bit(pde, new_pts, I915_PDES)
-		unmap_and_free_pt(pd->page_table[pde], dev);
+		free_pt(dev, pd->page_table[pde]);
 
 	return -ENOMEM;
 }
@@ -756,27 +823,24 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
 
 	WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
 
-	/* FIXME: upper bound must not overflow 32 bits  */
-	WARN_ON((start + length) > (1ULL << 32));
-
 	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		if (pd)
 			continue;
 
-		pd = alloc_pd_single(dev);
+		pd = alloc_pd(dev);
 		if (IS_ERR(pd))
 			goto unwind_out;
 
 		gen8_initialize_pd(&ppgtt->base, pd);
 		pdp->page_directory[pdpe] = pd;
-		set_bit(pdpe, new_pds);
+		__set_bit(pdpe, new_pds);
 	}
 
 	return 0;
 
 unwind_out:
 	for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES)
-		unmap_and_free_pd(pdp->page_directory[pdpe], dev);
+		free_pd(dev, pdp->page_directory[pdpe]);
 
 	return -ENOMEM;
 }
@@ -830,6 +894,16 @@ err_out:
 	return -ENOMEM;
 }
 
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that tlbs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
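
As a hedged, illustrative sketch only (the flush step is assumed here;
switch_mm() and ring->id are taken from this patch), a submission-path
consumer of pd_dirty_rings could look like:

	if (ppgtt && (ppgtt->pd_dirty_rings & (1 << ring->id))) {
		/* reloading the PDPs drops the stale PDE TLB entries */
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			return ret;
		ppgtt->pd_dirty_rings &= ~(1 << ring->id);
	}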
+
 static int gen8_alloc_va_range(struct i915_address_space *vm,
 			       uint64_t start,
 			       uint64_t length)
@@ -848,7 +922,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 	 * actually use the other side of the canonical address space.
 	 */
 	if (WARN_ON(start + length < start))
-		return -ERANGE;
+		return -ENODEV;
+
+	if (WARN_ON(start + length > ppgtt->base.total))
+		return -ENODEV;
 
 	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables);
 	if (ret)
@@ -876,7 +953,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 	/* Allocations have completed successfully, so set the bitmaps, and do
 	 * the mappings. */
 	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
-		gen8_pde_t *const page_directory = kmap_atomic(pd->page);
+		gen8_pde_t *const page_directory = kmap_px(pd);
 		struct i915_page_table *pt;
 		uint64_t pd_len = gen8_clamp_pd(start, length);
 		uint64_t pd_start = start;
@@ -897,36 +974,36 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 				   gen8_pte_count(pd_start, pd_len));
 
 			/* Our pde is now pointing to the pagetable, pt */
-			set_bit(pde, pd->used_pdes);
+			__set_bit(pde, pd->used_pdes);
 
 			/* Map the PDE to the page table */
-			__gen8_do_map_pt(page_directory + pde, pt, vm->dev);
+			page_directory[pde] = gen8_pde_encode(px_dma(pt),
+							      I915_CACHE_LLC);
 
 			/* NB: We haven't yet mapped ptes to pages. At this
 			 * point we're still relying on insert_entries() */
 		}
 
-		if (!HAS_LLC(vm->dev))
-			drm_clflush_virt_range(page_directory, PAGE_SIZE);
+		kunmap_px(ppgtt, page_directory);
 
-		kunmap_atomic(page_directory);
-
-		set_bit(pdpe, ppgtt->pdp.used_pdpes);
+		__set_bit(pdpe, ppgtt->pdp.used_pdpes);
 	}
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+	mark_tlbs_dirty(ppgtt);
 	return 0;
 
 err_out:
 	while (pdpe--) {
 		for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES)
-			unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev);
+			free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]);
 	}
 
 	for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES)
-		unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
+		free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]);
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+	mark_tlbs_dirty(ppgtt);
 	return ret;
 }
 
@@ -939,16 +1016,11 @@ err_out:
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-	ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pt))
-		return PTR_ERR(ppgtt->scratch_pt);
-
-	ppgtt->scratch_pd = alloc_pd_single(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pd))
-		return PTR_ERR(ppgtt->scratch_pd);
+	int ret;
 
-	gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
-	gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
+	ret = gen8_init_scratch(&ppgtt->base);
+	if (ret)
+		return ret;
 
 	ppgtt->base.start = 0;
 	ppgtt->base.total = 1ULL << 32;
@@ -980,12 +1052,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 	uint32_t  pte, pde, temp;
 	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
 		u32 expected;
 		gen6_pte_t *pt_vaddr;
-		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
+		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
 		pd_entry = readl(ppgtt->pd_addr + pde);
 		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 
@@ -996,7 +1069,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 				   expected);
 		seq_printf(m, "\tPDE: %x\n", pd_entry);
 
-		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
+		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+
 		for (pte = 0; pte < GEN6_PTES; pte+=4) {
 			unsigned long va =
 				(pde * PAGE_SIZE * GEN6_PTES) +
@@ -1018,7 +1092,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 			}
 			seq_puts(m, "\n");
 		}
-		kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt_vaddr);
 	}
 }
 
@@ -1031,7 +1105,7 @@ static void gen6_write_pde(struct i915_page_directory *pd,
 		container_of(pd, struct i915_hw_ppgtt, pd);
 	u32 pd_entry;
 
-	pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
 	pd_entry |= GEN6_PDE_VALID;
 
 	writel(pd_entry, ppgtt->pd_addr + pde);
@@ -1056,22 +1130,23 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
-	BUG_ON(ppgtt->pd.pd_offset & 0x3f);
+	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
 
-	return (ppgtt->pd.pd_offset / 64) << 16;
+	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
 }
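
A worked example of the encoding: the hardware takes the page-directory
offset in 64-byte units placed in the upper 16 bits, which is why the
BUG_ON() insists on 64-byte alignment:

	/*
	 *   ggtt_offset = 0x40000          PD placed 256 KiB into the GGTT
	 *   0x40000 / 64 = 0x1000
	 *   0x1000 << 16 = 0x10000000      value handed to the mm switch
	 */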
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_engine_cs *ring)
+			 struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -1087,8 +1162,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
 
 	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
@@ -1097,16 +1173,17 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -1120,7 +1197,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (ring->id != RCS) {
-		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
@@ -1129,8 +1206,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1214,19 +1292,20 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 	unsigned first_pte = first_entry % GEN6_PTES;
 	unsigned last_pte, i;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
 
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
 		if (last_pte > GEN6_PTES)
 			last_pte = GEN6_PTES;
 
-		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
 		for (i = first_pte; i < last_pte; i++)
 			pt_vaddr[i] = scratch_pte;
 
-		kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt_vaddr);
 
 		num_entries -= last_pte - first_pte;
 		first_pte = 0;
@@ -1250,54 +1329,25 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
 		pt_vaddr[act_pte] =
 			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
 				       cache_level, true, flags);
 
 		if (++act_pte == GEN6_PTES) {
-			kunmap_atomic(pt_vaddr);
+			kunmap_px(ppgtt, pt_vaddr);
 			pt_vaddr = NULL;
 			act_pt++;
 			act_pte = 0;
 		}
 	}
 	if (pt_vaddr)
-		kunmap_atomic(pt_vaddr);
-}
-
-/* PDE TLBs are a pain invalidate pre GEN8. It requires a context reload. If we
- * are switching between contexts with the same LRCA, we also must do a force
- * restore.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
-	/* If current vm != vm, */
-	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
-static void gen6_initialize_pt(struct i915_address_space *vm,
-		struct i915_page_table *pt)
-{
-	gen6_pte_t *pt_vaddr, scratch_pte;
-	int i;
-
-	WARN_ON(vm->scratch.addr == 0);
-
-	scratch_pte = vm->pte_encode(vm->scratch.addr,
-			I915_CACHE_LLC, true, 0);
-
-	pt_vaddr = kmap_atomic(pt->page);
-
-	for (i = 0; i < GEN6_PTES; i++)
-		pt_vaddr[i] = scratch_pte;
-
-	kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt_vaddr);
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
-			       uint64_t start, uint64_t length)
+			       uint64_t start_in, uint64_t length_in)
 {
 	DECLARE_BITMAP(new_page_tables, I915_PDES);
 	struct drm_device *dev = vm->dev;
@@ -1305,11 +1355,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 	struct i915_hw_ppgtt *ppgtt =
 				container_of(vm, struct i915_hw_ppgtt, base);
 	struct i915_page_table *pt;
-	const uint32_t start_save = start, length_save = length;
+	uint32_t start, length, start_save, length_save;
 	uint32_t pde, temp;
 	int ret;
 
-	WARN_ON(upper_32_bits(start));
+	if (WARN_ON(start_in + length_in > ppgtt->base.total))
+		return -ENODEV;
+
+	start = start_save = start_in;
+	length = length_save = length_in;
 
 	bitmap_zero(new_page_tables, I915_PDES);
 
@@ -1319,7 +1373,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 	 * tables.
 	 */
 	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
-		if (pt != ppgtt->scratch_pt) {
+		if (pt != vm->scratch_pt) {
 			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
 			continue;
 		}
@@ -1327,7 +1381,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 		/* We've already allocated a page table */
 		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
 
-		pt = alloc_pt_single(dev);
+		pt = alloc_pt(dev);
 		if (IS_ERR(pt)) {
 			ret = PTR_ERR(pt);
 			goto unwind_out;
@@ -1336,7 +1390,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 		gen6_initialize_pt(vm, pt);
 
 		ppgtt->pd.page_table[pde] = pt;
-		set_bit(pde, new_page_tables);
+		__set_bit(pde, new_page_tables);
 		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
 	}
 
@@ -1350,7 +1404,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 		bitmap_set(tmp_bitmap, gen6_pte_index(start),
 			   gen6_pte_count(start, length));
 
-		if (test_and_clear_bit(pde, new_page_tables))
+		if (__test_and_clear_bit(pde, new_page_tables))
 			gen6_write_pde(&ppgtt->pd, pde, pt);
 
 		trace_i915_page_table_entry_map(vm, pde, pt,
@@ -1374,14 +1428,41 @@ unwind_out:
 	for_each_set_bit(pde, new_page_tables, I915_PDES) {
 		struct i915_page_table *pt = ppgtt->pd.page_table[pde];
 
-		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
-		unmap_and_free_pt(pt, vm->dev);
+		ppgtt->pd.page_table[pde] = vm->scratch_pt;
+		free_pt(vm->dev, pt);
 	}
 
 	mark_tlbs_dirty(ppgtt);
 	return ret;
 }
 
+static int gen6_init_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	vm->scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(vm->scratch_page))
+		return PTR_ERR(vm->scratch_page);
+
+	vm->scratch_pt = alloc_pt(dev);
+	if (IS_ERR(vm->scratch_pt)) {
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pt);
+	}
+
+	gen6_initialize_pt(vm, vm->scratch_pt);
+
+	return 0;
+}
+
+static void gen6_free_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	free_pt(dev, vm->scratch_pt);
+	free_scratch_page(dev, vm->scratch_page);
+}
+
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
@@ -1389,20 +1470,19 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 	struct i915_page_table *pt;
 	uint32_t pde;
 
-
 	drm_mm_remove_node(&ppgtt->node);
 
 	gen6_for_all_pdes(pt, ppgtt, pde) {
-		if (pt != ppgtt->scratch_pt)
-			unmap_and_free_pt(pt, ppgtt->base.dev);
+		if (pt != vm->scratch_pt)
+			free_pt(ppgtt->base.dev, pt);
 	}
 
-	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
-	unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev);
+	gen6_free_scratch(vm);
 }
 
 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
+	struct i915_address_space *vm = &ppgtt->base;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool retried = false;
@@ -1413,11 +1493,10 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
 	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
-	ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pt))
-		return PTR_ERR(ppgtt->scratch_pt);
 
-	gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+	ret = gen6_init_scratch(vm);
+	if (ret)
+		return ret;
 
 alloc:
 	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
@@ -1448,7 +1527,7 @@ alloc:
 	return 0;
 
 err_out:
-	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+	gen6_free_scratch(vm);
 	return ret;
 }
 
@@ -1464,7 +1543,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
 	uint32_t pde, temp;
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
-		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
 }
 
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
@@ -1500,11 +1579,11 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 
-	ppgtt->pd.pd_offset =
+	ppgtt->pd.base.ggtt_offset =
 		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
 
 	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
-		ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
 
 	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
 
@@ -1515,23 +1594,21 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 			 ppgtt->node.start / PAGE_SIZE);
 
 	DRM_DEBUG("Adding PPGTT at offset %x\n",
-		  ppgtt->pd.pd_offset << 10);
+		  ppgtt->pd.base.ggtt_offset << 10);
 
 	return 0;
 }
 
 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	ppgtt->base.dev = dev;
-	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 
 	if (INTEL_INFO(dev)->gen < 8)
 		return gen6_ppgtt_init(ppgtt);
 	else
 		return gen8_ppgtt_init(ppgtt);
 }
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1550,11 +1627,6 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 
 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	int i, ret = 0;
-
 	/* In the case of execlists, PPGTT is enabled by the context descriptor
 	 * and the PDPs are contained within the context itself.  We don't
 	 * need to do anything here. */
@@ -1573,16 +1645,23 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 	else
 		MISSING_CASE(INTEL_INFO(dev)->gen);
 
-	if (ppgtt) {
-		for_each_ring(ring, dev_priv, i) {
-			ret = ppgtt->switch_mm(ppgtt, ring);
-			if (ret != 0)
-				return ret;
-		}
-	}
+	return 0;
+}
 
-	return ret;
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
+{
+	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+	if (i915.enable_execlists)
+		return 0;
+
+	if (!ppgtt)
+		return 0;
+
+	return ppgtt->switch_mm(ppgtt, req);
 }
+
 struct i915_hw_ppgtt *
 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
 {
@@ -1843,7 +1922,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = gen8_pte_encode(vm->scratch.addr,
+	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
 				      I915_CACHE_LLC,
 				      use_scratch);
 	for (i = 0; i < num_entries; i++)
@@ -1869,7 +1948,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, use_scratch, 0);
 
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
@@ -2105,7 +2185,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 void i915_gem_init_global_gtt(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long gtt_size, mappable_size;
+	u64 gtt_size, mappable_size;
 
 	gtt_size = dev_priv->gtt.base.total;
 	mappable_size = dev_priv->gtt.mappable_end;
@@ -2135,42 +2215,6 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
 	vm->cleanup(vm);
 }
 
-static int setup_scratch_page(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct page *page;
-	dma_addr_t dma_addr;
-
-	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-	if (page == NULL)
-		return -ENOMEM;
-	set_pages_uc(page, 1);
-
-#ifdef CONFIG_INTEL_IOMMU
-	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
-				PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, dma_addr))
-		return -EINVAL;
-#else
-	dma_addr = page_to_phys(page);
-#endif
-	dev_priv->gtt.base.scratch.page = page;
-	dev_priv->gtt.base.scratch.addr = dma_addr;
-
-	return 0;
-}
-
-static void teardown_scratch_page(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct page *page = dev_priv->gtt.base.scratch.page;
-
-	set_pages_wb(page, 1);
-	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
-		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	__free_page(page);
-}
-
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
 {
 	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
@@ -2253,8 +2297,8 @@ static int ggtt_probe_common(struct drm_device *dev,
 			     size_t gtt_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_page_scratch *scratch_page;
 	phys_addr_t gtt_phys_addr;
-	int ret;
 
 	/* For Modern GENs the PTEs and register space are split in the BAR */
 	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
@@ -2276,14 +2320,17 @@ static int ggtt_probe_common(struct drm_device *dev,
 		return -ENOMEM;
 	}
 
-	ret = setup_scratch_page(dev);
-	if (ret) {
+	scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(scratch_page)) {
 		DRM_ERROR("Scratch setup failed\n");
 		/* iounmap will also get called at remove, but meh */
 		iounmap(dev_priv->gtt.gsm);
+		return PTR_ERR(scratch_page);
 	}
 
-	return ret;
+	dev_priv->gtt.base.scratch_page = scratch_page;
+
+	return 0;
 }
 
 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
@@ -2360,13 +2407,13 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
 }
 
 static int gen8_gmch_probe(struct drm_device *dev,
-			   size_t *gtt_total,
+			   u64 *gtt_total,
 			   size_t *stolen,
 			   phys_addr_t *mappable_base,
-			   unsigned long *mappable_end)
+			   u64 *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int gtt_size;
+	u64 gtt_size;
 	u16 snb_gmch_ctl;
 	int ret;
 
@@ -2408,10 +2455,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
 }
 
 static int gen6_gmch_probe(struct drm_device *dev,
-			   size_t *gtt_total,
+			   u64 *gtt_total,
 			   size_t *stolen,
 			   phys_addr_t *mappable_base,
-			   unsigned long *mappable_end)
+			   u64 *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned int gtt_size;
@@ -2425,7 +2472,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
 	 * a coarse sanity check.
 	 */
 	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
-		DRM_ERROR("Unknown GMADR size (%lx)\n",
+		DRM_ERROR("Unknown GMADR size (%llx)\n",
 			  dev_priv->gtt.mappable_end);
 		return -ENXIO;
 	}
@@ -2455,14 +2502,14 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
 	iounmap(gtt->gsm);
-	teardown_scratch_page(vm->dev);
+	free_scratch_page(vm->dev, vm->scratch_page);
 }
 
 static int i915_gmch_probe(struct drm_device *dev,
-			   size_t *gtt_total,
+			   u64 *gtt_total,
 			   size_t *stolen,
 			   phys_addr_t *mappable_base,
-			   unsigned long *mappable_end)
+			   u64 *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -2519,17 +2566,17 @@ int i915_gem_gtt_init(struct drm_device *dev)
 		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
 	}
 
+	gtt->base.dev = dev;
+
 	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
 			     &gtt->mappable_base, &gtt->mappable_end);
 	if (ret)
 		return ret;
 
-	gtt->base.dev = dev;
-
 	/* GMADR is the PCI mmio aperture into the global GTT. */
-	DRM_INFO("Memory usable by graphics device = %zdM\n",
+	DRM_INFO("Memory usable by graphics device = %lluM\n",
 		 gtt->base.total >> 20);
-	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
 	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped)
@@ -2706,30 +2753,17 @@ static struct sg_table *
 intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
 			  struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
 	struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
-	unsigned long size, pages, rot_pages;
+	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
 	struct sg_page_iter sg_iter;
 	unsigned long i;
 	dma_addr_t *page_addr_list;
 	struct sg_table *st;
-	unsigned int tile_pitch, tile_height;
-	unsigned int width_pages, height_pages;
 	int ret = -ENOMEM;
 
-	pages = obj->base.size / PAGE_SIZE;
-
-	/* Calculate tiling geometry. */
-	tile_height = intel_tile_height(dev, rot_info->pixel_format,
-					rot_info->fb_modifier);
-	tile_pitch = PAGE_SIZE / tile_height;
-	width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
-	height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
-	rot_pages = width_pages * height_pages;
-	size = rot_pages * PAGE_SIZE;
-
 	/* Allocate a temporary list of source pages for random access. */
-	page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
+	page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
+				       sizeof(dma_addr_t));
 	if (!page_addr_list)
 		return ERR_PTR(ret);
 
@@ -2738,7 +2772,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
 	if (!st)
 		goto err_st_alloc;
 
-	ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
+	ret = sg_alloc_table(st, size_pages, GFP_KERNEL);
 	if (ret)
 		goto err_sg_alloc;
 
@@ -2750,13 +2784,15 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
 	}
 
 	/* Rotate the pages. */
-	rotate_pages(page_addr_list, width_pages, height_pages, st);
+	rotate_pages(page_addr_list,
+		     rot_info->width_pages, rot_info->height_pages,
+		     st);
 
 	DRM_DEBUG_KMS(
-		      "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
-		      size, rot_info->pitch, rot_info->height,
-		      rot_info->pixel_format, width_pages, height_pages,
-		      rot_pages);
+		      "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages).\n",
+		      obj->base.size, rot_info->pitch, rot_info->height,
+		      rot_info->pixel_format, rot_info->width_pages,
+		      rot_info->height_pages, size_pages);
 
 	drm_free_large(page_addr_list);
 
@@ -2768,10 +2804,10 @@ err_st_alloc:
 	drm_free_large(page_addr_list);
 
 	DRM_DEBUG_KMS(
-		      "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
-		      size, ret, rot_info->pitch, rot_info->height,
-		      rot_info->pixel_format, width_pages, height_pages,
-		      rot_pages);
+		      "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages)\n",
+		      obj->base.size, ret, rot_info->pitch, rot_info->height,
+		      rot_info->pixel_format, rot_info->width_pages,
+		      rot_info->height_pages, size_pages);
 	return ERR_PTR(ret);
 }
 
@@ -2889,9 +2925,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 				    vma->node.size,
 				    VM_TO_TRACE_NAME(vma->vm));
 
+		/* XXX: i915_vma_pin() will fix this +- hack */
+		vma->pin_count++;
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
 						 vma->node.size);
+		vma->pin_count--;
 		if (ret)
 			return ret;
 	}
@@ -2916,9 +2955,10 @@ size_t
 i915_ggtt_view_size(struct drm_i915_gem_object *obj,
 		    const struct i915_ggtt_view *view)
 {
-	if (view->type == I915_GGTT_VIEW_NORMAL ||
-	    view->type == I915_GGTT_VIEW_ROTATED) {
+	if (view->type == I915_GGTT_VIEW_NORMAL) {
 		return obj->base.size;
+	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
+		return view->rotation_info.size;
 	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
 		return view->params.partial.size << PAGE_SHIFT;
 	} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0d46dd20bf71..e1cfa292f9ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -126,6 +126,8 @@ struct intel_rotation_info {
 	unsigned int pitch;
 	uint32_t pixel_format;
 	uint64_t fb_modifier;
+	unsigned int width_pages, height_pages;
+	uint64_t size;
 };
 
 struct i915_ggtt_view {
@@ -205,19 +207,34 @@ struct i915_vma {
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
-struct i915_page_table {
+struct i915_page_dma {
 	struct page *page;
-	dma_addr_t daddr;
+	union {
+		dma_addr_t daddr;
+
+		/* For gen6/gen7 only. This is the offset in the GGTT
+		 * where the page directory entries for PPGTT begin
+		 */
+		uint32_t ggtt_offset;
+	};
+};
+
+#define px_base(px) (&(px)->base)
+#define px_page(px) (px_base(px)->page)
+#define px_dma(px) (px_base(px)->daddr)
+
+struct i915_page_scratch {
+	struct i915_page_dma base;
+};
+
+struct i915_page_table {
+	struct i915_page_dma base;
 
 	unsigned long *used_ptes;
 };
 
 struct i915_page_directory {
-	struct page *page; /* NULL for GEN6-GEN7 */
-	union {
-		uint32_t pd_offset;
-		dma_addr_t daddr;
-	};
+	struct i915_page_dma base;
 
 	unsigned long *used_pdes;
 	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
@@ -233,13 +250,12 @@ struct i915_address_space {
 	struct drm_mm mm;
 	struct drm_device *dev;
 	struct list_head global_link;
-	unsigned long start;		/* Start offset always 0 for dri2 */
-	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */
+	u64 start;		/* Start offset always 0 for dri2 */
+	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 
-	struct {
-		dma_addr_t addr;
-		struct page *page;
-	} scratch;
+	struct i915_page_scratch *scratch_page;
+	struct i915_page_table *scratch_pt;
+	struct i915_page_directory *scratch_pd;
 
 	/**
 	 * List of objects currently involved in rendering.
@@ -300,9 +316,9 @@ struct i915_address_space {
  */
 struct i915_gtt {
 	struct i915_address_space base;
-	size_t stolen_size;		/* Total size of stolen memory */
 
-	unsigned long mappable_end;	/* End offset that we can CPU map */
+	size_t stolen_size;		/* Total size of stolen memory */
+	u64 mappable_end;		/* End offset that we can CPU map */
 	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
 
@@ -314,9 +330,9 @@ struct i915_gtt {
 	int mtrr;
 
 	/* global gtt ops */
-	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
 			  size_t *stolen, phys_addr_t *mappable_base,
-			  unsigned long *mappable_end);
+			  u64 *mappable_end);
 };
 
 struct i915_hw_ppgtt {
@@ -329,16 +345,13 @@ struct i915_hw_ppgtt {
 		struct i915_page_directory pd;
 	};
 
-	struct i915_page_table *scratch_pt;
-	struct i915_page_directory *scratch_pd;
-
 	struct drm_i915_file_private *file_priv;
 
 	gen6_pte_t __iomem *pd_addr;
 
 	int (*enable)(struct i915_hw_ppgtt *ppgtt);
 	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_engine_cs *ring);
+			 struct drm_i915_gem_request *req);
 	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
@@ -468,6 +481,14 @@ static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
 	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
 }
 
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
+{
+	return test_bit(n, ppgtt->pdp.used_pdpes) ?
+		px_dma(ppgtt->pdp.page_directory[n]) :
+		px_dma(ppgtt->base.scratch_pd);
+}
+
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -475,6 +496,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev);
 
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
 					struct drm_i915_file_private *fpriv);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 521548a08578..5026a6267a88 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -73,6 +73,24 @@ free_gem:
 	return ret;
 }
 
+/*
+ * Macro to add commands to the auxiliary batch.
+ * This macro only checks for page overflow before inserting the commands;
+ * this is sufficient, as the null state generator makes the final batch
+ * in two passes, building the commands and state separately. At that point
+ * the size of both is known, and it compacts them by relocating the state
+ * right after the commands, taking care of alignment, so we should have
+ * sufficient space below them for adding new commands.
+ */
+#define OUT_BATCH(batch, i, val)				\
+	do {							\
+		if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) {	\
+			ret = -ENOSPC;				\
+			goto err_out;				\
+		}						\
+		(batch)[(i)++] = (val);				\
+	} while (0)
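
A brief usage note: the do/while wrapper makes OUT_BATCH() behave as a
single statement, so it is safe in unbraced conditionals; it does assume a
local ret and an err_out label in the caller, which render_state_setup()
below provides. Illustrative only ('pad_needed' is hypothetical):

	if (pad_needed)
		OUT_BATCH(d, i, MI_NOOP);		/* one statement */
	else
		OUT_BATCH(d, i, MI_BATCH_BUFFER_END);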
+
 static int render_state_setup(struct render_state *so)
 {
 	const struct intel_renderstate_rodata *rodata = so->rodata;
@@ -96,8 +114,10 @@ static int render_state_setup(struct render_state *so)
 			s = lower_32_bits(r);
 			if (so->gen >= 8) {
 				if (i + 1 >= rodata->batch_items ||
-				    rodata->batch[i + 1] != 0)
-					return -EINVAL;
+				    rodata->batch[i + 1] != 0) {
+					ret = -EINVAL;
+					goto err_out;
+				}
 
 				d[i++] = s;
 				s = upper_32_bits(r);
@@ -108,6 +128,21 @@ static int render_state_setup(struct render_state *so)
 
 		d[i++] = s;
 	}
+
+	while (i % CACHELINE_DWORDS)
+		OUT_BATCH(d, i, MI_NOOP);
+
+	so->aux_batch_offset = i * sizeof(u32);
+
+	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
+	so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
+
+	/*
+	 * Since we are sending length, we need to strictly conform to
+	 * all requirements. For Gen2 this must be a multiple of 8.
+	 */
+	so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
+
 	kunmap(page);
 
 	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
@@ -120,8 +155,14 @@ static int render_state_setup(struct render_state *so)
 	}
 
 	return 0;
+
+err_out:
+	kunmap(page);
+	return ret;
 }
 
+#undef OUT_BATCH
+
 void i915_gem_render_state_fini(struct render_state *so)
 {
 	i915_gem_object_ggtt_unpin(so->obj);
@@ -152,29 +193,36 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
 	return 0;
 }
 
-int i915_gem_render_state_init(struct intel_engine_cs *ring)
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 {
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(ring, &so);
+	ret = i915_gem_render_state_prepare(req->ring, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = ring->dispatch_execbuffer(ring,
-					so.ggtt_offset,
-					so.rodata->batch_items * 4,
-					I915_DISPATCH_SECURE);
+	ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+					     so.rodata->batch_items * 4,
+					     I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+	if (so.aux_batch_size > 8) {
+		ret = req->ring->dispatch_execbuffer(req,
+						     (so.ggtt_offset +
+						      so.aux_batch_offset),
+						     so.aux_batch_size,
+						     I915_DISPATCH_SECURE);
+		if (ret)
+			goto out;
+	}
+
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
-	ret = __i915_add_request(ring, NULL, so.obj);
-	/* __i915_add_request moves object to inactive if it fails */
 out:
 	i915_gem_render_state_fini(&so);
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index c44961ed3fad..e641bb093a90 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -37,9 +37,11 @@ struct render_state {
 	struct drm_i915_gem_object *obj;
 	u64 ggtt_offset;
 	int gen;
+	u32 aux_batch_size;
+	u32 aux_batch_offset;
 };
 
-int i915_gem_render_state_init(struct intel_engine_cs *ring);
+int i915_gem_render_state_init(struct drm_i915_gem_request *req);
 void i915_gem_render_state_fini(struct render_state *so);
 int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
 				  struct render_state *so);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8b5b784c62fe..f361c4a56995 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,6 +42,31 @@
  * for is a boon.
  */
 
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+				struct drm_mm_node *node, u64 size,
+				unsigned alignment)
+{
+	int ret;
+
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return -ENODEV;
+
+	mutex_lock(&dev_priv->mm.stolen_lock);
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
+				 DRM_MM_SEARCH_DEFAULT);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
+
+	return ret;
+}
+
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+				 struct drm_mm_node *node)
+{
+	mutex_lock(&dev_priv->mm.stolen_lock);
+	drm_mm_remove_node(node);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
+}
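
The calling pattern these wrappers establish, mirrored by
i915_gem_object_create_stolen() later in this patch, is roughly
(illustrative sketch):

	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ret = i915_gem_stolen_insert_node(dev_priv, node, size, 4096);
	if (ret) {
		kfree(node);
		return ret;
	}

	/* ... use the stolen range ... */

	i915_gem_stolen_remove_node(dev_priv, node);
	kfree(node);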
+
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -151,150 +176,115 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	return base;
 }
 
-static int find_compression_threshold(struct drm_device *dev,
-				      struct drm_mm_node *node,
-				      int size,
-				      int fb_cpp)
+void i915_gem_cleanup_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int compression_threshold = 1;
-	int ret;
-
-	/* HACK: This code depends on what we will do in *_enable_fbc. If that
-	 * code changes, this code needs to change as well.
-	 *
-	 * The enable_fbc code will attempt to use one of our 2 compression
-	 * thresholds, therefore, in that case, we only have 1 resort.
-	 */
 
-	/* Try to over-allocate to reduce reallocations and fragmentation. */
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
-				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
-	if (ret == 0)
-		return compression_threshold;
-
-again:
-	/* HW's ability to limit the CFB is 1:4 */
-	if (compression_threshold > 4 ||
-	    (fb_cpp == 2 && compression_threshold == 2))
-		return 0;
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return;
 
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
-				 size >>= 1, 4096,
-				 DRM_MM_SEARCH_DEFAULT);
-	if (ret && INTEL_INFO(dev)->gen <= 4) {
-		return 0;
-	} else if (ret) {
-		compression_threshold <<= 1;
-		goto again;
-	} else {
-		return compression_threshold;
-	}
+	drm_mm_takedown(&dev_priv->mm.stolen);
 }
 
-static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_mm_node *uninitialized_var(compressed_llb);
-	int ret;
-
-	ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
-					 size, fb_cpp);
-	if (!ret)
-		goto err_llb;
-	else if (ret > 1) {
-		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
-	}
-
-	dev_priv->fbc.threshold = ret;
-
-	if (INTEL_INFO(dev_priv)->gen >= 5)
-		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
-	else if (IS_GM45(dev)) {
-		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
-	} else {
-		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
-		if (!compressed_llb)
-			goto err_fb;
-
-		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
-					 4096, 4096, DRM_MM_SEARCH_DEFAULT);
-		if (ret)
-			goto err_fb;
-
-		dev_priv->fbc.compressed_llb = compressed_llb;
-
-		I915_WRITE(FBC_CFB_BASE,
-			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
-		I915_WRITE(FBC_LL_BASE,
-			   dev_priv->mm.stolen_base + compressed_llb->start);
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN6_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_512K:
+		*size = 512 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_256K:
+		*size = 256 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_128K:
+		*size = 128 * 1024;
+		break;
+	default:
+		*size = 1024 * 1024;
+		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
 	}
-
-	dev_priv->fbc.uncompressed_size = size;
-
-	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
-		      size);
-
-	return 0;
-
-err_fb:
-	kfree(compressed_llb);
-	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-err_llb:
-	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
-	return -ENOSPC;
 }
 
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!drm_mm_initialized(&dev_priv->mm.stolen))
-		return -ENODEV;
-
-	if (size <= dev_priv->fbc.uncompressed_size)
-		return 0;
-
-	/* Release any current block */
-	i915_gem_stolen_cleanup_compression(dev);
-
-	return i915_setup_compression(dev, size, fb_cpp);
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN7_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN7_STOLEN_RESERVED_256K:
+		*size = 256 * 1024;
+		break;
+	default:
+		*size = 1024 * 1024;
+		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+	}
 }
 
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
+static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->fbc.uncompressed_size == 0)
-		return;
-
-	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-
-	if (dev_priv->fbc.compressed_llb) {
-		drm_mm_remove_node(dev_priv->fbc.compressed_llb);
-		kfree(dev_priv->fbc.compressed_llb);
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN8_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_2M:
+		*size = 2 * 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_4M:
+		*size = 4 * 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_8M:
+		*size = 8 * 1024 * 1024;
+		break;
+	default:
+		*size = 8 * 1024 * 1024;
+		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
 	}
-
-	dev_priv->fbc.uncompressed_size = 0;
 }
 
-void i915_gem_cleanup_stolen(struct drm_device *dev)
+static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				    unsigned long *base, unsigned long *size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	unsigned long stolen_top;
 
-	if (!drm_mm_initialized(&dev_priv->mm.stolen))
-		return;
+	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
 
-	i915_gem_stolen_cleanup_compression(dev);
-	drm_mm_takedown(&dev_priv->mm.stolen);
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	/* On these platforms, the register doesn't have a size field, so the
+	 * size is the distance between the base and the top of the stolen
+	 * memory. We also have the genuine case where base is zero and there's
+	 * nothing reserved. */
+	if (*base == 0)
+		*size = 0;
+	else
+		*size = stolen_top - *base;
 }
 
 int i915_gem_init_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 tmp;
-	int bios_reserved = 0;
+	unsigned long reserved_total, reserved_base, reserved_size;
+	unsigned long stolen_top;
+
+	mutex_init(&dev_priv->mm.stolen_lock);
 
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
@@ -310,26 +300,61 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	if (dev_priv->mm.stolen_base == 0)
 		return 0;
 
-	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
-		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
-
-	if (INTEL_INFO(dev)->gen >= 8) {
-		tmp = I915_READ(GEN7_BIOS_RESERVED);
-		tmp >>= GEN8_BIOS_RESERVED_SHIFT;
-		tmp &= GEN8_BIOS_RESERVED_MASK;
-		bios_reserved = (1024*1024) << tmp;
-	} else if (IS_GEN7(dev)) {
-		tmp = I915_READ(GEN7_BIOS_RESERVED);
-		bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
-			256*1024 : 1024*1024;
+	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+
+	switch (INTEL_INFO(dev_priv)->gen) {
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		/* Assume the gen6 maximum for the older platforms. */
+		reserved_size = 1024 * 1024;
+		reserved_base = stolen_top - reserved_size;
+		break;
+	case 6:
+		gen6_get_stolen_reserved(dev_priv, &reserved_base,
+					 &reserved_size);
+		break;
+	case 7:
+		gen7_get_stolen_reserved(dev_priv, &reserved_base,
+					 &reserved_size);
+		break;
+	default:
+		if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+			bdw_get_stolen_reserved(dev_priv, &reserved_base,
+						&reserved_size);
+		else
+			gen8_get_stolen_reserved(dev_priv, &reserved_base,
+						 &reserved_size);
+		break;
+	}
+
+	/* It is possible for the reserved base to be zero, but the register
+	 * field for size doesn't have a zero option. */
+	if (reserved_base == 0) {
+		reserved_size = 0;
+		reserved_base = stolen_top;
 	}
 
-	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+	if (reserved_base < dev_priv->mm.stolen_base ||
+	    reserved_base + reserved_size > stolen_top) {
+		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
+			      reserved_base, reserved_base + reserved_size,
+			      dev_priv->mm.stolen_base, stolen_top);
 		return 0;
+	}
+
+	/* It is possible for the reserved area to end before the end of stolen
+	 * memory, so just consider the start. */
+	reserved_total = stolen_top - reserved_base;
+
+	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
+		      dev_priv->gtt.stolen_size >> 10,
+		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);
 
 	/* Basic memrange allocator for stolen space */
 	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
-		    bios_reserved);
+		    reserved_total);
 
 	return 0;
 }
@@ -386,8 +411,10 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
 	if (obj->stolen) {
-		drm_mm_remove_node(obj->stolen);
+		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
 		kfree(obj->stolen);
 		obj->stolen = NULL;
 	}
@@ -448,8 +475,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
 	if (!stolen)
 		return NULL;
 
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
-				 4096, DRM_MM_SEARCH_DEFAULT);
+	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
 	if (ret) {
 		kfree(stolen);
 		return NULL;
@@ -459,7 +485,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
 	if (obj)
 		return obj;
 
-	drm_mm_remove_node(stolen);
+	i915_gem_stolen_remove_node(dev_priv, stolen);
 	kfree(stolen);
 	return NULL;
 }
@@ -494,7 +520,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
 	stolen->start = stolen_offset;
 	stolen->size = size;
+	mutex_lock(&dev_priv->mm.stolen_lock);
 	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
 	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen space\n");
 		kfree(stolen);
@@ -504,7 +532,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	obj = _i915_gem_object_create_stolen(dev, stolen);
 	if (obj == NULL) {
 		DRM_DEBUG_KMS("failed to allocate stolen object\n");
-		drm_mm_remove_node(stolen);
+		i915_gem_stolen_remove_node(dev_priv, stolen);
 		kfree(stolen);
 		return NULL;
 	}
@@ -545,7 +573,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 err_vma:
 	i915_gem_vma_destroy(vma);
 err_out:
-	drm_mm_remove_node(stolen);
+	i915_gem_stolen_remove_node(dev_priv, stolen);
 	kfree(stolen);
 	drm_gem_object_unreference(&obj->base);
 	return NULL;
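
The reserved-range handling above reduces to two rules: a base of zero means
"no reservation" (the size register field has no zero encoding, so the range
is collapsed to empty at stolen_top), and any surviving range must lie
entirely inside stolen memory. A minimal standalone sketch of that check,
with hypothetical addresses, not part of the patch:

#include <stdbool.h>
#include <stdio.h>

static bool stolen_reserved_ok(unsigned long stolen_base,
			       unsigned long stolen_top,
			       unsigned long reserved_base,
			       unsigned long reserved_size)
{
	/* Zero base: treat as an empty reservation at the top of stolen. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	/* The reserved range must sit entirely inside stolen memory. */
	return reserved_base >= stolen_base &&
	       reserved_base + reserved_size <= stolen_top;
}

int main(void)
{
	/* 32MB of stolen memory with 1MB reserved at its top: valid. */
	printf("%d\n", stolen_reserved_ok(0x80000000UL, 0x82000000UL,
					  0x81f00000UL, 0x100000UL));
	/* Reservation spilling past stolen_top: rejected. */
	printf("%d\n", stolen_reserved_ok(0x80000000UL, 0x82000000UL,
					  0x81f80000UL, 0x100000UL));
	return 0;
}
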
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d19c9db5e18c..8a6717cc265c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -31,201 +31,32 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-/** @file i915_gem_tiling.c
- *
- * Support for managing tiling state of buffer objects.
- *
- * The idea behind tiling is to increase cache hit rates by rearranging
- * pixel data so that a group of pixel accesses are in the same cacheline.
- * Performance improvement from doing this on the back/depth buffer are on
- * the order of 30%.
- *
- * Intel architectures make this somewhat more complicated, though, by
- * adjustments made to addressing of data when the memory is in interleaved
- * mode (matched pairs of DIMMS) to improve memory bandwidth.
- * For interleaved memory, the CPU sends every sequential 64 bytes
- * to an alternate memory channel so it can get the bandwidth from both.
- *
- * The GPU also rearranges its accesses for increased bandwidth to interleaved
- * memory, and it matches what the CPU does for non-tiled.  However, when tiled
- * it does it a little differently, since one walks addresses not just in the
- * X direction but also Y.  So, along with alternating channels when bit
- * 6 of the address flips, it also alternates when other bits flip --  Bits 9
- * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
- * are common to both the 915 and 965-class hardware.
- *
- * The CPU also sometimes XORs in higher bits as well, to improve
- * bandwidth doing strided access like we do so frequently in graphics.  This
- * is called "Channel XOR Randomization" in the MCH documentation.  The result
- * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
- * decode.
+/**
+ * DOC: buffer object tiling
  *
- * All of this bit 6 XORing has an effect on our memory management,
- * as we need to make sure that the 3d driver can correctly address object
- * contents.
+ * i915_gem_set_tiling() and i915_gem_get_tiling() are the userspace interface
+ * to declare fence register requirements.
  *
- * If we don't have interleaved memory, all tiling is safe and no swizzling is
- * required.
+ * In principle GEM doesn't care at all about the internal data layout of an
+ * object, and hence it also doesn't care about tiling or swizzling. There are
+ * two exceptions:
  *
- * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
- * 17 is not just a page offset, so as we page an objet out and back in,
- * individual pages in it will have different bit 17 addresses, resulting in
- * each 64 bytes being swapped with its neighbor!
+ * - For X and Y tiling the hardware provides detilers for CPU access, so-called
+ *   fences. Since there is only a limited number of them the kernel must manage
+ *   these, and therefore userspace must tell the kernel the object tiling if it
+ *   wants to use fences for detiling.
+ * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
+ *   depends upon the physical page frame number. When swapping such objects the
+ *   page frame number might change and the kernel must be able to fix this up
+ *   and hence must know the tiling. Note that on a subset of platforms with
+ *   asymmetric memory channel population the swizzling pattern changes in an
+ *   unknown way, and for those the kernel simply forbids swapping completely.
  *
- * Otherwise, if interleaved, we have to tell the 3d driver what the address
- * swizzling it needs to do is, since it's writing with the CPU to the pages
- * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
- * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
- * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
- * to match what the GPU expects.
- */
-
-/**
- * Detects bit 6 swizzling of address lookup between IGD access and CPU
- * access through main memory.
+ * Since neither of these applies to the new tiling layouts on modern platforms,
+ * like W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y
+ * tiled. Anything else can be handled in userspace entirely without the
+ * kernel's involvement.
  */
-void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-
-	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
-		/*
-		 * On BDW+, swizzling is not used. We leave the CPU memory
-		 * controller in charge of optimizing memory accesses without
-		 * the extra address manipulation GPU side.
-		 *
-		 * VLV and CHV don't have GPU swizzling.
-		 */
-		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (INTEL_INFO(dev)->gen >= 6) {
-		if (dev_priv->preserve_bios_swizzle) {
-			if (I915_READ(DISP_ARB_CTL) &
-			    DISP_TILE_SURFACE_SWIZZLING) {
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-				swizzle_y = I915_BIT_6_SWIZZLE_9;
-			} else {
-				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-			}
-		} else {
-			uint32_t dimm_c0, dimm_c1;
-			dimm_c0 = I915_READ(MAD_DIMM_C0);
-			dimm_c1 = I915_READ(MAD_DIMM_C1);
-			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-			/* Enable swizzling when the channels are populated
-			 * with identically sized dimms. We don't need to check
-			 * the 3rd channel because no cpu with gpu attached
-			 * ships in that configuration. Also, swizzling only
-			 * makes sense for 2 channels anyway. */
-			if (dimm_c0 == dimm_c1) {
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-				swizzle_y = I915_BIT_6_SWIZZLE_9;
-			} else {
-				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-			}
-		}
-	} else if (IS_GEN5(dev)) {
-		/* On Ironlake whatever DRAM config, GPU always do
-		 * same swizzling setup.
-		 */
-		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-		swizzle_y = I915_BIT_6_SWIZZLE_9;
-	} else if (IS_GEN2(dev)) {
-		/* As far as we know, the 865 doesn't have these bit 6
-		 * swizzling issues.
-		 */
-		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
-		uint32_t dcc;
-
-		/* On 9xx chipsets, channel interleave by the CPU is
-		 * determined by DCC.  For single-channel, neither the CPU
-		 * nor the GPU do swizzling.  For dual channel interleaved,
-		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
-		 * 9 for Y tiled.  The CPU's interleave is independent, and
-		 * can be based on either bit 11 (haven't seen this yet) or
-		 * bit 17 (common).
-		 */
-		dcc = I915_READ(DCC);
-		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
-		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
-		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-			break;
-		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
-			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
-				/* This is the base swizzling by the GPU for
-				 * tiled buffers.
-				 */
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-				swizzle_y = I915_BIT_6_SWIZZLE_9;
-			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
-				/* Bit 11 swizzling by the CPU in addition. */
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
-				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
-			} else {
-				/* Bit 17 swizzling by the CPU in addition. */
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
-				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
-			}
-			break;
-		}
-
-		/* check for L-shaped memory aka modified enhanced addressing */
-		if (IS_GEN4(dev)) {
-			uint32_t ddc2 = I915_READ(DCC2);
-
-			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
-				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
-		}
-
-		if (dcc == 0xffffffff) {
-			DRM_ERROR("Couldn't read from MCHBAR.  "
-				  "Disabling tiling.\n");
-			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-		}
-	} else {
-		/* The 965, G33, and newer, have a very flexible memory
-		 * configuration.  It will enable dual-channel mode
-		 * (interleaving) on as much memory as it can, and the GPU
-		 * will additionally sometimes enable different bit 6
-		 * swizzling for tiled objects from the CPU.
-		 *
-		 * Here's what I found on the G965:
-		 *    slot fill         memory size  swizzling
-		 * 0A   0B   1A   1B    1-ch   2-ch
-		 * 512  0    0    0     512    0     O
-		 * 512  0    512  0     16     1008  X
-		 * 512  0    0    512   16     1008  X
-		 * 0    512  0    512   16     1008  X
-		 * 1024 1024 1024 0     2048   1024  O
-		 *
-		 * We could probably detect this based on either the DRB
-		 * matching, which was the case for the swizzling required in
-		 * the table above, or from the 1-ch value being less than
-		 * the minimum size of a rank.
-		 */
-		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-		} else {
-			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-			swizzle_y = I915_BIT_6_SWIZZLE_9;
-		}
-	}
-
-	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
-	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
-}
 
 /* Check pitch constraints for all chips & tiling formats */
 static bool
@@ -313,8 +144,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 }
 
 /**
+ * i915_gem_set_tiling - IOCTL handler to set tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
  * Sets the tiling mode of an object, returning the required swizzling of
  * bit 6 of addresses in the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
  */
 int
 i915_gem_set_tiling(struct drm_device *dev, void *data,
@@ -432,7 +273,17 @@ err:
 }
 
 /**
+ * i915_gem_get_tiling - IOCTL handler to get tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
  * Returns the current tiling mode and required bit 6 swizzling for the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
  */
 int
 i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -478,75 +329,3 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 
 	return 0;
 }
-
-/**
- * Swap every 64 bytes of this page around, to account for it having a new
- * bit 17 of its physical address and therefore being interpreted differently
- * by the GPU.
- */
-static void
-i915_gem_swizzle_page(struct page *page)
-{
-	char temp[64];
-	char *vaddr;
-	int i;
-
-	vaddr = kmap(page);
-
-	for (i = 0; i < PAGE_SIZE; i += 128) {
-		memcpy(temp, &vaddr[i], 64);
-		memcpy(&vaddr[i], &vaddr[i + 64], 64);
-		memcpy(&vaddr[i + 64], temp, 64);
-	}
-
-	kunmap(page);
-}
-
-void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
-	struct sg_page_iter sg_iter;
-	int i;
-
-	if (obj->bit_17 == NULL)
-		return;
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-		char new_bit_17 = page_to_phys(page) >> 17;
-		if ((new_bit_17 & 0x1) !=
-		    (test_bit(i, obj->bit_17) != 0)) {
-			i915_gem_swizzle_page(page);
-			set_page_dirty(page);
-		}
-		i++;
-	}
-}
-
-void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
-	struct sg_page_iter sg_iter;
-	int page_count = obj->base.size >> PAGE_SHIFT;
-	int i;
-
-	if (obj->bit_17 == NULL) {
-		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
-				      sizeof(long), GFP_KERNEL);
-		if (obj->bit_17 == NULL) {
-			DRM_ERROR("Failed to allocate memory for bit 17 "
-				  "record\n");
-			return;
-		}
-	}
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
-			__set_bit(i, obj->bit_17);
-		else
-			__clear_bit(i, obj->bit_17);
-		i++;
-	}
-}
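
The bit-17 swizzle fixup removed above swaps each pair of 64-byte halves
within a page whenever bit 17 of the page's physical address changed across
a swap cycle. A standalone sketch of the core operation (the kernel version
kmaps a struct page first; a plain buffer is assumed here):

#include <string.h>

#define PAGE_SIZE 4096

/* Swap every 64 bytes of a page with its neighbour, undoing/redoing the
 * bit-17 address swizzle after the page's physical address changed. */
static void swizzle_page(char *vaddr)
{
	char temp[64];
	int i;

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}
}
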
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6f4256918f76..41d0739e6fdf 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -369,6 +369,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
 	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 	err_printf(m, "EIR: 0x%08x\n", error->eir);
 	err_printf(m, "IER: 0x%08x\n", error->ier);
 	if (INTEL_INFO(dev)->gen >= 8) {
@@ -1266,6 +1267,10 @@ static void i915_error_capture_msg(struct drm_device *dev,
 static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error)
 {
+	error->iommu = -1;
+#ifdef CONFIG_INTEL_IOMMU
+	error->iommu = intel_iommu_gfx_mapped;
+#endif
 	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
 	error->suspend_count = dev_priv->suspend_count;
 }
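
The new field is tri-state, which keeps the captured dump self-describing;
per the #ifdef above it reads as follows (a note on the convention):

/* "IOMMU enabled?" in the error state:
 *   -1  kernel built without CONFIG_INTEL_IOMMU, state unknown
 *    0  intel_iommu_gfx_mapped was false (graphics not translated)
 *    1  intel_iommu_gfx_mapped was true  (graphics behind the IOMMU)
 */
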
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
new file mode 100644
index 000000000000..ccdc6c8ac20b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _I915_GUC_REG_H_
+#define _I915_GUC_REG_H_
+
+/* Definitions of GuC H/W registers, bits, etc */
+
+#define GUC_STATUS			0xc000
+#define   GS_BOOTROM_SHIFT		1
+#define   GS_BOOTROM_MASK		  (0x7F << GS_BOOTROM_SHIFT)
+#define   GS_BOOTROM_RSA_FAILED		  (0x50 << GS_BOOTROM_SHIFT)
+#define   GS_UKERNEL_SHIFT		8
+#define   GS_UKERNEL_MASK		  (0xFF << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_LAPIC_DONE		  (0x30 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_DPC_ERROR		  (0x60 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_READY		  (0xF0 << GS_UKERNEL_SHIFT)
+#define   GS_MIA_SHIFT			16
+#define   GS_MIA_MASK			  (0x07 << GS_MIA_SHIFT)
+
+#define GUC_WOPCM_SIZE			0xc050
+#define   GUC_WOPCM_SIZE_VALUE		  (0x80 << 12)	/* 512KB */
+#define GUC_WOPCM_OFFSET		0x80000		/* 512KB */
+
+#define SOFT_SCRATCH(n)			(0xc180 + ((n) * 4))
+
+#define UOS_RSA_SCRATCH_0		0xc200
+#define DMA_ADDR_0_LOW			0xc300
+#define DMA_ADDR_0_HIGH			0xc304
+#define DMA_ADDR_1_LOW			0xc308
+#define DMA_ADDR_1_HIGH			0xc30c
+#define   DMA_ADDRESS_SPACE_WOPCM	  (7 << 16)
+#define   DMA_ADDRESS_SPACE_GTT		  (8 << 16)
+#define DMA_COPY_SIZE			0xc310
+#define DMA_CTRL			0xc314
+#define   UOS_MOVE			  (1<<4)
+#define   START_DMA			  (1<<0)
+#define DMA_GUC_WOPCM_OFFSET		0xc340
+
+#define GEN8_GT_PM_CONFIG		0x138140
+#define GEN9_GT_PM_CONFIG		0x13816c
+#define   GEN8_GT_DOORBELL_ENABLE	  (1<<0)
+
+#define GEN8_GTCR			0x4274
+#define   GEN8_GTCR_INVALIDATE		  (1<<0)
+
+#define GUC_ARAT_C6DIS			0xA178
+
+#define GUC_SHIM_CONTROL		0xc064
+#define   GUC_DISABLE_SRAM_INIT_TO_ZEROES	(1<<0)
+#define   GUC_ENABLE_READ_CACHE_LOGIC		(1<<1)
+#define   GUC_ENABLE_MIA_CACHING		(1<<2)
+#define   GUC_GEN10_MSGCH_ENABLE		(1<<4)
+#define   GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA	(1<<9)
+#define   GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA	(1<<10)
+#define   GUC_ENABLE_MIA_CLOCK_GATING		(1<<15)
+#define   GUC_GEN10_SHIM_WC_ENABLE		(1<<21)
+
+#define GUC_SHIM_CONTROL_VALUE	(GUC_DISABLE_SRAM_INIT_TO_ZEROES	| \
+				 GUC_ENABLE_READ_CACHE_LOGIC		| \
+				 GUC_ENABLE_MIA_CACHING			| \
+				 GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA	| \
+				 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA)
+
+#define HOST2GUC_INTERRUPT		0xc4c8
+#define   HOST2GUC_TRIGGER		  (1<<0)
+
+#define DRBMISC1			0x1984
+#define   DOORBELL_ENABLE		  (1<<0)
+
+#define GEN8_DRBREGL(x)			(0x1000 + (x) * 8)
+#define   GEN8_DRB_VALID		  (1<<0)
+#define GEN8_DRBREGU(x)			(GEN8_DRBREGL(x) + 4)
+
+#define DE_GUCRMR			0x44054
+
+#define GUC_BCS_RCS_IER			0xC550
+#define GUC_VCS2_VCS1_IER		0xC554
+#define GUC_WD_VECS_IER			0xC558
+#define GUC_PM_P24C_IER			0xC55C
+
+#endif
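
The status fields above pack into a single register, so decoding is the
usual mask-and-compare. A sketch, assuming this header is on the include
path ('status' is a sample value; the actual register read is kernel-side):

#include <stdio.h>
#include "i915_guc_reg.h"

static int guc_load_done(unsigned int status)
{
	/* The GuC microkernel reports readiness in bits 15:8. */
	return (status & GS_UKERNEL_MASK) == GS_UKERNEL_READY;
}

int main(void)
{
	unsigned int status = GS_UKERNEL_READY;	/* sample value */

	printf("uKernel field: 0x%02x, ready: %d\n",
	       (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT,
	       guc_load_done(status));
	return 0;
}
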
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 23aa04cded6b..97f3a5640289 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -35,107 +35,20 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-typedef struct _drm_i915_batchbuffer32 {
-	int start;		/* agp offset */
-	int used;		/* nr bytes in use */
-	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
-	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
-	int num_cliprects;	/* mulitpass with multiple cliprects? */
-	u32 cliprects;		/* pointer to userspace cliprects */
-} drm_i915_batchbuffer32_t;
-
-static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
-				   unsigned long arg)
-{
-	drm_i915_batchbuffer32_t batchbuffer32;
-	drm_i915_batchbuffer_t __user *batchbuffer;
-
-	if (copy_from_user
-	    (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
-		return -EFAULT;
-
-	batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
-	if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
-	    || __put_user(batchbuffer32.start, &batchbuffer->start)
-	    || __put_user(batchbuffer32.used, &batchbuffer->used)
-	    || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
-	    || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
-	    || __put_user(batchbuffer32.num_cliprects,
-			  &batchbuffer->num_cliprects)
-	    || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
-			  &batchbuffer->cliprects))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
-			 (unsigned long)batchbuffer);
-}
-
-typedef struct _drm_i915_cmdbuffer32 {
-	u32 buf;		/* pointer to userspace command buffer */
-	int sz;			/* nr bytes in buf */
-	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
-	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
-	int num_cliprects;	/* mulitpass with multiple cliprects? */
-	u32 cliprects;		/* pointer to userspace cliprects */
-} drm_i915_cmdbuffer32_t;
-
-static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
-				 unsigned long arg)
-{
-	drm_i915_cmdbuffer32_t cmdbuffer32;
-	drm_i915_cmdbuffer_t __user *cmdbuffer;
-
-	if (copy_from_user
-	    (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
-		return -EFAULT;
-
-	cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
-	if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
-	    || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
-			  &cmdbuffer->buf)
-	    || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
-	    || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
-	    || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
-	    || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
-	    || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
-			  &cmdbuffer->cliprects))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
-			 (unsigned long)cmdbuffer);
-}
-
-typedef struct drm_i915_irq_emit32 {
-	u32 irq_seq;
-} drm_i915_irq_emit32_t;
-
-static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
-				unsigned long arg)
-{
-	drm_i915_irq_emit32_t req32;
-	drm_i915_irq_emit_t __user *request;
-
-	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
-		return -EFAULT;
-
-	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
-			  &request->irq_seq))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
-			 (unsigned long)request);
-}
-typedef struct drm_i915_getparam32 {
-	int param;
+struct drm_i915_getparam32 {
+	s32 param;
+	/*
+	 * We screwed up the generic ioctl struct here and used a variable-sized
+	 * pointer. Use u32 in the compat struct to match the 32-bit pointer
+	 * userspace expects.
+	 */
 	u32 value;
-} drm_i915_getparam32_t;
+};
 
 static int compat_i915_getparam(struct file *file, unsigned int cmd,
 				unsigned long arg)
 {
-	drm_i915_getparam32_t req32;
+	struct drm_i915_getparam32 req32;
 	drm_i915_getparam_t __user *request;
 
 	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
@@ -152,41 +65,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
 			 (unsigned long)request);
 }
 
-typedef struct drm_i915_mem_alloc32 {
-	int region;
-	int alignment;
-	int size;
-	u32 region_offset;	/* offset from start of fb or agp */
-} drm_i915_mem_alloc32_t;
-
-static int compat_i915_alloc(struct file *file, unsigned int cmd,
-			     unsigned long arg)
-{
-	drm_i915_mem_alloc32_t req32;
-	drm_i915_mem_alloc_t __user *request;
-
-	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
-		return -EFAULT;
-
-	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || __put_user(req32.region, &request->region)
-	    || __put_user(req32.alignment, &request->alignment)
-	    || __put_user(req32.size, &request->size)
-	    || __put_user((void __user *)(unsigned long)req32.region_offset,
-			  &request->region_offset))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
-			 (unsigned long)request);
-}
-
 static drm_ioctl_compat_t *i915_compat_ioctls[] = {
-	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
-	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
 	[DRM_I915_GETPARAM] = compat_i915_getparam,
-	[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
-	[DRM_I915_ALLOC] = compat_i915_alloc
 };
 
 /**
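
The compat shim survives because the native struct embeds a real pointer,
whose size differs between 32-bit userspace and a 64-bit kernel. A layout
sketch (illustrative names; the native definition lives in the drm uapi
headers):

#include <stdint.h>

struct native_getparam {	/* 64-bit kernel view: pointer is 8 bytes */
	int param;
	int *value;
};

struct compat_getparam32 {	/* what 32-bit userspace actually passed */
	int32_t param;
	uint32_t value;		/* a 32-bit pointer, stored as a u32 */
};
/* sizeof(struct native_getparam) == 16 on LP64 (4 + 4 padding + 8),
 * sizeof(struct compat_getparam32) == 8 -- hence the explicit thunk. */
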
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 449a95c6c2a1..b5fb1430c1d7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -61,6 +61,13 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
 };
 
+static const u32 hpd_spt[HPD_NUM_PINS] = {
+	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
+	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+};
+
 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
@@ -564,8 +571,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-	const struct drm_display_mode *mode =
-		&intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
 
 	htotal = mode->crtc_htotal;
 	hsync_start = mode->crtc_hsync_start;
@@ -620,7 +626,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *mode = &crtc->base.hwmode;
 	enum pipe pipe = crtc->pipe;
 	int position, vtotal;
 
@@ -647,14 +653,14 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
 	int position;
 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
 	unsigned long irqflags;
 
-	if (!intel_crtc->active) {
+	if (WARN_ON(!mode->crtc_clock)) {
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
 				 "pipe %c\n", pipe_name(pipe));
 		return 0;
@@ -796,7 +802,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 		return -EINVAL;
 	}
 
-	if (!crtc->state->enable) {
+	if (!crtc->hwmode.crtc_clock) {
 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
 		return -EBUSY;
 	}
@@ -805,151 +811,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
 						     vblank_time, flags,
 						     crtc,
-						     &to_intel_crtc(crtc)->config->base.adjusted_mode);
-}
-
-static bool intel_hpd_irq_event(struct drm_device *dev,
-				struct drm_connector *connector)
-{
-	enum drm_connector_status old_status;
-
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-	old_status = connector->status;
-
-	connector->status = connector->funcs->detect(connector, false);
-	if (old_status == connector->status)
-		return false;
-
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
-		      connector->base.id,
-		      connector->name,
-		      drm_get_connector_status_name(old_status),
-		      drm_get_connector_status_name(connector->status));
-
-	return true;
-}
-
-static void i915_digport_work_func(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, dig_port_work);
-	u32 long_port_mask, short_port_mask;
-	struct intel_digital_port *intel_dig_port;
-	int i;
-	u32 old_bits = 0;
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	long_port_mask = dev_priv->long_hpd_port_mask;
-	dev_priv->long_hpd_port_mask = 0;
-	short_port_mask = dev_priv->short_hpd_port_mask;
-	dev_priv->short_hpd_port_mask = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	for (i = 0; i < I915_MAX_PORTS; i++) {
-		bool valid = false;
-		bool long_hpd = false;
-		intel_dig_port = dev_priv->hpd_irq_port[i];
-		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
-			continue;
-
-		if (long_port_mask & (1 << i))  {
-			valid = true;
-			long_hpd = true;
-		} else if (short_port_mask & (1 << i))
-			valid = true;
-
-		if (valid) {
-			enum irqreturn ret;
-
-			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
-			if (ret == IRQ_NONE) {
-				/* fall back to old school hpd */
-				old_bits |= (1 << intel_dig_port->base.hpd_pin);
-			}
-		}
-	}
-
-	if (old_bits) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		dev_priv->hpd_event_bits |= old_bits;
-		spin_unlock_irq(&dev_priv->irq_lock);
-		schedule_work(&dev_priv->hotplug_work);
-	}
-}
-
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
-
-static void i915_hotplug_work_func(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, hotplug_work);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct intel_connector *intel_connector;
-	struct intel_encoder *intel_encoder;
-	struct drm_connector *connector;
-	bool hpd_disabled = false;
-	bool changed = false;
-	u32 hpd_event_bits;
-
-	mutex_lock(&mode_config->mutex);
-	DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
-	spin_lock_irq(&dev_priv->irq_lock);
-
-	hpd_event_bits = dev_priv->hpd_event_bits;
-	dev_priv->hpd_event_bits = 0;
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		intel_connector = to_intel_connector(connector);
-		if (!intel_connector->encoder)
-			continue;
-		intel_encoder = intel_connector->encoder;
-		if (intel_encoder->hpd_pin > HPD_NONE &&
-		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
-		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
-			DRM_INFO("HPD interrupt storm detected on connector %s: "
-				 "switching from hotplug detection to polling\n",
-				connector->name);
-			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
-			connector->polled = DRM_CONNECTOR_POLL_CONNECT
-				| DRM_CONNECTOR_POLL_DISCONNECT;
-			hpd_disabled = true;
-		}
-		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
-				      connector->name, intel_encoder->hpd_pin);
-		}
-	}
-	 /* if there were no outputs to poll, poll was disabled,
-	  * therefore make sure it's enabled when disabling HPD on
-	  * some connectors */
-	if (hpd_disabled) {
-		drm_kms_helper_poll_enable(dev);
-		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
-				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
-	}
-
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		intel_connector = to_intel_connector(connector);
-		if (!intel_connector->encoder)
-			continue;
-		intel_encoder = intel_connector->encoder;
-		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-			if (intel_encoder->hot_plug)
-				intel_encoder->hot_plug(intel_encoder);
-			if (intel_hpd_irq_event(dev, connector))
-				changed = true;
-		}
-	}
-	mutex_unlock(&mode_config->mutex);
-
-	if (changed)
-		drm_kms_helper_hotplug_event(dev);
+						     &crtc->hwmode);
 }
 
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
@@ -1372,165 +1234,80 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
 	return ret;
 }
 
-#define HPD_STORM_DETECT_PERIOD 1000
-#define HPD_STORM_THRESHOLD 5
-
-static int pch_port_to_hotplug_shift(enum port port)
+static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
 {
 	switch (port) {
 	case PORT_A:
-	case PORT_E:
-	default:
-		return -1;
+		return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
 	case PORT_B:
-		return 0;
+		return val & PORTB_HOTPLUG_LONG_DETECT;
 	case PORT_C:
-		return 8;
+		return val & PORTC_HOTPLUG_LONG_DETECT;
 	case PORT_D:
-		return 16;
+		return val & PORTD_HOTPLUG_LONG_DETECT;
+	default:
+		return false;
 	}
 }
 
-static int i915_port_to_hotplug_shift(enum port port)
+static bool pch_port_hotplug_long_detect(enum port port, u32 val)
 {
 	switch (port) {
-	case PORT_A:
-	case PORT_E:
-	default:
-		return -1;
 	case PORT_B:
-		return 17;
+		return val & PORTB_HOTPLUG_LONG_DETECT;
 	case PORT_C:
-		return 19;
+		return val & PORTC_HOTPLUG_LONG_DETECT;
 	case PORT_D:
-		return 21;
+		return val & PORTD_HOTPLUG_LONG_DETECT;
+	case PORT_E:
+		return val & PORTE_HOTPLUG_LONG_DETECT;
+	default:
+		return false;
 	}
 }
 
-static enum port get_port_from_pin(enum hpd_pin pin)
+static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
 {
-	switch (pin) {
-	case HPD_PORT_B:
-		return PORT_B;
-	case HPD_PORT_C:
-		return PORT_C;
-	case HPD_PORT_D:
-		return PORT_D;
+	switch (port) {
+	case PORT_B:
+		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
+	case PORT_C:
+		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
+	case PORT_D:
+		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
 	default:
-		return PORT_A; /* no hpd */
+		return false;
 	}
 }
 
-static void intel_hpd_irq_handler(struct drm_device *dev,
-				  u32 hotplug_trigger,
-				  u32 dig_hotplug_reg,
-				  const u32 hpd[HPD_NUM_PINS])
+/* Get a bit mask of pins that have triggered, and which ones may be long. */
+static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
+			     u32 hotplug_trigger, u32 dig_hotplug_reg,
+			     const u32 hpd[HPD_NUM_PINS],
+			     bool long_pulse_detect(enum port port, u32 val))
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
 	enum port port;
-	bool storm_detected = false;
-	bool queue_dig = false, queue_hp = false;
-	u32 dig_shift;
-	u32 dig_port_mask = 0;
-
-	if (!hotplug_trigger)
-		return;
+	int i;
 
-	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
-			 hotplug_trigger, dig_hotplug_reg);
+	*pin_mask = 0;
+	*long_mask = 0;
 
-	spin_lock(&dev_priv->irq_lock);
-	for (i = 1; i < HPD_NUM_PINS; i++) {
-		if (!(hpd[i] & hotplug_trigger))
+	for_each_hpd_pin(i) {
+		if ((hpd[i] & hotplug_trigger) == 0)
 			continue;
 
-		port = get_port_from_pin(i);
-		if (port && dev_priv->hpd_irq_port[port]) {
-			bool long_hpd;
-
-			if (!HAS_GMCH_DISPLAY(dev_priv)) {
-				dig_shift = pch_port_to_hotplug_shift(port);
-				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
-			} else {
-				dig_shift = i915_port_to_hotplug_shift(port);
-				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
-			}
-
-			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
-					 port_name(port),
-					 long_hpd ? "long" : "short");
-			/* for long HPD pulses we want to have the digital queue happen,
-			   but we still want HPD storm detection to function. */
-			if (long_hpd) {
-				dev_priv->long_hpd_port_mask |= (1 << port);
-				dig_port_mask |= hpd[i];
-			} else {
-				/* for short HPD just trigger the digital queue */
-				dev_priv->short_hpd_port_mask |= (1 << port);
-				hotplug_trigger &= ~hpd[i];
-			}
-			queue_dig = true;
-		}
-	}
-
-	for (i = 1; i < HPD_NUM_PINS; i++) {
-		if (hpd[i] & hotplug_trigger &&
-		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
-			/*
-			 * On GMCH platforms the interrupt mask bits only
-			 * prevent irq generation, not the setting of the
-			 * hotplug bits itself. So only WARN about unexpected
-			 * interrupts on saner platforms.
-			 */
-			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
-				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
-				  hotplug_trigger, i, hpd[i]);
+		*pin_mask |= BIT(i);
 
+		if (!intel_hpd_pin_to_port(i, &port))
 			continue;
-		}
 
-		if (!(hpd[i] & hotplug_trigger) ||
-		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
-			continue;
-
-		if (!(dig_port_mask & hpd[i])) {
-			dev_priv->hpd_event_bits |= (1 << i);
-			queue_hp = true;
-		}
-
-		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
-				   dev_priv->hpd_stats[i].hpd_last_jiffies
-				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
-			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
-			dev_priv->hpd_stats[i].hpd_cnt = 0;
-			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
-		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
-			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
-			dev_priv->hpd_event_bits &= ~(1 << i);
-			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
-			storm_detected = true;
-		} else {
-			dev_priv->hpd_stats[i].hpd_cnt++;
-			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
-				      dev_priv->hpd_stats[i].hpd_cnt);
-		}
+		if (long_pulse_detect(port, dig_hotplug_reg))
+			*long_mask |= BIT(i);
 	}
 
-	if (storm_detected)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock(&dev_priv->irq_lock);
+	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
+			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
 
-	/*
-	 * Our hotplug handler can grab modeset locks (by calling down into the
-	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
-	 * queue for otherwise the flush_work in the pageflip code will
-	 * deadlock.
-	 */
-	if (queue_dig)
-		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
-	if (queue_hp)
-		schedule_work(&dev_priv->hotplug_work);
 }
 
 static void gmbus_irq_handler(struct drm_device *dev)
@@ -1755,28 +1532,35 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+	u32 pin_mask, long_mask;
 
-	if (hotplug_status) {
-		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
-		/*
-		 * Make sure hotplug status is cleared before we clear IIR, or else we
-		 * may miss hotplug events.
-		 */
-		POSTING_READ(PORT_HOTPLUG_STAT);
+	if (!hotplug_status)
+		return;
 
-		if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
-			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+	/*
+	 * Make sure hotplug status is cleared before we clear IIR, or else we
+	 * may miss hotplug events.
+	 */
+	POSTING_READ(PORT_HOTPLUG_STAT);
 
-			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
-		} else {
-			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
-			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
-		}
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+				   hotplug_trigger, hpd_status_g4x,
+				   i9xx_port_hotplug_long_detect);
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
 
-		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
-		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
 			dp_aux_irq_handler(dev);
+	} else {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+				   hotplug_trigger, hpd_status_i915,
+				   i9xx_port_hotplug_long_detect);
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
 	}
 }
 
@@ -1875,12 +1659,18 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
-	u32 dig_hotplug_reg;
 
-	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+	if (hotplug_trigger) {
+		u32 dig_hotplug_reg, pin_mask, long_mask;
+
+		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
-	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+				   dig_hotplug_reg, hpd_ibx,
+				   pch_port_hotplug_long_detect);
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
+	}
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1971,13 +1761,38 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
-	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
-	u32 dig_hotplug_reg;
+	u32 hotplug_trigger;
+
+	if (HAS_PCH_SPT(dev))
+		hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
+	else
+		hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
-	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+	if (hotplug_trigger) {
+		u32 dig_hotplug_reg, pin_mask, long_mask;
 
-	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
+		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+		if (HAS_PCH_SPT(dev)) {
+			intel_get_hpd_pins(&pin_mask, &long_mask,
+					   hotplug_trigger,
+					   dig_hotplug_reg, hpd_spt,
+					   pch_port_hotplug_long_detect);
+
+			/* detect PORTE HP event */
+			dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
+			if (pch_port_hotplug_long_detect(PORT_E,
+							 dig_hotplug_reg))
+				long_mask |= 1 << HPD_PORT_E;
+		} else {
+			intel_get_hpd_pins(&pin_mask, &long_mask,
+					   hotplug_trigger,
+					   dig_hotplug_reg, hpd_cpt,
+					   pch_port_hotplug_long_detect);
+		}
+
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
+	}
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2176,8 +1991,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t hp_control;
-	uint32_t hp_trigger;
+	u32 hp_control, hp_trigger;
+	u32 pin_mask, long_mask;
 
 	/* Get the status */
 	hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
@@ -2189,20 +2004,12 @@ static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
 		return;
 	}
 
-	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-		hp_control & BXT_HOTPLUG_CTL_MASK);
-
-	/* Check for HPD storm and schedule bottom half */
-	intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);
-
-	/*
-	 * FIXME: Save the hot plug status for bottom half before
-	 * clearing the sticky status bits, else the status will be
-	 * lost.
-	 */
-
 	/* Clear sticky bits in hpd status */
 	I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
+
+	intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
+			   hpd_bxt, bxt_port_hotplug_long_detect);
+	intel_hpd_irq_handler(dev, pin_mask, long_mask);
 }
 
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
@@ -3203,12 +3010,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
 	if (HAS_PCH_IBX(dev)) {
 		hotplug_irqs = SDE_HOTPLUG_MASK;
 		for_each_intel_encoder(dev, intel_encoder)
-			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
 				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
+	} else if (HAS_PCH_SPT(dev)) {
+		hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+		for_each_intel_encoder(dev, intel_encoder)
+			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
+				enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
 	} else {
 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
 		for_each_intel_encoder(dev, intel_encoder)
-			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
 				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
 	}
 
@@ -3226,6 +3038,13 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+
+	/* enable SPT PORTE hot plug */
+	if (HAS_PCH_SPT(dev)) {
+		hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+		hotplug |= PORTE_HOTPLUG_ENABLE;
+		I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+	}
 }
 
 static void bxt_hpd_irq_setup(struct drm_device *dev)
@@ -3237,7 +3056,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
 
 	/* Now, enable HPD */
 	for_each_intel_encoder(dev, intel_encoder) {
-		if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
+		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
 				== HPD_ENABLED)
 			hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
 	}
@@ -4130,7 +3949,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
 	/* Note HDMI and DP share hotplug bits */
 	/* enable bits are the same for all generations */
 	for_each_intel_encoder(dev, intel_encoder)
-		if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
 			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
 	/* Programming the CRT detection parameters tends
 	   to generate a spurious hotplug event about three
@@ -4270,46 +4089,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
 	I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable_work(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv),
-			     hotplug_reenable_work.work);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	int i;
-
-	intel_runtime_pm_get(dev_priv);
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
-		struct drm_connector *connector;
-
-		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
-			continue;
-
-		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-
-		list_for_each_entry(connector, &mode_config->connector_list, head) {
-			struct intel_connector *intel_connector = to_intel_connector(connector);
-
-			if (intel_connector->encoder->hpd_pin == i) {
-				if (connector->polled != intel_connector->polled)
-					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
-							 connector->name);
-				connector->polled = intel_connector->polled;
-				if (!connector->polled)
-					connector->polled = DRM_CONNECTOR_POLL_HPD;
-			}
-		}
-	}
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	intel_runtime_pm_put(dev_priv);
-}
-
 /**
  * intel_irq_init - initializes irq support
  * @dev_priv: i915 device instance
@@ -4321,8 +4100,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 
-	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
-	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
+	intel_hpd_init_work(dev_priv);
+
 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
@@ -4335,8 +4114,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 
 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
 			  i915_hangcheck_elapsed);
-	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
-			  intel_hpd_irq_reenable_work);
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
@@ -4422,46 +4199,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 }
 
 /**
- * intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
- *
- * This function enables the hotplug support. It requires that interrupts have
- * already been enabled with intel_irq_init_hw(). From this point on hotplug and
- * poll request can run concurrently to other code, so locking rules must be
- * obeyed.
- *
- * This is a separate step from interrupt enabling to simplify the locking rules
- * in the driver load and resume code.
- */
-void intel_hpd_init(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
-	int i;
-
-	for (i = 1; i < HPD_NUM_PINS; i++) {
-		dev_priv->hpd_stats[i].hpd_cnt = 0;
-		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-	}
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_connector *intel_connector = to_intel_connector(connector);
-		connector->polled = intel_connector->polled;
-		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
-		if (intel_connector->mst_port)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
-	}
-
-	/* Interrupt setup is already guaranteed to be single-threaded, this is
-	 * just to make the assert_spin_locked checks happy. */
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-/**
  * intel_irq_install - enables the hardware interrupt
  * @dev_priv: i915 device instance
  *
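
The hotplug refactor above replaces the per-platform shift tables with a
generic pin loop plus a per-platform long-pulse callback. A minimal
standalone sketch of that dispatch shape (simplified: ports and pins share
an index here, unlike the real code):

typedef int (*long_detect_fn)(int port, unsigned int val);

static void get_hpd_pins(unsigned int *pin_mask, unsigned int *long_mask,
			 unsigned int trigger, unsigned int reg,
			 const unsigned int hpd[], int npins,
			 long_detect_fn long_detect)
{
	int i;

	*pin_mask = 0;
	*long_mask = 0;

	for (i = 0; i < npins; i++) {
		if (!(hpd[i] & trigger))
			continue;

		*pin_mask |= 1u << i;
		if (long_detect(i, reg))
			*long_mask |= 1u << i;
	}
}
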
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8ac5a1b29ac0..5ae4b0aba564 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -28,7 +28,6 @@ struct i915_params i915 __read_mostly = {
 	.modeset = -1,
 	.panel_ignore_lid = 1,
 	.semaphores = -1,
-	.lvds_downclock = 0,
 	.lvds_channel_mode = 0,
 	.panel_use_ssc = -1,
 	.vbt_sdvo_panel_type = -1,
@@ -52,13 +51,14 @@ struct i915_params i915 __read_mostly = {
 	.use_mmio_flip = 0,
 	.mmio_debug = 0,
 	.verbose_state_checks = 1,
-	.nuclear_pageflip = 0,
 	.edp_vswing = 0,
+	.enable_guc_submission = false,
+	.guc_log_level = -1,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
 MODULE_PARM_DESC(modeset,
-	"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+	"Use kernel modesetting [KMS] (0=disable, "
 	"1=on, -1=force vga console preference [default])");
 
 module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
@@ -84,11 +84,6 @@ MODULE_PARM_DESC(enable_fbc,
 	"Enable frame buffer compression for power savings "
 	"(default: -1 (use per-chip default))");
 
-module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
-	"Use panel (LVDS/eDP) downclocking for power savings "
-	"(default: false)");
-
 module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
 MODULE_PARM_DESC(lvds_channel_mode,
 	 "Specify LVDS channel mode "
@@ -104,7 +99,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
 	"Override/Ignore selection of SDVO panel mode in the VBT "
 	"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 
-module_param_named(reset, i915.reset, bool, 0600);
+module_param_named_unsafe(reset, i915.reset, bool, 0600);
 MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
 module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
@@ -182,13 +177,16 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
 MODULE_PARM_DESC(verbose_state_checks,
 	"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
 
-module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
-MODULE_PARM_DESC(nuclear_pageflip,
-		 "Force atomic modeset functionality; only planes work for now (default: false).");
-
 /* WA to get away with the default setting in VBT for early platforms. Will be removed */
 module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
 MODULE_PARM_DESC(edp_vswing,
 		 "Ignore/Override vswing pre-emph table selection from VBT "
 		 "(0=use value from vbt [default], 1=low power swing(200mV),"
 		 "2=default swing(400mV))");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
+MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)");
+
+module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
+MODULE_PARM_DESC(guc_log_level,
+	"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2030f602cbf8..83a0888756d6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -50,12 +50,17 @@
 
 /* PCI config space */
 
-#define HPLLCC	0xc0 /* 855 only */
-#define   GC_CLOCK_CONTROL_MASK		(0xf << 0)
+#define HPLLCC	0xc0 /* 85x only */
+#define   GC_CLOCK_CONTROL_MASK		(0x7 << 0)
 #define   GC_CLOCK_133_200		(0 << 0)
 #define   GC_CLOCK_100_200		(1 << 0)
 #define   GC_CLOCK_100_133		(2 << 0)
-#define   GC_CLOCK_166_250		(3 << 0)
+#define   GC_CLOCK_133_266		(3 << 0)
+#define   GC_CLOCK_133_200_2		(4 << 0)
+#define   GC_CLOCK_133_266_2		(5 << 0)
+#define   GC_CLOCK_166_266		(6 << 0)
+#define   GC_CLOCK_166_250		(7 << 0)
+
 #define GCFGC2	0xda
 #define GCFGC	0xf0 /* 915+ only */
 #define   GC_LOW_FREQUENCY_ENABLE	(1 << 7)
@@ -155,6 +160,7 @@
 #define GAM_ECOCHK			0x4090
 #define   BDW_DISABLE_HDC_INVALIDATION	(1<<25)
 #define   ECOCHK_SNB_BIT		(1<<10)
+#define   ECOCHK_DIS_TLB		(1<<8)
 #define   HSW_ECOCHK_ARB_PRIO_SOL	(1<<6)
 #define   ECOCHK_PPGTT_CACHE64B		(0x3<<3)
 #define   ECOCHK_PPGTT_CACHE4B		(0x0<<3)
@@ -172,13 +178,22 @@
 #define GAB_CTL				0x24000
 #define   GAB_CTL_CONT_AFTER_PAGEFAULT	(1<<8)
 
-#define GEN7_BIOS_RESERVED		0x1082C0
-#define GEN7_BIOS_RESERVED_1M		(0 << 5)
-#define GEN7_BIOS_RESERVED_256K		(1 << 5)
-#define GEN8_BIOS_RESERVED_SHIFT       7
-#define GEN7_BIOS_RESERVED_MASK        0x1
-#define GEN8_BIOS_RESERVED_MASK        0x3
-
+#define GEN6_STOLEN_RESERVED		0x1082C0
+#define GEN6_STOLEN_RESERVED_ADDR_MASK	(0xFFF << 20)
+#define GEN7_STOLEN_RESERVED_ADDR_MASK	(0x3FFF << 18)
+#define GEN6_STOLEN_RESERVED_SIZE_MASK	(3 << 4)
+#define GEN6_STOLEN_RESERVED_1M		(0 << 4)
+#define GEN6_STOLEN_RESERVED_512K	(1 << 4)
+#define GEN6_STOLEN_RESERVED_256K	(2 << 4)
+#define GEN6_STOLEN_RESERVED_128K	(3 << 4)
+#define GEN7_STOLEN_RESERVED_SIZE_MASK	(1 << 5)
+#define GEN7_STOLEN_RESERVED_1M		(0 << 5)
+#define GEN7_STOLEN_RESERVED_256K	(1 << 5)
+#define GEN8_STOLEN_RESERVED_SIZE_MASK	(3 << 7)
+#define GEN8_STOLEN_RESERVED_1M		(0 << 7)
+#define GEN8_STOLEN_RESERVED_2M		(1 << 7)
+#define GEN8_STOLEN_RESERVED_4M		(2 << 7)
+#define GEN8_STOLEN_RESERVED_8M		(3 << 7)
 
 /* VGA stuff */
 
@@ -316,6 +331,8 @@
 #define   MI_RESTORE_EXT_STATE_EN	(1<<2)
 #define   MI_FORCE_RESTORE		(1<<1)
 #define   MI_RESTORE_INHIBIT		(1<<0)
+#define   HSW_MI_RS_SAVE_STATE_EN       (1<<3)
+#define   HSW_MI_RS_RESTORE_STATE_EN    (1<<2)
 #define MI_SEMAPHORE_SIGNAL	MI_INSTR(0x1b, 0) /* GEN8+ */
 #define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
 #define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
@@ -347,6 +364,8 @@
 #define   MI_INVALIDATE_BSD		(1<<7)
 #define   MI_FLUSH_DW_USE_GTT		(1<<2)
 #define   MI_FLUSH_DW_USE_PPGTT		(0<<2)
+#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1)
+#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE		(1)
 /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -356,6 +375,7 @@
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_BATCH_BUFFER_START_GEN8	MI_INSTR(0x31, 1)
+#define   MI_BATCH_RESOURCE_STREAMER (1<<10)
 
 #define MI_PREDICATE_SRC0	(0x2400)
 #define MI_PREDICATE_SRC1	(0x2408)
@@ -410,6 +430,7 @@
 #define   DISPLAY_PLANE_A           (0<<20)
 #define   DISPLAY_PLANE_B           (1<<20)
 #define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define   PIPE_CONTROL_FLUSH_L3				(1<<27)
 #define   PIPE_CONTROL_GLOBAL_GTT_IVB			(1<<24) /* gen7+ */
 #define   PIPE_CONTROL_MMIO_WRITE			(1<<23)
 #define   PIPE_CONTROL_STORE_DATA_INDEX			(1<<21)
@@ -426,6 +447,7 @@
 #define   PIPE_CONTROL_INDIRECT_STATE_DISABLE		(1<<9)
 #define   PIPE_CONTROL_NOTIFY				(1<<8)
 #define   PIPE_CONTROL_FLUSH_ENABLE			(1<<7) /* gen7+ */
+#define   PIPE_CONTROL_DC_FLUSH_ENABLE			(1<<5)
 #define   PIPE_CONTROL_VF_CACHE_INVALIDATE		(1<<4)
 #define   PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1<<3)
 #define   PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1<<2)
@@ -449,7 +471,6 @@
 #define MI_CLFLUSH              MI_INSTR(0x27, 0)
 #define MI_REPORT_PERF_COUNT    MI_INSTR(0x28, 0)
 #define   MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_MEM    MI_INSTR(0x29, 0)
 #define MI_LOAD_REGISTER_REG    MI_INSTR(0x2A, 0)
 #define MI_RS_STORE_DATA_IMM    MI_INSTR(0x2B, 0)
 #define MI_LOAD_URB_MEM         MI_INSTR(0x2C, 0)
@@ -1163,10 +1184,12 @@ enum skl_disp_power_wells {
 #define _PORT_PLL_EBB_0_A		0x162034
 #define _PORT_PLL_EBB_0_B		0x6C034
 #define _PORT_PLL_EBB_0_C		0x6C340
-#define   PORT_PLL_P1_MASK		(0x07 << 13)
-#define   PORT_PLL_P1(x)		((x)  << 13)
-#define   PORT_PLL_P2_MASK		(0x1f << 8)
-#define   PORT_PLL_P2(x)		((x)  << 8)
+#define   PORT_PLL_P1_SHIFT		13
+#define   PORT_PLL_P1_MASK		(0x07 << PORT_PLL_P1_SHIFT)
+#define   PORT_PLL_P1(x)		((x)  << PORT_PLL_P1_SHIFT)
+#define   PORT_PLL_P2_SHIFT		8
+#define   PORT_PLL_P2_MASK		(0x1f << PORT_PLL_P2_SHIFT)
+#define   PORT_PLL_P2(x)		((x)  << PORT_PLL_P2_SHIFT)
 #define BXT_PORT_PLL_EBB_0(port)	_PORT3(port, _PORT_PLL_EBB_0_A, \
 						_PORT_PLL_EBB_0_B,	\
 						_PORT_PLL_EBB_0_C)
@@ -1186,8 +1209,9 @@ enum skl_disp_power_wells {
 /* PORT_PLL_0_A */
 #define   PORT_PLL_M2_MASK		0xFF
 /* PORT_PLL_1_A */
-#define   PORT_PLL_N_MASK		(0x0F << 8)
-#define   PORT_PLL_N(x)			((x) << 8)
+#define   PORT_PLL_N_SHIFT		8
+#define   PORT_PLL_N_MASK		(0x0F << PORT_PLL_N_SHIFT)
+#define   PORT_PLL_N(x)			((x) << PORT_PLL_N_SHIFT)
 /* PORT_PLL_2_A */
 #define   PORT_PLL_M2_FRAC_MASK		0x3FFFFF
 /* PORT_PLL_3_A */
@@ -1201,9 +1225,11 @@ enum skl_disp_power_wells {
 /* PORT_PLL_8_A */
 #define   PORT_PLL_TARGET_CNT_MASK	0x3FF
 /* PORT_PLL_9_A */
-#define  PORT_PLL_LOCK_THRESHOLD_MASK	0xe
+#define  PORT_PLL_LOCK_THRESHOLD_SHIFT	1
+#define  PORT_PLL_LOCK_THRESHOLD_MASK	(0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
 /* PORT_PLL_10_A */
 #define  PORT_PLL_DCO_AMP_OVR_EN_H	(1<<27)
+#define  PORT_PLL_DCO_AMP_DEFAULT	15
 #define  PORT_PLL_DCO_AMP_MASK		0x3c00
 #define  PORT_PLL_DCO_AMP(x)		((x)<<10)
 #define _PORT_PLL_BASE(port)		_PORT3(port, _PORT_PLL_0_A,	\
@@ -1377,6 +1403,18 @@ enum skl_disp_power_wells {
 							_PORT_TX_DW14_LN0_C) + \
 					 _BXT_LANE_OFFSET(lane))
 
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1			0x4F074
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK			0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0		0x6C00C
+/* Per-port I_boost (balance leg) shift/mask */
+#define BALANCE_LEG_SHIFT(port)		(8+3*(port))
+#define BALANCE_LEG_MASK(port)		(7<<(8+3*(port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT	23
+
 /*
  * Fence registers
  */
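A hedged sketch of how these fields compose when programming a port's I_boost value (iboost is the 3-bit value translated from the VBT; the per-port disable bit is assumed to sit at BALANCE_LEG_DISABLE_SHIFT + port):

    u32 val = I915_READ(DISPIO_CR_TX_BMU_CR0);
    val &= ~BALANCE_LEG_MASK(port);
    val &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
    val |= (u32)iboost << BALANCE_LEG_SHIFT(port);
    I915_WRITE(DISPIO_CR_TX_BMU_CR0, val);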
@@ -1456,6 +1494,9 @@ enum skl_disp_power_wells {
 #define RING_MAX_IDLE(base)	((base)+0x54)
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
+#define RING_RESET_CTL(base)	((base)+0xd0)
+#define   RESET_CTL_REQUEST_RESET  (1 << 0)
+#define   RESET_CTL_READY_TO_RESET (1 << 1)
 
 #define HSW_GTT_CACHE_EN	0x4024
 #define   GTT_CACHE_EN_ALL	0xF0007FFF
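These two bits suggest a per-engine request/acknowledge handshake. A sketch, assuming the register takes masked-bit writes like other per-engine control registers and an arbitrary 700 ms timeout:

    I915_WRITE(RING_RESET_CTL(base),
               _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
    if (wait_for(I915_READ(RING_RESET_CTL(base)) & RESET_CTL_READY_TO_RESET,
                 700))
            DRM_ERROR("engine never reported ready-to-reset\n");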
@@ -1946,6 +1987,9 @@ enum skl_disp_power_wells {
 #define FBC_FENCE_OFF		0x03218 /* BSpec typo has 321Bh */
 #define FBC_TAG			0x03300
 
+#define FBC_STATUS2		0x43214
+#define  FBC_COMPRESSION_MASK	0x7ff
+
 #define FBC_LL_SIZE		(1536)
 
 /* Framebuffer compression for GM45+ */
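A sketch of how a debugfs dump could use the new mask to report whether FBC is actually compressing (the seq file m and the yesno() helper are assumptions):

    u32 compressing = I915_READ(FBC_STATUS2) & FBC_COMPRESSION_MASK;
    seq_printf(m, "Compressing: %s\n", yesno(compressing));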
@@ -2116,7 +2160,7 @@ enum skl_disp_power_wells {
 #define   DPLL_DVO_2X_MODE		(1 << 30)
 #define   DPLL_EXT_BUFFER_ENABLE_VLV	(1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
-#define   DPLL_REFA_CLK_ENABLE_VLV	(1 << 29)
+#define   DPLL_REF_CLK_ENABLE_VLV	(1 << 29)
 #define   DPLL_VGA_MODE_DIS		(1 << 28)
 #define   DPLLB_MODE_DAC_SERIAL		(1 << 26) /* i915 */
 #define   DPLLB_MODE_LVDS		(2 << 26) /* i915 */
@@ -2130,8 +2174,8 @@ enum skl_disp_power_wells {
 #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
 #define   DPLL_LOCK_VLV			(1<<15)
 #define   DPLL_INTEGRATED_CRI_CLK_VLV	(1<<14)
-#define   DPLL_INTEGRATED_CLOCK_VLV	(1<<13)
-#define   DPLL_SSC_REF_CLOCK_CHV	(1<<13)
+#define   DPLL_INTEGRATED_REF_CLK_VLV	(1<<13)
+#define   DPLL_SSC_REF_CLK_CHV		(1<<13)
 #define   DPLL_PORTC_READY_MASK		(0xf << 4)
 #define   DPLL_PORTB_READY_MASK		(0xf)
 
@@ -2488,6 +2532,9 @@ enum skl_disp_power_wells {
 #define CLKCFG_MEM_800					(3 << 4)
 #define CLKCFG_MEM_MASK					(7 << 4)
 
+#define HPLLVCO                 (MCHBAR_MIRROR_BASE + 0xc38)
+#define HPLLVCO_MOBILE          (MCHBAR_MIRROR_BASE + 0xc0f)
+
 #define TSC1			0x11001
 #define   TSE			(1<<0)
 #define TR1			0x11006
@@ -2718,8 +2765,10 @@ enum skl_disp_power_wells {
 #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
 
 #define GEN6_GT_PERF_STATUS	(MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define BXT_GT_PERF_STATUS      (MCHBAR_MIRROR_BASE_SNB + 0x7070)
 #define GEN6_RP_STATE_LIMITS	(MCHBAR_MIRROR_BASE_SNB + 0x5994)
 #define GEN6_RP_STATE_CAP	(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define BXT_RP_STATE_CAP        0x138170
 
 #define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
 #define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
@@ -2767,7 +2816,8 @@ enum skl_disp_power_wells {
  * valid. Now, docs explain in dwords what is in the context object. The full
  * size is 70720 bytes, however, the power context and execlist context will
  * never be saved (power context is stored elsewhere, and execlists don't work
- * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
+ * on HSW) - so the final size, including the extra state required for the
+ * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
  */
 #define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
 /* Same as Haswell, but 72064 bytes now. */
@@ -4398,9 +4448,32 @@ enum skl_disp_power_wells {
 #define   DSPARB_BSTART_SHIFT	0
 #define   DSPARB_BEND_SHIFT	9 /* on 855 */
 #define   DSPARB_AEND_SHIFT	0
-
+#define   DSPARB_SPRITEA_SHIFT_VLV	0
+#define   DSPARB_SPRITEA_MASK_VLV	(0xff << 0)
+#define   DSPARB_SPRITEB_SHIFT_VLV	8
+#define   DSPARB_SPRITEB_MASK_VLV	(0xff << 8)
+#define   DSPARB_SPRITEC_SHIFT_VLV	16
+#define   DSPARB_SPRITEC_MASK_VLV	(0xff << 16)
+#define   DSPARB_SPRITED_SHIFT_VLV	24
+#define   DSPARB_SPRITED_MASK_VLV	(0xff << 24)
 #define DSPARB2			(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define   DSPARB_SPRITEA_HI_SHIFT_VLV	0
+#define   DSPARB_SPRITEA_HI_MASK_VLV	(0x1 << 0)
+#define   DSPARB_SPRITEB_HI_SHIFT_VLV	4
+#define   DSPARB_SPRITEB_HI_MASK_VLV	(0x1 << 4)
+#define   DSPARB_SPRITEC_HI_SHIFT_VLV	8
+#define   DSPARB_SPRITEC_HI_MASK_VLV	(0x1 << 8)
+#define   DSPARB_SPRITED_HI_SHIFT_VLV	12
+#define   DSPARB_SPRITED_HI_MASK_VLV	(0x1 << 12)
+#define   DSPARB_SPRITEE_HI_SHIFT_VLV	16
+#define   DSPARB_SPRITEE_HI_MASK_VLV	(0x1 << 16)
+#define   DSPARB_SPRITEF_HI_SHIFT_VLV	20
+#define   DSPARB_SPRITEF_HI_MASK_VLV	(0x1 << 20)
 #define DSPARB3			(VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define   DSPARB_SPRITEE_SHIFT_VLV	0
+#define   DSPARB_SPRITEE_MASK_VLV	(0xff << 0)
+#define   DSPARB_SPRITEF_SHIFT_VLV	8
+#define   DSPARB_SPRITEF_MASK_VLV	(0xff << 8)
 
 /* pnv/gen4/g4x/vlv/chv */
 #define DSPFW1			(dev_priv->info.display_mmio_offset + 0x70034)
@@ -5754,6 +5827,13 @@ enum skl_disp_power_wells {
 #define HSW_NDE_RSTWRN_OPT	0x46408
 #define  RESET_PCH_HANDSHAKE_ENABLE	(1<<4)
 
+#define SKL_DFSM			0x51000
+#define SKL_DFSM_CDCLK_LIMIT_MASK	(3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675	(0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540	(1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450	(2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5	(3 << 23)
+
 #define FF_SLICE_CS_CHICKEN2			0x20e4
 #define  GEN9_TSG_BARRIER_ACK_DISABLE		(1<<8)
 
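Decoding the fused cdclk ceiling is a straight compare against the two-bit field. A sketch:

    u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
    int max_cdclk_khz;

    if (limit == SKL_DFSM_CDCLK_LIMIT_675)
            max_cdclk_khz = 675000;
    else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
            max_cdclk_khz = 540000;
    else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
            max_cdclk_khz = 450000;
    else
            max_cdclk_khz = 337500;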
@@ -5791,6 +5871,7 @@ enum skl_disp_power_wells {
 
 #define GEN8_L3SQCREG4				0xb118
 #define  GEN8_LQSC_RO_PERF_DIS			(1<<27)
+#define  GEN8_LQSC_FLUSH_COHERENT_LINES		(1<<21)
 
 /* GEN8 chicken */
 #define HDC_CHICKEN0				0x7300
@@ -5868,6 +5949,7 @@ enum skl_disp_power_wells {
 #define SDE_AUXC_CPT		(1 << 26)
 #define SDE_AUXB_CPT		(1 << 25)
 #define SDE_AUX_MASK_CPT	(7 << 25)
+#define SDE_PORTE_HOTPLUG_SPT	(1 << 25)
 #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
 #define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
@@ -5878,6 +5960,10 @@ enum skl_disp_power_wells {
 				 SDE_PORTD_HOTPLUG_CPT |	\
 				 SDE_PORTC_HOTPLUG_CPT |	\
 				 SDE_PORTB_HOTPLUG_CPT)
+#define SDE_HOTPLUG_MASK_SPT	(SDE_PORTE_HOTPLUG_SPT |	\
+				 SDE_PORTD_HOTPLUG_CPT |	\
+				 SDE_PORTC_HOTPLUG_CPT |	\
+				 SDE_PORTB_HOTPLUG_CPT)
 #define SDE_GMBUS_CPT		(1 << 17)
 #define SDE_ERROR_CPT		(1 << 16)
 #define SDE_AUDIO_CP_REQ_C_CPT	(1 << 10)
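With port E sharing bit 25, an SPT interrupt handler would mask with the new constant rather than the CPT one. A sketch (handler name assumed):

    u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;

    if (hotplug_trigger)
            spt_hpd_irq_handler(dev, hotplug_trigger);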
@@ -5913,6 +5999,11 @@ enum skl_disp_power_wells {
 
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG        0xc4030		/* SHOTPLUG_CTL */
+#define BXT_PORTA_HOTPLUG_ENABLE	(1 << 28)
+#define BXT_PORTA_HOTPLUG_STATUS_MASK	(0x3 << 24)
+#define  BXT_PORTA_HOTPLUG_NO_DETECT	(0 << 24)
+#define  BXT_PORTA_HOTPLUG_SHORT_DETECT	(1 << 24)
+#define  BXT_PORTA_HOTPLUG_LONG_DETECT	(2 << 24)
 #define PORTD_HOTPLUG_ENABLE            (1 << 20)
 #define PORTD_PULSE_DURATION_2ms        (0)
 #define PORTD_PULSE_DURATION_4_5ms      (1 << 18)
@@ -5944,6 +6035,13 @@ enum skl_disp_power_wells {
 #define  PORTB_HOTPLUG_SHORT_DETECT	(1 << 0)
 #define  PORTB_HOTPLUG_LONG_DETECT	(2 << 0)
 
+#define PCH_PORT_HOTPLUG2        0xc403C		/* SHOTPLUG_CTL2 */
+#define PORTE_HOTPLUG_ENABLE            (1 << 4)
+#define PORTE_HOTPLUG_STATUS_MASK	(0x3 << 0)
+#define  PORTE_HOTPLUG_NO_DETECT	(0 << 0)
+#define  PORTE_HOTPLUG_SHORT_DETECT	(1 << 0)
+#define  PORTE_HOTPLUG_LONG_DETECT	(2 << 0)
+
 #define PCH_GPIOA               0xc5010
 #define PCH_GPIOB               0xc5014
 #define PCH_GPIOC               0xc5018
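A sketch of decoding a port E pulse, following the existing SHOTPLUG_CTL pattern of clearing the status bits by writing the register back:

    u32 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
    bool long_pulse;

    I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
    long_pulse = (dig_hotplug_reg & PORTE_HOTPLUG_STATUS_MASK) ==
                 PORTE_HOTPLUG_LONG_DETECT;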
@@ -6047,6 +6145,9 @@ enum skl_disp_power_wells {
 #define _VIDEO_DIP_CTL_A         0xe0200
 #define _VIDEO_DIP_DATA_A        0xe0208
 #define _VIDEO_DIP_GCP_A         0xe0210
+#define  GCP_COLOR_INDICATION		(1 << 2)
+#define  GCP_DEFAULT_PHASE_ENABLE	(1 << 1)
+#define  GCP_AV_MUTE			(1 << 0)
 
 #define _VIDEO_DIP_CTL_B         0xe1200
 #define _VIDEO_DIP_DATA_B        0xe1208
@@ -6186,6 +6287,7 @@ enum skl_disp_power_wells {
 #define _TRANSA_CHICKEN1	 0xf0060
 #define _TRANSB_CHICKEN1	 0xf1060
 #define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define  TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE	(1<<10)
 #define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE	(1<<4)
 #define _TRANSA_CHICKEN2	 0xf0064
 #define _TRANSB_CHICKEN2	 0xf1064
@@ -6370,6 +6472,8 @@ enum skl_disp_power_wells {
 #define PCH_PP_CONTROL		0xc7204
 #define  PANEL_UNLOCK_REGS	(0xabcd << 16)
 #define  PANEL_UNLOCK_MASK	(0xffff << 16)
+#define  BXT_POWER_CYCLE_DELAY_MASK	(0x1f0)
+#define  BXT_POWER_CYCLE_DELAY_SHIFT	4
 #define  EDP_FORCE_VDD		(1 << 3)
 #define  EDP_BLC_ENABLE		(1 << 2)
 #define  PANEL_POWER_RESET	(1 << 1)
@@ -6398,6 +6502,17 @@ enum skl_disp_power_wells {
 #define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
 #define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
+/* BXT PPS changes - 2nd set of PPS registers */
+#define _BXT_PP_STATUS2 	0xc7300
+#define _BXT_PP_CONTROL2 	0xc7304
+#define _BXT_PP_ON_DELAYS2	0xc7308
+#define _BXT_PP_OFF_DELAYS2	0xc730c
+
+#define BXT_PP_STATUS(n)	((!(n)) ? PCH_PP_STATUS : _BXT_PP_STATUS2)
+#define BXT_PP_CONTROL(n)	((!(n)) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2)
+#define BXT_PP_ON_DELAYS(n)	((!(n)) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2)
+#define BXT_PP_OFF_DELAYS(n)	((!(n)) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2)
+
 #define PCH_DP_B		0xe4100
 #define PCH_DPB_AUX_CH_CTL	0xe4110
 #define PCH_DPB_AUX_CH_DATA1	0xe4114
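A sketch of programming the power-cycle delay through the second PPS register set (sequencer index 0; power_cycle_delay assumed already converted to the units the hardware expects):

    u32 pp = I915_READ(BXT_PP_CONTROL(0));
    pp &= ~BXT_POWER_CYCLE_DELAY_MASK;
    pp |= power_cycle_delay << BXT_POWER_CYCLE_DELAY_SHIFT;
    I915_WRITE(BXT_PP_CONTROL(0), pp);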
@@ -6698,6 +6813,7 @@ enum skl_disp_power_wells {
 #define	  GEN6_PCODE_READ_RC6VIDS		0x5
 #define     GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
 #define     GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) + 245)
+#define   BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ	0x18
 #define   GEN9_PCODE_READ_MEM_LATENCY		0x6
 #define     GEN9_MEM_LATENCY_LEVEL_MASK		0xFF
 #define     GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT	8
@@ -6756,6 +6872,9 @@ enum skl_disp_power_wells {
 #define GEN7_MISCCPCTL			(0x9424)
 #define   GEN7_DOP_CLOCK_GATE_ENABLE	(1<<0)
 
+#define GEN8_GARBCNTL                   0xB004
+#define   GEN9_GAPS_TSV_CREDIT_DISABLE  (1<<7)
+
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1			0xB008 /* L3CD Error Status 1 */
 #define HSW_L3CDERRST11			0xB208 /* L3CD Error Status register 1 slice 1 */
@@ -7163,6 +7282,7 @@ enum skl_disp_power_wells {
 #define  LCPLL_CLK_FREQ_337_5_BDW	(2<<26)
 #define  LCPLL_CLK_FREQ_675_BDW		(3<<26)
 #define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
+#define  LCPLL_ROOT_CD_CLOCK_DISABLE	(1<<24)
 #define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
 #define  LCPLL_POWER_DOWN_ALLOW		(1<<22)
 #define  LCPLL_CD_SOURCE_FCLK		(1<<21)
@@ -7265,12 +7385,6 @@ enum skl_disp_power_wells {
 #define DC_STATE_EN			0x45504
 #define  DC_STATE_EN_UPTO_DC5		(1<<0)
 #define  DC_STATE_EN_DC9		(1<<3)
-
-/*
-* SKL DC
-*/
-#define  DC_STATE_EN			0x45504
-#define  DC_STATE_EN_UPTO_DC5		(1<<0)
 #define  DC_STATE_EN_UPTO_DC6		(2<<0)
 #define  DC_STATE_EN_UPTO_DC5_DC6_MASK   0x3
 
@@ -7822,4 +7936,13 @@ enum skl_disp_power_wells {
 #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
 #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
 
+/* MOCS (Memory Object Control State) registers */
+#define GEN9_LNCFCMOCS0		0xb020	/* L3 Cache Control base */
+
+#define GEN9_GFX_MOCS_0		0xc800	/* Graphics MOCS base register */
+#define GEN9_MFX0_MOCS_0	0xc900	/* Media 0 MOCS base register */
+#define GEN9_MFX1_MOCS_0	0xca00	/* Media 1 MOCS base register */
+#define GEN9_VEBOX_MOCS_0	0xcb00	/* Video MOCS base register */
+#define GEN9_BLT_MOCS_0		0xcc00	/* Blitter MOCS base register */
+
 #endif /* _I915_REG_H_ */
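Each engine's MOCS entries occupy consecutive dwords above its base register, so programming a table reduces to a loop. A sketch with an assumed table:

    for (index = 0; index < table_size; index++)
            I915_WRITE(GEN9_GFX_MOCS_0 + index * 4, mocs_table[index]);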
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index cf67f82f7b7f..1ccac618468e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev)
 	}
 
 	/* only restore FBC info on platforms that support FBC */
-	intel_fbc_disable(dev);
+	intel_fbc_disable(dev_priv);
 
 	/* restore FBC interval */
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 247626885f49..55bd04c6b939 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -64,24 +64,16 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
 			goto out;
 		}
 
-		units = 0;
-		div = 1000000ULL;
-
-		if (IS_CHERRYVIEW(dev)) {
+		if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
 			/* Special case for 320Mhz */
-			if (czcount_30ns == 1) {
-				div = 10000000ULL;
-				units = 3125ULL;
-			} else {
-				/* chv counts are one less */
-				czcount_30ns += 1;
-			}
+			div = 10000000ULL;
+			units = 3125ULL;
+		} else {
+			czcount_30ns += 1;
+			div = 1000000ULL;
+			units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
 		}
 
-		if (units == 0)
-			units = DIV_ROUND_UP_ULL(30ULL * bias,
-						 (u64)czcount_30ns);
-
 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
 			units <<= 8;
 
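For reference, the scaling the rewritten branch selects, in comment form:

    /*
     * CHV with czcount_30ns == 1 (the 320 MHz special case):
     *         units = 3125, div = 10^7
     * otherwise: czcount_30ns += 1, div = 10^6,
     *         units = DIV_ROUND_UP(30 * bias, czcount_30ns)
     * VLV_COUNT_RANGE_HIGH still scales units by 256 afterwards.
     */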
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 849a2590e010..2f34c47bd4bf 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -424,10 +424,10 @@ TRACE_EVENT(i915_gem_evict_vm,
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-	    TP_PROTO(struct intel_engine_cs *from,
-		     struct intel_engine_cs *to,
+	    TP_PROTO(struct drm_i915_gem_request *to_req,
+		     struct intel_engine_cs *from,
 		     struct drm_i915_gem_request *req),
-	    TP_ARGS(from, to, req),
+	    TP_ARGS(to_req, from, req),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -439,7 +439,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 	    TP_fast_assign(
 			   __entry->dev = from->dev->primary->index;
 			   __entry->sync_from = from->id;
-			   __entry->sync_to = to->id;
+			   __entry->sync_to = to_req->ring->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
 
@@ -475,8 +475,8 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
-	    TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
-	    TP_ARGS(ring, invalidate, flush),
+	    TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
+	    TP_ARGS(req, invalidate, flush),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -486,8 +486,8 @@ TRACE_EVENT(i915_gem_ring_flush,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
+			   __entry->dev = req->ring->dev->primary->index;
+			   __entry->ring = req->ring->id;
 			   __entry->invalidate = invalidate;
 			   __entry->flush = flush;
 			   ),
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 8e35e0d013df..e2531cf59266 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -35,133 +35,6 @@
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
 
-
-/**
- * intel_atomic_check - validate state object
- * @dev: drm device
- * @state: state to validate
- */
-int intel_atomic_check(struct drm_device *dev,
-		       struct drm_atomic_state *state)
-{
-	int nplanes = dev->mode_config.num_total_plane;
-	int ncrtcs = dev->mode_config.num_crtc;
-	int nconnectors = dev->mode_config.num_connector;
-	enum pipe nuclear_pipe = INVALID_PIPE;
-	struct intel_crtc *nuclear_crtc = NULL;
-	struct intel_crtc_state *crtc_state = NULL;
-	int ret;
-	int i;
-	bool not_nuclear = false;
-
-	/*
-	 * FIXME:  At the moment, we only support "nuclear pageflip" on a
-	 * single CRTC.  Cross-crtc updates will be added later.
-	 */
-	for (i = 0; i < nplanes; i++) {
-		struct intel_plane *plane = to_intel_plane(state->planes[i]);
-		if (!plane)
-			continue;
-
-		if (nuclear_pipe == INVALID_PIPE) {
-			nuclear_pipe = plane->pipe;
-		} else if (nuclear_pipe != plane->pipe) {
-			DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
-			return -EINVAL;
-		}
-	}
-
-	/*
-	 * FIXME:  We only handle planes for now; make sure there are no CRTC's
-	 * or connectors involved.
-	 */
-	state->allow_modeset = false;
-	for (i = 0; i < ncrtcs; i++) {
-		struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
-		if (crtc)
-			memset(&crtc->atomic, 0, sizeof(crtc->atomic));
-		if (crtc && crtc->pipe != nuclear_pipe)
-			not_nuclear = true;
-		if (crtc && crtc->pipe == nuclear_pipe) {
-			nuclear_crtc = crtc;
-			crtc_state = to_intel_crtc_state(state->crtc_states[i]);
-		}
-	}
-	for (i = 0; i < nconnectors; i++)
-		if (state->connectors[i] != NULL)
-			not_nuclear = true;
-
-	if (not_nuclear) {
-		DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
-		return -EINVAL;
-	}
-
-	ret = drm_atomic_helper_check_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/* FIXME: move to crtc atomic check function once it is ready */
-	ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
-
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @async: asynchronous commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
- * we can only handle plane-related operations and do not yet support
- * asynchronous commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int intel_atomic_commit(struct drm_device *dev,
-			struct drm_atomic_state *state,
-			bool async)
-{
-	struct drm_crtc_state *crtc_state;
-	struct drm_crtc *crtc;
-	int ret, i;
-
-	if (async) {
-		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
-		return -EINVAL;
-	}
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/* Point of no return */
-	drm_atomic_helper_swap_state(dev, state);
-
-	/* swap crtc_scaler_state */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
-
-		if (INTEL_INFO(dev)->gen >= 9)
-			skl_detach_scalers(to_intel_crtc(crtc));
-
-		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
-	}
-
-	drm_atomic_helper_wait_for_vblanks(dev, state);
-	drm_atomic_helper_cleanup_planes(dev, state);
-	drm_atomic_state_free(state);
-
-	return 0;
-}
-
 /**
  * intel_connector_atomic_get_property - fetch connector property value
  * @connector: connector to fetch property for
@@ -269,17 +142,12 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
 	struct drm_plane *plane = NULL;
 	struct intel_plane *intel_plane;
 	struct intel_plane_state *plane_state = NULL;
-	struct intel_crtc_scaler_state *scaler_state;
-	struct drm_atomic_state *drm_state;
+	struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
+	struct drm_atomic_state *drm_state = crtc_state->base.state;
 	int num_scalers_need;
 	int i, j;
 
-	if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
-		return 0;
-
-	scaler_state = &crtc_state->scaler_state;
-	drm_state = crtc_state->base.state;
-
 	num_scalers_need = hweight32(scaler_state->scaler_users);
 	DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
 		crtc_state, num_scalers_need, intel_crtc->num_scalers,
@@ -307,17 +175,21 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
 	/* walk through scaler_users bits and start assigning scalers */
 	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
 		int *scaler_id;
+		const char *name;
+		int idx;
 
 		/* skip if scaler not required */
 		if (!(scaler_state->scaler_users & (1 << i)))
 			continue;
 
 		if (i == SKL_CRTC_INDEX) {
+			name = "CRTC";
+			idx = intel_crtc->base.base.id;
+
 			/* panel fitter case: assign as a crtc scaler */
 			scaler_id = &scaler_state->scaler_id;
 		} else {
-			if (!drm_state)
-				continue;
+			name = "PLANE";
 
 			/* plane scaler case: assign as a plane scaler */
 			/* find the plane that set the bit as scaler_user */
@@ -336,9 +208,19 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
 						plane->base.id);
 					return PTR_ERR(state);
 				}
+
+				/*
+				 * the plane is added after plane checks are run,
+				 * but since this plane is unchanged just do the
+				 * minimum required validation.
+				 */
+				if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+					intel_crtc->atomic.wait_for_flips = true;
+				crtc_state->base.planes_changed = true;
 			}
 
 			intel_plane = to_intel_plane(plane);
+			idx = plane->base.id;
 
 			/* plane on different crtc cannot be a scaler user of this crtc */
 			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
@@ -354,23 +236,16 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
 			for (j = 0; j < intel_crtc->num_scalers; j++) {
 				if (!scaler_state->scalers[j].in_use) {
 					scaler_state->scalers[j].in_use = 1;
-					*scaler_id = scaler_state->scalers[j].id;
+					*scaler_id = j;
 					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
-						intel_crtc->pipe,
-						i == SKL_CRTC_INDEX ? scaler_state->scaler_id :
-							plane_state->scaler_id,
-						i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
-						i == SKL_CRTC_INDEX ?  intel_crtc->base.base.id :
-						plane->base.id);
+						intel_crtc->pipe, *scaler_id, name, idx);
 					break;
 				}
 			}
 		}
 
 		if (WARN_ON(*scaler_id < 0)) {
-			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n",
-				i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
-				i == SKL_CRTC_INDEX ? intel_crtc->base.base.id:plane->base.id);
+			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
 			continue;
 		}
 
@@ -392,3 +267,54 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
 
 	return 0;
 }
+
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll_config *shared_dpll)
+{
+	enum intel_dpll_id i;
+
+	/* Copy shared dpll state */
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		shared_dpll[i] = pll->config;
+	}
+}
+
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+	if (!state->dpll_set) {
+		state->dpll_set = true;
+
+		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+						  state->shared_dpll);
+	}
+
+	return state->shared_dpll;
+}
+
+struct drm_atomic_state *
+intel_atomic_state_alloc(struct drm_device *dev)
+{
+	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+		kfree(state);
+		return NULL;
+	}
+
+	return &state->base;
+}
+
+void intel_atomic_state_clear(struct drm_atomic_state *s)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(s);
+	drm_atomic_state_default_clear(&state->base);
+	state->dpll_set = false;
+}
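These helpers subclass drm_atomic_state so the shared DPLL snapshot rides along with each modeset. A sketch of the wiring the driver needs, assuming the atomic_state_alloc/atomic_state_clear hooks in struct drm_mode_config_funcs of this era:

    static const struct drm_mode_config_funcs intel_mode_funcs = {
            /* ... existing fb_create/output_poll_changed hooks ... */
            .atomic_state_alloc = intel_atomic_state_alloc,
            .atomic_state_clear = intel_atomic_state_clear,
    };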
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 86ba4b2c3a65..f1ab8e4b9c11 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -56,6 +56,7 @@ intel_create_plane_state(struct drm_plane *plane)
 
 	state->base.plane = plane;
 	state->base.rotation = BIT(DRM_ROTATE_0);
+	state->ckey.flags = I915_SET_COLORKEY_NONE;
 
 	return state;
 }
@@ -114,8 +115,10 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
 	struct intel_crtc_state *crtc_state;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct intel_plane_state *intel_state = to_intel_plane_state(state);
+	struct drm_crtc_state *drm_crtc_state;
+	int ret;
 
-	crtc = crtc ? crtc : plane->crtc;
+	crtc = crtc ? crtc : plane->state->crtc;
 	intel_crtc = to_intel_crtc(crtc);
 
 	/*
@@ -127,16 +130,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
 	if (!crtc)
 		return 0;
 
-	/* FIXME: temporary hack necessary while we still use the plane update
-	 * helper. */
-	if (state->state) {
-		crtc_state =
-			intel_atomic_get_crtc_state(state->state, intel_crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-	} else {
-		crtc_state = intel_crtc->config;
-	}
+	drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+	if (WARN_ON(!drm_crtc_state))
+		return -EINVAL;
+
+	crtc_state = to_intel_crtc_state(drm_crtc_state);
 
 	/*
 	 * The original src/dest coordinates are stored in state->base, but
@@ -160,20 +158,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
 	intel_state->clip.y2 =
 		crtc_state->base.active ? crtc_state->pipe_src_h : 0;
 
-	/*
-	 * Disabling a plane is always okay; we just need to update
-	 * fb tracking in a special way since cleanup_fb() won't
-	 * get called by the plane helpers.
-	 */
-	if (state->fb == NULL && plane->state->fb != NULL) {
-		/*
-		 * 'prepare' is never called when plane is being disabled, so
-		 * we need to handle frontbuffer tracking as a special case
-		 */
-		intel_crtc->atomic.disabled_planes |=
-			(1 << drm_plane_index(plane));
-	}
-
 	if (state->fb && intel_rotation_90_or_270(state->rotation)) {
 		if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 			state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
@@ -198,7 +182,12 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
 		}
 	}
 
-	return intel_plane->check_plane(plane, intel_state);
+	intel_state->visible = false;
+	ret = intel_plane->check_plane(plane, crtc_state, intel_state);
+	if (ret)
+		return ret;
+
+	return intel_plane_atomic_calc_changes(&crtc_state->base, state);
 }
 
 static void intel_plane_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 678a34f87c1c..89c1a8ce1f98 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -41,7 +41,8 @@
  *
  * The disable sequences must be performed before disabling the transcoder or
  * port. The enable sequences may only be performed after enabling the
- * transcoder and port, and after completed link training.
+ * transcoder and port, and after completed link training. Therefore the audio
+ * enable/disable sequences are part of the modeset sequence.
  *
  * The codec and controller sequences could be done either parallel or serial,
  * but generally the ELDV/PD change in the codec sequence indicates to the audio
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 198fc3c3291b..b3e437b3bb54 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -122,42 +122,6 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
 	drm_mode_set_name(panel_fixed_mode);
 }
 
-static bool
-lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
-			   const struct lvds_dvo_timing *b)
-{
-	if (a->hactive_hi != b->hactive_hi ||
-	    a->hactive_lo != b->hactive_lo)
-		return false;
-
-	if (a->hsync_off_hi != b->hsync_off_hi ||
-	    a->hsync_off_lo != b->hsync_off_lo)
-		return false;
-
-	if (a->hsync_pulse_width != b->hsync_pulse_width)
-		return false;
-
-	if (a->hblank_hi != b->hblank_hi ||
-	    a->hblank_lo != b->hblank_lo)
-		return false;
-
-	if (a->vactive_hi != b->vactive_hi ||
-	    a->vactive_lo != b->vactive_lo)
-		return false;
-
-	if (a->vsync_off != b->vsync_off)
-		return false;
-
-	if (a->vsync_pulse_width != b->vsync_pulse_width)
-		return false;
-
-	if (a->vblank_hi != b->vblank_hi ||
-	    a->vblank_lo != b->vblank_lo)
-		return false;
-
-	return true;
-}
-
 static const struct lvds_dvo_timing *
 get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
 		    const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
@@ -213,7 +177,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 	const struct lvds_dvo_timing *panel_dvo_timing;
 	const struct lvds_fp_timing *fp_timing;
 	struct drm_display_mode *panel_fixed_mode;
-	int i, downclock, drrs_mode;
+	int drrs_mode;
 
 	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
 	if (!lvds_options)
@@ -272,30 +236,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
 
-	/*
-	 * Iterate over the LVDS panel timing info to find the lowest clock
-	 * for the native resolution.
-	 */
-	downclock = panel_dvo_timing->clock;
-	for (i = 0; i < 16; i++) {
-		const struct lvds_dvo_timing *dvo_timing;
-
-		dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
-						 lvds_lfp_data_ptrs,
-						 i);
-		if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
-		    dvo_timing->clock < downclock)
-			downclock = dvo_timing->clock;
-	}
-
-	if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
-		dev_priv->lvds_downclock_avail = 1;
-		dev_priv->lvds_downclock = downclock * 10;
-		DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
-			      "Normal Clock %dKHz, downclock %dKHz\n",
-			      panel_fixed_mode->clock, 10*downclock);
-	}
-
 	fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
 				       lvds_lfp_data_ptrs,
 				       lvds_options->panel_type);
@@ -461,7 +401,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 {
 	struct sdvo_device_mapping *p_mapping;
 	const struct bdb_general_definitions *p_defs;
-	const union child_device_config *p_child;
+	const struct old_child_dev_config *child; /* legacy */
 	int i, child_device_num, count;
 	u16	block_size;
 
@@ -470,14 +410,14 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 		DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
 		return;
 	}
-	/* judge whether the size of child device meets the requirements.
-	 * If the child device size obtained from general definition block
-	 * is different with sizeof(struct child_device_config), skip the
-	 * parsing of sdvo device info
+
+	/*
+	 * Only parse SDVO mappings when the general definitions block child
+	 * device size matches that of the *legacy* child device config
+	 * struct. Thus, SDVO mapping will be skipped for newer VBT.
 	 */
-	if (p_defs->child_dev_size != sizeof(*p_child)) {
-		/* different child dev size . Ignore it */
-		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+	if (p_defs->child_dev_size != sizeof(*child)) {
+		DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
 		return;
 	}
 	/* get the block size of general definitions */
@@ -487,37 +427,37 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 		p_defs->child_dev_size;
 	count = 0;
 	for (i = 0; i < child_device_num; i++) {
-		p_child = child_device_ptr(p_defs, i);
-		if (!p_child->old.device_type) {
+		child = &child_device_ptr(p_defs, i)->old;
+		if (!child->device_type) {
 			/* skip the device block if device type is invalid */
 			continue;
 		}
-		if (p_child->old.slave_addr != SLAVE_ADDR1 &&
-			p_child->old.slave_addr != SLAVE_ADDR2) {
+		if (child->slave_addr != SLAVE_ADDR1 &&
+		    child->slave_addr != SLAVE_ADDR2) {
 			/*
 			 * If the slave address is neither 0x70 nor 0x72,
 			 * it is not a SDVO device. Skip it.
 			 */
 			continue;
 		}
-		if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
-			p_child->old.dvo_port != DEVICE_PORT_DVOC) {
+		if (child->dvo_port != DEVICE_PORT_DVOB &&
+		    child->dvo_port != DEVICE_PORT_DVOC) {
 			/* skip the incorrect SDVO port */
 			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
 			continue;
 		}
 		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
-				" %s port\n",
-				p_child->old.slave_addr,
-				(p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
-					"SDVOB" : "SDVOC");
-		p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
+			      " %s port\n",
+			      child->slave_addr,
+			      (child->dvo_port == DEVICE_PORT_DVOB) ?
+			      "SDVOB" : "SDVOC");
+		p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
 		if (!p_mapping->initialized) {
-			p_mapping->dvo_port = p_child->old.dvo_port;
-			p_mapping->slave_addr = p_child->old.slave_addr;
-			p_mapping->dvo_wiring = p_child->old.dvo_wiring;
-			p_mapping->ddc_pin = p_child->old.ddc_pin;
-			p_mapping->i2c_pin = p_child->old.i2c_pin;
+			p_mapping->dvo_port = child->dvo_port;
+			p_mapping->slave_addr = child->slave_addr;
+			p_mapping->dvo_wiring = child->dvo_wiring;
+			p_mapping->ddc_pin = child->ddc_pin;
+			p_mapping->i2c_pin = child->i2c_pin;
 			p_mapping->initialized = 1;
 			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
 				      p_mapping->dvo_port,
@@ -529,7 +469,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
 					 "two SDVO device.\n");
 		}
-		if (p_child->old.slave2_addr) {
+		if (child->slave2_addr) {
 			/* Maybe this is a SDVO device with multiple inputs */
 			/* And the mapping info is not added */
 			DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -946,6 +886,17 @@ err:
 	memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
 }
 
+static u8 translate_iboost(u8 val)
+{
+	static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
+
+	if (val >= ARRAY_SIZE(mapping)) {
+		DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
+		return 0;
+	}
+	return mapping[val];
+}
+
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 			   const struct bdb_header *bdb)
 {
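A worked example of the nibble split used below: an iboost_level byte of 0x21 encodes HDMI selector 2 (high nibble) and DP selector 1 (low nibble), which translate_iboost() maps to register values 7 and 3 respectively; any selector above 2 logs a warning and falls back to 0.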
@@ -954,23 +905,23 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 	uint8_t hdmi_level_shift;
 	int i, j;
 	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
-	uint8_t aux_channel;
+	uint8_t aux_channel, ddc_pin;
 	/* Each DDI port can have more than one value on the "DVO Port" field,
 	 * so look for all the possible values for each port and abort if more
 	 * than one is found. */
-	int dvo_ports[][2] = {
-		{DVO_PORT_HDMIA, DVO_PORT_DPA},
-		{DVO_PORT_HDMIB, DVO_PORT_DPB},
-		{DVO_PORT_HDMIC, DVO_PORT_DPC},
-		{DVO_PORT_HDMID, DVO_PORT_DPD},
-		{DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+	int dvo_ports[][3] = {
+		{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+		{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+		{DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+		{DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+		{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
 	};
 
 	/* Find the child device to use, abort if more than one found. */
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		it = dev_priv->vbt.child_dev + i;
 
-		for (j = 0; j < 2; j++) {
+		for (j = 0; j < 3; j++) {
 			if (dvo_ports[port][j] == -1)
 				break;
 
@@ -988,6 +939,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		return;
 
 	aux_channel = child->raw[25];
+	ddc_pin = child->common.ddc_pin;
 
 	is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
 	is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1019,22 +971,53 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
 	if (is_dvi) {
-		if (child->common.ddc_pin == 0x05 && port != PORT_B)
+		if (port == PORT_E) {
+			info->alternate_ddc_pin = ddc_pin;
+			/* If DDI E shares its DDC pin with another port, then
+			 * DVI/HDMI can't exist on the shared port, since both
+			 * would use the same DDC pin and the system couldn't
+			 * communicate with them separately. */
+			if (ddc_pin == DDC_PIN_B) {
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
+			} else if (ddc_pin == DDC_PIN_C) {
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
+			} else if (ddc_pin == DDC_PIN_D) {
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
+			}
+		} else if (ddc_pin == DDC_PIN_B && port != PORT_B)
 			DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
-		if (child->common.ddc_pin == 0x04 && port != PORT_C)
+		else if (ddc_pin == DDC_PIN_C && port != PORT_C)
 			DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
-		if (child->common.ddc_pin == 0x06 && port != PORT_D)
+		else if (ddc_pin == DDC_PIN_D && port != PORT_D)
 			DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
 	}
 
 	if (is_dp) {
-		if (aux_channel == 0x40 && port != PORT_A)
+		if (port == PORT_E) {
+			info->alternate_aux_channel = aux_channel;
+			/* If DDI E shares its AUX channel with another port,
+			 * then DP can't exist on the shared port, since both
+			 * would use the same AUX channel and the system
+			 * couldn't communicate with them separately. */
+			if (aux_channel == DP_AUX_A)
+				dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
+			else if (aux_channel == DP_AUX_B)
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
+			else if (aux_channel == DP_AUX_C)
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
+			else if (aux_channel == DP_AUX_D)
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
+		} else if (aux_channel == DP_AUX_A && port != PORT_A)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
-		if (aux_channel == 0x10 && port != PORT_B)
+		else if (aux_channel == DP_AUX_B && port != PORT_B)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
-		if (aux_channel == 0x20 && port != PORT_C)
+		else if (aux_channel == DP_AUX_C && port != PORT_C)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
-		if (aux_channel == 0x30 && port != PORT_D)
+		else if (aux_channel == DP_AUX_D && port != PORT_D)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
 	}
 
@@ -1046,6 +1029,16 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 			      hdmi_level_shift);
 		info->hdmi_level_shift = hdmi_level_shift;
 	}
+
+	/* Parse the I_boost config for SKL and above */
+	if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
+		info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
+		DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
+			      port_name(port), info->dp_boost_level);
+		info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
+		DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
+			      port_name(port), info->hdmi_boost_level);
+	}
 }
 
 static void parse_ddi_ports(struct drm_i915_private *dev_priv,
@@ -1075,17 +1068,39 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 	const union child_device_config *p_child;
 	union child_device_config *child_dev_ptr;
 	int i, child_device_num, count;
-	u16	block_size;
+	u8 expected_size;
+	u16 block_size;
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
 		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
 		return;
 	}
-	if (p_defs->child_dev_size < sizeof(*p_child)) {
-		DRM_ERROR("General definiton block child device size is too small.\n");
+	if (bdb->version < 195) {
+		expected_size = sizeof(struct old_child_dev_config);
+	} else if (bdb->version == 195) {
+		expected_size = 37;
+	} else if (bdb->version <= 197) {
+		expected_size = 38;
+	} else {
+		expected_size = 38;
+		BUILD_BUG_ON(sizeof(*p_child) < 38);
+		DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
+				 bdb->version, expected_size);
+	}
+
+	/* The legacy sized child device config is the minimum we need. */
+	if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+		DRM_ERROR("Child device config size %u is too small.\n",
+			  p_defs->child_dev_size);
 		return;
 	}
+
+	/* Flag an error for unexpected size, but continue anyway. */
+	if (p_defs->child_dev_size != expected_size)
+		DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
+			  p_defs->child_dev_size, expected_size, bdb->version);
+
 	/* get the block size of general definitions */
 	block_size = get_blocksize(p_defs);
 	/* get the number of child device */
@@ -1130,7 +1145,14 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 
 		child_dev_ptr = dev_priv->vbt.child_dev + count;
 		count++;
-		memcpy(child_dev_ptr, p_child, sizeof(*p_child));
+
+		/*
+		 * Copy as much as we know (sizeof) and is available
+		 * (child_dev_size) of the child device. Accessing the data must
+		 * depend on VBT version.
+		 */
+		memcpy(child_dev_ptr, p_child,
+		       min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
 	}
 	return;
 }
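The expected_size ladder above, summarized:

    /*
     * BDB < 195      -> sizeof(struct old_child_dev_config)
     * BDB == 195     -> 37 bytes
     * BDB 196..197   -> 38 bytes
     * newer          -> assume 38 bytes and log the assumption
     *
     * A mismatch is flagged with DRM_ERROR but parsing continues,
     * copying min(child_dev_size, sizeof(*p_child)) bytes per child.
     */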
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index af0b47652752..46cd5c7ebacd 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -203,9 +203,11 @@ struct bdb_general_features {
 #define DEVICE_PORT_DVOB	0x01
 #define DEVICE_PORT_DVOC	0x02
 
-/* We used to keep this struct but without any version control. We should avoid
+/*
+ * We used to keep this struct but without any version control. We should avoid
  * using it in the future, but it should be safe to keep using it in the old
- * code. */
+ * code. Do not change; we rely on its size.
+ */
 struct old_child_dev_config {
 	u16 handle;
 	u16 device_type;
@@ -231,6 +233,10 @@ struct old_child_dev_config {
 /* This one contains field offsets that are known to be common for all BDB
  * versions. Notice that the meaning of the contents may still change,
  * but at least the offsets are consistent. */
+
+/* Definitions for flags_1 */
+#define IBOOST_ENABLE (1<<3)
+
 struct common_child_dev_config {
 	u16 handle;
 	u16 device_type;
@@ -239,8 +245,13 @@ struct common_child_dev_config {
 	u8 not_common2[2];
 	u8 ddc_pin;
 	u16 edid_ptr;
+	u8 obsolete;
+	u8 flags_1;
+	u8 not_common3[13];
+	u8 iboost_level;
 } __packed;
 
+
 /* This field changes depending on the BDB version, so the most reliable way to
  * read it is by checking the BDB version and reading the raw pointer. */
 union child_device_config {
@@ -747,11 +758,6 @@ int intel_parse_bios(struct drm_device *dev);
 #define		DVO_C		2
 #define		DVO_D		3
 
-/* define the PORT for DP output type */
-#define		PORT_IDPB	7
-#define		PORT_IDPC	8
-#define		PORT_IDPD	9
-
 /* Possible values for the "DVO Port" field for versions >= 155: */
 #define DVO_PORT_HDMIA	0
 #define DVO_PORT_HDMIB	1
@@ -764,6 +770,8 @@ int intel_parse_bios(struct drm_device *dev);
 #define DVO_PORT_DPC	8
 #define DVO_PORT_DPD	9
 #define DVO_PORT_DPA	10
+#define DVO_PORT_DPE	11
+#define DVO_PORT_HDMIE	12
 #define DVO_PORT_MIPIA	21
 #define DVO_PORT_MIPIB	22
 #define DVO_PORT_MIPIC	23
@@ -778,6 +786,13 @@ int intel_parse_bios(struct drm_device *dev);
 #define MIPI_DSI_UNDEFINED_PANEL_ID	0
 #define MIPI_DSI_GENERIC_PANEL_ID	1
 
+/*
+ * PMIC vs. SoC backlight control, as specified in the pwm_blc
+ * field of the mipi_config block below.
+ */
+#define PPS_BLC_PMIC   0
+#define PPS_BLC_SOC    1
+
 struct mipi_config {
 	u16 panel_id;
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 521af2c069cb..af5e43bef4a4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -236,53 +236,6 @@ static void intel_enable_crt(struct intel_encoder *encoder)
 	intel_crt_set_dpms(encoder, crt->connector->base.dpms);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_crt_dpms(struct drm_connector *connector, int mode)
-{
-	struct drm_device *dev = connector->dev;
-	struct intel_encoder *encoder = intel_attached_encoder(connector);
-	struct drm_crtc *crtc;
-	int old_dpms;
-
-	/* PCH platforms and VLV only support on/off. */
-	if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	old_dpms = connector->dpms;
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	crtc = encoder->base.crtc;
-	if (!crtc) {
-		encoder->connectors_active = false;
-		return;
-	}
-
-	/* We need the pipe to run for anything but OFF. */
-	if (mode == DRM_MODE_DPMS_OFF)
-		encoder->connectors_active = false;
-	else
-		encoder->connectors_active = true;
-
-	/* We call connector dpms manually below in case pipe dpms doesn't
-	 * change due to cloning. */
-	if (mode < old_dpms) {
-		/* From off to on, enable the pipe first. */
-		intel_crtc_update_dpms(crtc);
-
-		intel_crt_set_dpms(encoder, mode);
-	} else {
-		intel_crt_set_dpms(encoder, mode);
-
-		intel_crtc_update_dpms(crtc);
-	}
-
-	intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_crt_mode_valid(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
@@ -798,7 +751,7 @@ static void intel_crt_reset(struct drm_connector *connector)
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
 	.reset = intel_crt_reset,
-	.dpms = intel_crt_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_crt_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = intel_crt_destroy,
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index bcb41e61877d..ba1ae031e6fd 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -244,7 +244,7 @@ void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
 void intel_csr_load_program(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	__be32 *payload = dev_priv->csr.dmc_payload;
+	u32 *payload = dev_priv->csr.dmc_payload;
 	uint32_t i, fw_size;
 
 	if (!IS_GEN9(dev)) {
@@ -256,7 +256,7 @@ void intel_csr_load_program(struct drm_device *dev)
 	fw_size = dev_priv->csr.dmc_fw_size;
 	for (i = 0; i < fw_size; i++)
 		I915_WRITE(CSR_PROGRAM_BASE + i * 4,
-			(u32 __force)payload[i]);
+			payload[i]);
 
 	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
 		I915_WRITE(dev_priv->csr.mmioaddr[i],
@@ -279,7 +279,7 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 	char substepping = intel_get_substepping(dev);
 	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
 	uint32_t i;
-	__be32 *dmc_payload;
+	uint32_t *dmc_payload;
 	bool fw_loaded = false;
 
 	if (!fw) {
@@ -375,20 +375,13 @@ static void finish_csr_load(const struct firmware *fw, void *context)
 	}
 
 	dmc_payload = csr->dmc_payload;
-	for (i = 0; i < dmc_header->fw_size; i++) {
-		uint32_t *tmp = (u32 *)&fw->data[readcount + i * 4];
-		/*
-		 * The firmware payload is an array of 32 bit words stored in
-		 * little-endian format in the firmware image and programmed
-		 * as 32 bit big-endian format to memory.
-		 */
-		dmc_payload[i] = cpu_to_be32(*tmp);
-	}
+	memcpy(dmc_payload, &fw->data[readcount], nbytes);
 
 	/* load csr program during system boot, as needed for DC states */
 	intel_csr_load_program(dev);
 	fw_loaded = true;
 
+	DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
 out:
 	if (fw_loaded)
 		intel_runtime_pm_put(dev_priv);
@@ -422,6 +415,8 @@ void intel_csr_ucode_init(struct drm_device *dev)
 		return;
 	}
 
+	DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+
 	/*
 	 * Obtain a runtime pm reference, until CSR is loaded,
 	 * to avoid entering runtime-suspend.
@@ -459,7 +454,8 @@ void intel_csr_ucode_fini(struct drm_device *dev)
 
 void assert_csr_loaded(struct drm_i915_private *dev_priv)
 {
-	WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n");
+	WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED,
+	     "CSR is not loaded.\n");
 	WARN(!I915_READ(CSR_PROGRAM_BASE),
 				"CSR program storage start is NULL\n");
 	WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cacb07b7a8f1..61575f67a626 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -31,6 +31,7 @@
 struct ddi_buf_trans {
 	u32 trans1;	/* balance leg enable, de-emph level */
 	u32 trans2;	/* vref sel, vswing */
+	u8 i_boost;	/* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
 };
 
 /* HDMI/DVI modes ignore everything but the last 2 items. So we share
@@ -38,134 +39,213 @@ struct ddi_buf_trans {
  * automatically adapt to HDMI connections as well
  */
 static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
-	{ 0x00FFFFFF, 0x0006000E },
-	{ 0x00D75FFF, 0x0005000A },
-	{ 0x00C30FFF, 0x00040006 },
-	{ 0x80AAAFFF, 0x000B0000 },
-	{ 0x00FFFFFF, 0x0005000A },
-	{ 0x00D75FFF, 0x000C0004 },
-	{ 0x80C30FFF, 0x000B0000 },
-	{ 0x00FFFFFF, 0x00040006 },
-	{ 0x80D75FFF, 0x000B0000 },
+	{ 0x00FFFFFF, 0x0006000E, 0x0 },
+	{ 0x00D75FFF, 0x0005000A, 0x0 },
+	{ 0x00C30FFF, 0x00040006, 0x0 },
+	{ 0x80AAAFFF, 0x000B0000, 0x0 },
+	{ 0x00FFFFFF, 0x0005000A, 0x0 },
+	{ 0x00D75FFF, 0x000C0004, 0x0 },
+	{ 0x80C30FFF, 0x000B0000, 0x0 },
+	{ 0x00FFFFFF, 0x00040006, 0x0 },
+	{ 0x80D75FFF, 0x000B0000, 0x0 },
 };
 
 static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
-	{ 0x00FFFFFF, 0x0007000E },
-	{ 0x00D75FFF, 0x000F000A },
-	{ 0x00C30FFF, 0x00060006 },
-	{ 0x00AAAFFF, 0x001E0000 },
-	{ 0x00FFFFFF, 0x000F000A },
-	{ 0x00D75FFF, 0x00160004 },
-	{ 0x00C30FFF, 0x001E0000 },
-	{ 0x00FFFFFF, 0x00060006 },
-	{ 0x00D75FFF, 0x001E0000 },
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },
+	{ 0x00D75FFF, 0x000F000A, 0x0 },
+	{ 0x00C30FFF, 0x00060006, 0x0 },
+	{ 0x00AAAFFF, 0x001E0000, 0x0 },
+	{ 0x00FFFFFF, 0x000F000A, 0x0 },
+	{ 0x00D75FFF, 0x00160004, 0x0 },
+	{ 0x00C30FFF, 0x001E0000, 0x0 },
+	{ 0x00FFFFFF, 0x00060006, 0x0 },
+	{ 0x00D75FFF, 0x001E0000, 0x0 },
 };
 
 static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
 					/* Idx	NT mV d	T mV d	db	*/
-	{ 0x00FFFFFF, 0x0006000E },	/* 0:	400	400	0	*/
-	{ 0x00E79FFF, 0x000E000C },	/* 1:	400	500	2	*/
-	{ 0x00D75FFF, 0x0005000A },	/* 2:	400	600	3.5	*/
-	{ 0x00FFFFFF, 0x0005000A },	/* 3:	600	600	0	*/
-	{ 0x00E79FFF, 0x001D0007 },	/* 4:	600	750	2	*/
-	{ 0x00D75FFF, 0x000C0004 },	/* 5:	600	900	3.5	*/
-	{ 0x00FFFFFF, 0x00040006 },	/* 6:	800	800	0	*/
-	{ 0x80E79FFF, 0x00030002 },	/* 7:	800	1000	2	*/
-	{ 0x00FFFFFF, 0x00140005 },	/* 8:	850	850	0	*/
-	{ 0x00FFFFFF, 0x000C0004 },	/* 9:	900	900	0	*/
-	{ 0x00FFFFFF, 0x001C0003 },	/* 10:	950	950	0	*/
-	{ 0x80FFFFFF, 0x00030002 },	/* 11:	1000	1000	0	*/
+	{ 0x00FFFFFF, 0x0006000E, 0x0 },/* 0:	400	400	0	*/
+	{ 0x00E79FFF, 0x000E000C, 0x0 },/* 1:	400	500	2	*/
+	{ 0x00D75FFF, 0x0005000A, 0x0 },/* 2:	400	600	3.5	*/
+	{ 0x00FFFFFF, 0x0005000A, 0x0 },/* 3:	600	600	0	*/
+	{ 0x00E79FFF, 0x001D0007, 0x0 },/* 4:	600	750	2	*/
+	{ 0x00D75FFF, 0x000C0004, 0x0 },/* 5:	600	900	3.5	*/
+	{ 0x00FFFFFF, 0x00040006, 0x0 },/* 6:	800	800	0	*/
+	{ 0x80E79FFF, 0x00030002, 0x0 },/* 7:	800	1000	2	*/
+	{ 0x00FFFFFF, 0x00140005, 0x0 },/* 8:	850	850	0	*/
+	{ 0x00FFFFFF, 0x000C0004, 0x0 },/* 9:	900	900	0	*/
+	{ 0x00FFFFFF, 0x001C0003, 0x0 },/* 10:	950	950	0	*/
+	{ 0x80FFFFFF, 0x00030002, 0x0 },/* 11:	1000	1000	0	*/
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
-	{ 0x00FFFFFF, 0x00000012 },
-	{ 0x00EBAFFF, 0x00020011 },
-	{ 0x00C71FFF, 0x0006000F },
-	{ 0x00AAAFFF, 0x000E000A },
-	{ 0x00FFFFFF, 0x00020011 },
-	{ 0x00DB6FFF, 0x0005000F },
-	{ 0x00BEEFFF, 0x000A000C },
-	{ 0x00FFFFFF, 0x0005000F },
-	{ 0x00DB6FFF, 0x000A000C },
+	{ 0x00FFFFFF, 0x00000012, 0x0 },
+	{ 0x00EBAFFF, 0x00020011, 0x0 },
+	{ 0x00C71FFF, 0x0006000F, 0x0 },
+	{ 0x00AAAFFF, 0x000E000A, 0x0 },
+	{ 0x00FFFFFF, 0x00020011, 0x0 },
+	{ 0x00DB6FFF, 0x0005000F, 0x0 },
+	{ 0x00BEEFFF, 0x000A000C, 0x0 },
+	{ 0x00FFFFFF, 0x0005000F, 0x0 },
+	{ 0x00DB6FFF, 0x000A000C, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
-	{ 0x00FFFFFF, 0x0007000E },
-	{ 0x00D75FFF, 0x000E000A },
-	{ 0x00BEFFFF, 0x00140006 },
-	{ 0x80B2CFFF, 0x001B0002 },
-	{ 0x00FFFFFF, 0x000E000A },
-	{ 0x00DB6FFF, 0x00160005 },
-	{ 0x80C71FFF, 0x001A0002 },
-	{ 0x00F7DFFF, 0x00180004 },
-	{ 0x80D75FFF, 0x001B0002 },
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },
+	{ 0x00D75FFF, 0x000E000A, 0x0 },
+	{ 0x00BEFFFF, 0x00140006, 0x0 },
+	{ 0x80B2CFFF, 0x001B0002, 0x0 },
+	{ 0x00FFFFFF, 0x000E000A, 0x0 },
+	{ 0x00DB6FFF, 0x00160005, 0x0 },
+	{ 0x80C71FFF, 0x001A0002, 0x0 },
+	{ 0x00F7DFFF, 0x00180004, 0x0 },
+	{ 0x80D75FFF, 0x001B0002, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
-	{ 0x00FFFFFF, 0x0001000E },
-	{ 0x00D75FFF, 0x0004000A },
-	{ 0x00C30FFF, 0x00070006 },
-	{ 0x00AAAFFF, 0x000C0000 },
-	{ 0x00FFFFFF, 0x0004000A },
-	{ 0x00D75FFF, 0x00090004 },
-	{ 0x00C30FFF, 0x000C0000 },
-	{ 0x00FFFFFF, 0x00070006 },
-	{ 0x00D75FFF, 0x000C0000 },
+	{ 0x00FFFFFF, 0x0001000E, 0x0 },
+	{ 0x00D75FFF, 0x0004000A, 0x0 },
+	{ 0x00C30FFF, 0x00070006, 0x0 },
+	{ 0x00AAAFFF, 0x000C0000, 0x0 },
+	{ 0x00FFFFFF, 0x0004000A, 0x0 },
+	{ 0x00D75FFF, 0x00090004, 0x0 },
+	{ 0x00C30FFF, 0x000C0000, 0x0 },
+	{ 0x00FFFFFF, 0x00070006, 0x0 },
+	{ 0x00D75FFF, 0x000C0000, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
 					/* Idx	NT mV d	T mV df	db	*/
-	{ 0x00FFFFFF, 0x0007000E },	/* 0:	400	400	0	*/
-	{ 0x00D75FFF, 0x000E000A },	/* 1:	400	600	3.5	*/
-	{ 0x00BEFFFF, 0x00140006 },	/* 2:	400	800	6	*/
-	{ 0x00FFFFFF, 0x0009000D },	/* 3:	450	450	0	*/
-	{ 0x00FFFFFF, 0x000E000A },	/* 4:	600	600	0	*/
-	{ 0x00D7FFFF, 0x00140006 },	/* 5:	600	800	2.5	*/
-	{ 0x80CB2FFF, 0x001B0002 },	/* 6:	600	1000	4.5	*/
-	{ 0x00FFFFFF, 0x00140006 },	/* 7:	800	800	0	*/
-	{ 0x80E79FFF, 0x001B0002 },	/* 8:	800	1000	2	*/
-	{ 0x80FFFFFF, 0x001B0002 },	/* 9:	1000	1000	0	*/
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },/* 0:	400	400	0	*/
+	{ 0x00D75FFF, 0x000E000A, 0x0 },/* 1:	400	600	3.5	*/
+	{ 0x00BEFFFF, 0x00140006, 0x0 },/* 2:	400	800	6	*/
+	{ 0x00FFFFFF, 0x0009000D, 0x0 },/* 3:	450	450	0	*/
+	{ 0x00FFFFFF, 0x000E000A, 0x0 },/* 4:	600	600	0	*/
+	{ 0x00D7FFFF, 0x00140006, 0x0 },/* 5:	600	800	2.5	*/
+	{ 0x80CB2FFF, 0x001B0002, 0x0 },/* 6:	600	1000	4.5	*/
+	{ 0x00FFFFFF, 0x00140006, 0x0 },/* 7:	800	800	0	*/
+	{ 0x80E79FFF, 0x001B0002, 0x0 },/* 8:	800	1000	2	*/
+	{ 0x80FFFFFF, 0x001B0002, 0x0 },/* 9:	1000	1000	0	*/
 };
 
+/* Skylake H and S */
 static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
-	{ 0x00000018, 0x000000a2 },
-	{ 0x00004014, 0x0000009B },
-	{ 0x00006012, 0x00000088 },
-	{ 0x00008010, 0x00000087 },
-	{ 0x00000018, 0x0000009B },
-	{ 0x00004014, 0x00000088 },
-	{ 0x00006012, 0x00000087 },
-	{ 0x00000018, 0x00000088 },
-	{ 0x00004014, 0x00000087 },
+	{ 0x00002016, 0x000000A0, 0x0 },
+	{ 0x00005012, 0x0000009B, 0x0 },
+	{ 0x00007011, 0x00000088, 0x0 },
+	{ 0x00009010, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x0000009B, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x000000DF, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
 };
 
-/* eDP 1.4 low vswing translation parameters */
+/* Skylake U */
+static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
+	{ 0x0000201B, 0x000000A2, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x00000087, 0x0 },
+	{ 0x80009010, 0x000000C7, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x0000201B, 0x0000009D, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x00000088, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
+	{ 0x00000018, 0x000000A2, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x00000087, 0x0 },
+	{ 0x80009010, 0x000000C7, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x00000018, 0x0000009D, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x00000088, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+};
+
+/*
+ * Skylake H and S
+ * eDP 1.4 low vswing translation parameters
+ */
 static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
-	{ 0x00000018, 0x000000a8 },
-	{ 0x00002016, 0x000000ab },
-	{ 0x00006012, 0x000000a2 },
-	{ 0x00008010, 0x00000088 },
-	{ 0x00000018, 0x000000ab },
-	{ 0x00004014, 0x000000a2 },
-	{ 0x00006012, 0x000000a6 },
-	{ 0x00000018, 0x000000a2 },
-	{ 0x00005013, 0x0000009c },
-	{ 0x00000018, 0x00000088 },
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000A9, 0x0 },
+	{ 0x00007011, 0x000000A2, 0x0 },
+	{ 0x00009010, 0x0000009C, 0x0 },
+	{ 0x00000018, 0x000000A9, 0x0 },
+	{ 0x00006013, 0x000000A2, 0x0 },
+	{ 0x00007011, 0x000000A6, 0x0 },
+	{ 0x00000018, 0x000000AB, 0x0 },
+	{ 0x00007013, 0x0000009F, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
+};
+
+/*
+ * Skylake U
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000A9, 0x0 },
+	{ 0x00007011, 0x000000A2, 0x0 },
+	{ 0x00009010, 0x0000009C, 0x0 },
+	{ 0x00000018, 0x000000A9, 0x0 },
+	{ 0x00006013, 0x000000A2, 0x0 },
+	{ 0x00007011, 0x000000A6, 0x0 },
+	{ 0x00002016, 0x000000AB, 0x0 },
+	{ 0x00005013, 0x0000009F, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
 };
 
+/*
+ * Skylake Y
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000AB, 0x0 },
+	{ 0x00007011, 0x000000A4, 0x0 },
+	{ 0x00009010, 0x000000DF, 0x0 },
+	{ 0x00000018, 0x000000AA, 0x0 },
+	{ 0x00006013, 0x000000A4, 0x0 },
+	{ 0x00007011, 0x0000009D, 0x0 },
+	{ 0x00000018, 0x000000A0, 0x0 },
+	{ 0x00006012, 0x000000DF, 0x0 },
+	{ 0x00000018, 0x0000008A, 0x0 },
+};
 
+/* Skylake U, H and S */
 static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
-	{ 0x00000018, 0x000000ac },
-	{ 0x00005012, 0x0000009d },
-	{ 0x00007011, 0x00000088 },
-	{ 0x00000018, 0x000000a1 },
-	{ 0x00000018, 0x00000098 },
-	{ 0x00004013, 0x00000088 },
-	{ 0x00006012, 0x00000087 },
-	{ 0x00000018, 0x000000df },
-	{ 0x00003015, 0x00000087 },
-	{ 0x00003015, 0x000000c7 },
-	{ 0x00000018, 0x000000c7 },
+	{ 0x00000018, 0x000000AC, 0x0 },
+	{ 0x00005012, 0x0000009D, 0x0 },
+	{ 0x00007011, 0x00000088, 0x0 },
+	{ 0x00000018, 0x000000A1, 0x0 },
+	{ 0x00000018, 0x00000098, 0x0 },
+	{ 0x00004013, 0x00000088, 0x0 },
+	{ 0x00006012, 0x00000087, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
+	{ 0x00003015, 0x00000087, 0x0 },	/* Default */
+	{ 0x00003015, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
+	{ 0x00000018, 0x000000A1, 0x0 },
+	{ 0x00005012, 0x000000DF, 0x0 },
+	{ 0x00007011, 0x00000084, 0x0 },
+	{ 0x00000018, 0x000000A4, 0x0 },
+	{ 0x00000018, 0x0000009D, 0x0 },
+	{ 0x00004013, 0x00000080, 0x0 },
+	{ 0x00006013, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x0000008A, 0x0 },
+	{ 0x00003015, 0x000000C7, 0x0 },	/* Default */
+	{ 0x80003015, 0x000000C7, 0x7 },	/* Uses I_boost level 0x7 */
+	{ 0x00000018, 0x000000C7, 0x0 },
 };
 
 struct bxt_ddi_buf_trans {
@@ -181,16 +261,16 @@ struct bxt_ddi_buf_trans {
  */
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
 					/* Idx	NT mV diff	db  */
-	{ 52,  0,    0, 128, true  },	/* 0:	400		0   */
-	{ 78,  0,    0, 85,  false },	/* 1:	400		3.5 */
-	{ 104, 0,    0, 64,  false },	/* 2:	400		6   */
-	{ 154, 0,    0, 43,  false },	/* 3:	400		9.5 */
-	{ 77,  0,    0, 128, false },	/* 4:	600		0   */
-	{ 116, 0,    0, 85,  false },	/* 5:	600		3.5 */
-	{ 154, 0,    0, 64,  false },	/* 6:	600		6   */
-	{ 102, 0,    0, 128, false },	/* 7:	800		0   */
-	{ 154, 0,    0, 85,  false },	/* 8:	800		3.5 */
-	{ 154, 0x9A, 1, 128, false },  /* 9:	1200		0   */
+	{ 52,  0x9A, 0, 128, true  },	/* 0:	400		0   */
+	{ 78,  0x9A, 0, 85,  false },	/* 1:	400		3.5 */
+	{ 104, 0x9A, 0, 64,  false },	/* 2:	400		6   */
+	{ 154, 0x9A, 0, 43,  false },	/* 3:	400		9.5 */
+	{ 77,  0x9A, 0, 128, false },	/* 4:	600		0   */
+	{ 116, 0x9A, 0, 85,  false },	/* 5:	600		3.5 */
+	{ 154, 0x9A, 0, 64,  false },	/* 6:	600		6   */
+	{ 102, 0x9A, 0, 128, false },	/* 7:	800		0   */
+	{ 154, 0x9A, 0, 85,  false },	/* 8:	800		3.5 */
+	{ 154, 0x9A, 1, 128, false },	/* 9:	1200		0   */
 };
 
 /* BSpec has 2 recommended values - entries 0 and 8.
@@ -198,18 +278,21 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
  */
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
 					/* Idx	NT mV diff	db  */
-	{ 52,  0,    0, 128, false },	/* 0:	400		0   */
-	{ 52,  0,    0, 85,  false },	/* 1:	400		3.5 */
-	{ 52,  0,    0, 64,  false },	/* 2:	400		6   */
-	{ 42,  0,    0, 43,  false },	/* 3:	400		9.5 */
-	{ 77,  0,    0, 128, false },	/* 4:	600		0   */
-	{ 77,  0,    0, 85,  false },	/* 5:	600		3.5 */
-	{ 77,  0,    0, 64,  false },	/* 6:	600		6   */
-	{ 102, 0,    0, 128, false },	/* 7:	800		0   */
-	{ 102, 0,    0, 85,  false },	/* 8:	800		3.5 */
+	{ 52,  0x9A, 0, 128, false },	/* 0:	400		0   */
+	{ 52,  0x9A, 0, 85,  false },	/* 1:	400		3.5 */
+	{ 52,  0x9A, 0, 64,  false },	/* 2:	400		6   */
+	{ 42,  0x9A, 0, 43,  false },	/* 3:	400		9.5 */
+	{ 77,  0x9A, 0, 128, false },	/* 4:	600		0   */
+	{ 77,  0x9A, 0, 85,  false },	/* 5:	600		3.5 */
+	{ 77,  0x9A, 0, 64,  false },	/* 6:	600		6   */
+	{ 102, 0x9A, 0, 128, false },	/* 7:	800		0   */
+	{ 102, 0x9A, 0, 85,  false },	/* 8:	800		3.5 */
 	{ 154, 0x9A, 1, 128, true },	/* 9:	1200		0   */
 };
 
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+				    enum port port, int type);
+
 static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
 				 struct intel_digital_port **dig_port,
 				 enum port *port)
@@ -249,6 +332,77 @@ intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
 	return intel_dig_port->hdmi.hdmi_reg;
 }
 
+static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
+							int *n_entries)
+{
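+	/*
+	 * Pick the DP buffer translation table matching this SKL
+	 * variant: Y (ULX), U (ULT) or H/S.
+	 */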
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		ddi_translations = skl_y_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+	} else if (IS_SKL_ULT(dev)) {
+		ddi_translations = skl_u_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+	} else {
+		ddi_translations = skl_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+	}
+
+	return ddi_translations;
+}
+
+static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
+							 int *n_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_y_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_y_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+		}
+	} else if (IS_SKL_ULT(dev)) {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_u_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_u_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+		}
+	} else {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+		}
+	}
+
+	return ddi_translations;
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_hdmi(struct drm_device *dev,
+		       int *n_entries)
+{
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		ddi_translations = skl_y_ddi_translations_hdmi;
+		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+	} else {
+		ddi_translations = skl_ddi_translations_hdmi;
+		*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+	}
+
+	return ddi_translations;
+}
+
 /*
  * Starting with Haswell, DDI port buffers must be programmed with correct
  * values in advance. The buffer values are different for FDI and DP modes,
@@ -261,6 +415,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
+	u32 iboost_bit = 0;
 	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
 	    size;
 	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
@@ -280,19 +435,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
 		return;
 	} else if (IS_SKYLAKE(dev)) {
 		ddi_translations_fdi = NULL;
-		ddi_translations_dp = skl_ddi_translations_dp;
-		n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
-		if (dev_priv->edp_low_vswing) {
-			ddi_translations_edp = skl_ddi_translations_edp;
-			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
-		} else {
-			ddi_translations_edp = skl_ddi_translations_dp;
-			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
-		}
-
-		ddi_translations_hdmi = skl_ddi_translations_hdmi;
-		n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
+		ddi_translations_dp =
+				skl_get_buf_trans_dp(dev, &n_dp_entries);
+		ddi_translations_edp =
+				skl_get_buf_trans_edp(dev, &n_edp_entries);
+		ddi_translations_hdmi =
+				skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+		hdmi_default_entry = 8;
+		/* If we're boosting the current, set bit 31 of trans1 */
+		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
+		    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+			iboost_bit = 1<<31;
 	} else if (IS_BROADWELL(dev)) {
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
@@ -353,7 +506,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
 	}
 
 	for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
-		I915_WRITE(reg, ddi_translations[i].trans1);
+		I915_WRITE(reg, ddi_translations[i].trans1 | iboost_bit);
 		reg += 4;
 		I915_WRITE(reg, ddi_translations[i].trans2);
 		reg += 4;
@@ -368,7 +521,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
 		hdmi_level = hdmi_default_entry;
 
 	/* Entry 9 is for HDMI: */
-	I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
+	I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
 	reg += 4;
 	I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
 	reg += 4;
@@ -625,11 +778,11 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
 	(void) (&__a == &__b);			\
 	__a > __b ? (__a - __b) : (__b - __a); })
 
-struct wrpll_rnp {
+struct hsw_wrpll_rnp {
 	unsigned p, n2, r2;
 };
 
-static unsigned wrpll_get_budget_for_freq(int clock)
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
 {
 	unsigned budget;
 
@@ -703,9 +856,9 @@ static unsigned wrpll_get_budget_for_freq(int clock)
 	return budget;
 }
 
-static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
-			     unsigned r2, unsigned n2, unsigned p,
-			     struct wrpll_rnp *best)
+static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+				 unsigned r2, unsigned n2, unsigned p,
+				 struct hsw_wrpll_rnp *best)
 {
 	uint64_t a, b, c, d, diff, diff_best;
 
@@ -762,8 +915,7 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
 	/* Otherwise a < c && b >= d, do nothing */
 }
 
-static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
-				     int reg)
+static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
 {
 	int refclk = LC_FREQ;
 	int n, p, r;
@@ -856,6 +1008,26 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
 	return dco_freq / (p0 * p1 * p2 * 5);
 }
 
+static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+{
+	int dotclock;
+
+	if (pipe_config->has_pch_encoder)
+		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+						    &pipe_config->fdi_m_n);
+	else if (pipe_config->has_dp_encoder)
+		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+						    &pipe_config->dp_m_n);
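+	/* 12bpc HDMI (36bpp) runs the port at 1.5x the pixel clock. */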
+	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
+		dotclock = pipe_config->port_clock * 2 / 3;
+	else
+		dotclock = pipe_config->port_clock;
+
+	if (pipe_config->pixel_multiplier)
+		dotclock /= pipe_config->pixel_multiplier;
+
+	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+}
 
 static void skl_ddi_clock_get(struct intel_encoder *encoder,
 				struct intel_crtc_state *pipe_config)
@@ -902,12 +1074,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
 
 	pipe_config->port_clock = link_clock;
 
-	if (pipe_config->has_dp_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-						 &pipe_config->dp_m_n);
-	else
-		pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+	ddi_dotclock_get(pipe_config);
 }
 
 static void hsw_ddi_clock_get(struct intel_encoder *encoder,
@@ -929,10 +1096,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
 		link_clock = 270000;
 		break;
 	case PORT_CLK_SEL_WRPLL1:
-		link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
 		break;
 	case PORT_CLK_SEL_WRPLL2:
-		link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
 		break;
 	case PORT_CLK_SEL_SPLL:
 		pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
@@ -954,23 +1121,32 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
 
 	pipe_config->port_clock = link_clock * 2;
 
-	if (pipe_config->has_pch_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-						 &pipe_config->fdi_m_n);
-	else if (pipe_config->has_dp_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-						 &pipe_config->dp_m_n);
-	else
-		pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+	ddi_dotclock_get(pipe_config);
 }
 
 static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
 				enum intel_dpll_id dpll)
 {
-	/* FIXME formula not available in bspec */
-	return 0;
+	struct intel_shared_dpll *pll;
+	struct intel_dpll_hw_state *state;
+	intel_clock_t clock;
+
+	/* For DDI ports we always use a shared PLL. */
+	if (WARN_ON(dpll == DPLL_ID_PRIVATE))
+		return 0;
+
+	pll = &dev_priv->shared_dplls[dpll];
+	state = &pll->config.hw_state;
+
+	clock.m1 = 2;
+	clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
+	if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+		clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
+	clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+	clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+	clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
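+	/* The BXT port PLL reuses the CHV DPLL layout; refclk is 100 MHz. */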
+	return chv_calc_dpll_params(100000, &clock);
 }
 
 static void bxt_ddi_clock_get(struct intel_encoder *encoder,
@@ -980,16 +1156,9 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
 	enum port port = intel_ddi_get_encoder_port(encoder);
 	uint32_t dpll = port;
 
-	pipe_config->port_clock =
-		bxt_calc_pll_link(dev_priv, dpll);
+	pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
 
-	if (pipe_config->has_dp_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-							&pipe_config->dp_m_n);
-	else
-		pipe_config->base.adjusted_mode.crtc_clock =
-							pipe_config->port_clock;
+	ddi_dotclock_get(pipe_config);
 }
 
 void intel_ddi_clock_get(struct intel_encoder *encoder,
@@ -1011,12 +1180,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 {
 	uint64_t freq2k;
 	unsigned p, n2, r2;
-	struct wrpll_rnp best = { 0, 0, 0 };
+	struct hsw_wrpll_rnp best = { 0, 0, 0 };
 	unsigned budget;
 
 	freq2k = clock / 100;
 
-	budget = wrpll_get_budget_for_freq(clock);
+	budget = hsw_wrpll_get_budget_for_freq(clock);
 
 	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
 	 * and directly pass the LC PLL to it. */
@@ -1060,8 +1229,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 		     n2++) {
 
 			for (p = P_MIN; p <= P_MAX; p += P_INC)
-				wrpll_update_rnp(freq2k, budget,
-						 r2, n2, p, &best);
+				hsw_wrpll_update_rnp(freq2k, budget,
+						     r2, n2, p, &best);
 		}
 	}
 
@@ -1105,6 +1274,102 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
 	return true;
 }
 
+struct skl_wrpll_context {
+	uint64_t min_deviation;		/* current minimal deviation */
+	uint64_t central_freq;		/* chosen central freq */
+	uint64_t dco_freq;		/* chosen dco freq */
+	unsigned int p;			/* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6% of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION	100
+#define SKL_DCO_MAX_NDEVIATION	600
+
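+/*
+ * The deviation below is computed in units of 0.01%
+ * (10000 * |dco_freq - central_freq| / central_freq), so the
+ * limits above correspond to +1% and -6%.
+ */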
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+				  uint64_t central_freq,
+				  uint64_t dco_freq,
+				  unsigned int divider)
+{
+	uint64_t deviation;
+
+	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+			      central_freq);
+
+	/* positive deviation */
+	if (dco_freq >= central_freq) {
+		if (deviation < SKL_DCO_MAX_PDEVIATION &&
+		    deviation < ctx->min_deviation) {
+			ctx->min_deviation = deviation;
+			ctx->central_freq = central_freq;
+			ctx->dco_freq = dco_freq;
+			ctx->p = divider;
+		}
+	/* negative deviation */
+	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+		   deviation < ctx->min_deviation) {
+		ctx->min_deviation = deviation;
+		ctx->central_freq = central_freq;
+		ctx->dco_freq = dco_freq;
+		ctx->p = divider;
+	}
+}
+
+static void skl_wrpll_get_multipliers(unsigned int p,
+				      unsigned int *p0 /* out */,
+				      unsigned int *p1 /* out */,
+				      unsigned int *p2 /* out */)
+{
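+	/*
+	 * Factor the total divider p into the hardware's P0 (pdiv),
+	 * P1 (qdiv ratio) and P2 (kdiv): p = p0 * p1 * p2.
+	 */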
+	/* even dividers */
+	if (p % 2 == 0) {
+		unsigned int half = p / 2;
+
+		if (half == 1 || half == 2 || half == 3 || half == 5) {
+			*p0 = 2;
+			*p1 = 1;
+			*p2 = half;
+		} else if (half % 2 == 0) {
+			*p0 = 2;
+			*p1 = half / 2;
+			*p2 = 2;
+		} else if (half % 3 == 0) {
+			*p0 = 3;
+			*p1 = half / 3;
+			*p2 = 2;
+		} else if (half % 7 == 0) {
+			*p0 = 7;
+			*p1 = half / 7;
+			*p2 = 2;
+		}
+	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
+		*p0 = 3;
+		*p1 = 1;
+		*p2 = p / 3;
+	} else if (p == 5 || p == 7) {
+		*p0 = p;
+		*p1 = 1;
+		*p2 = 1;
+	} else if (p == 15) {
+		*p0 = 3;
+		*p1 = 1;
+		*p2 = 5;
+	} else if (p == 21) {
+		*p0 = 7;
+		*p1 = 1;
+		*p2 = 3;
+	} else if (p == 35) {
+		*p0 = 7;
+		*p1 = 1;
+		*p2 = 5;
+	}
+}
+
 struct skl_wrpll_params {
 	uint32_t        dco_fraction;
 	uint32_t        dco_integer;
@@ -1115,150 +1380,145 @@ struct skl_wrpll_params {
 	uint32_t        central_freq;
 };
 
-static void
-skl_ddi_calculate_wrpll(int clock /* in Hz */,
-			struct skl_wrpll_params *wrpll_params)
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+				      uint64_t afe_clock,
+				      uint64_t central_freq,
+				      uint32_t p0, uint32_t p1, uint32_t p2)
 {
-	uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
-	uint64_t dco_central_freq[3] = {8400000000ULL,
-					9000000000ULL,
-					9600000000ULL};
-	uint32_t min_dco_deviation = 400;
-	uint32_t min_dco_index = 3;
-	uint32_t P0[4] = {1, 2, 3, 7};
-	uint32_t P2[4] = {1, 2, 3, 5};
-	bool found = false;
-	uint32_t candidate_p = 0;
-	uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
-	uint32_t candidate_p2[3] = {0};
-	uint32_t dco_central_freq_deviation[3];
-	uint32_t i, P1, k, dco_count;
-	bool retry_with_odd = false;
 	uint64_t dco_freq;
 
-	/* Determine P0, P1 or P2 */
-	for (dco_count = 0; dco_count < 3; dco_count++) {
-		found = false;
-		candidate_p =
-			div64_u64(dco_central_freq[dco_count], afe_clock);
-		if (retry_with_odd == false)
-			candidate_p = (candidate_p % 2 == 0 ?
-				candidate_p : candidate_p + 1);
-
-		for (P1 = 1; P1 < candidate_p; P1++) {
-			for (i = 0; i < 4; i++) {
-				if (!(P0[i] != 1 || P1 == 1))
-					continue;
-
-				for (k = 0; k < 4; k++) {
-					if (P1 != 1 && P2[k] != 2)
-						continue;
-
-					if (candidate_p == P0[i] * P1 * P2[k]) {
-						/* Found possible P0, P1, P2 */
-						found = true;
-						candidate_p0[dco_count] = P0[i];
-						candidate_p1[dco_count] = P1;
-						candidate_p2[dco_count] = P2[k];
-						goto found;
-					}
-
-				}
-			}
-		}
+	switch (central_freq) {
+	case 9600000000ULL:
+		params->central_freq = 0;
+		break;
+	case 9000000000ULL:
+		params->central_freq = 1;
+		break;
+	case 8400000000ULL:
+		params->central_freq = 3;
+	}
 
-found:
-		if (found) {
-			dco_central_freq_deviation[dco_count] =
-				div64_u64(10000 *
-					  abs_diff((candidate_p * afe_clock),
-						   dco_central_freq[dco_count]),
-					  dco_central_freq[dco_count]);
-
-			if (dco_central_freq_deviation[dco_count] <
-				min_dco_deviation) {
-				min_dco_deviation =
-					dco_central_freq_deviation[dco_count];
-				min_dco_index = dco_count;
-			}
-		}
+	switch (p0) {
+	case 1:
+		params->pdiv = 0;
+		break;
+	case 2:
+		params->pdiv = 1;
+		break;
+	case 3:
+		params->pdiv = 2;
+		break;
+	case 7:
+		params->pdiv = 4;
+		break;
+	default:
+		WARN(1, "Incorrect PDiv\n");
+	}
 
-		if (min_dco_index > 2 && dco_count == 2) {
-			retry_with_odd = true;
-			dco_count = 0;
-		}
+	switch (p2) {
+	case 5:
+		params->kdiv = 0;
+		break;
+	case 2:
+		params->kdiv = 1;
+		break;
+	case 3:
+		params->kdiv = 2;
+		break;
+	case 1:
+		params->kdiv = 3;
+		break;
+	default:
+		WARN(1, "Incorrect KDiv\n");
 	}
 
-	if (min_dco_index > 2) {
-		WARN(1, "No valid values found for the given pixel clock\n");
-	} else {
-		wrpll_params->central_freq = dco_central_freq[min_dco_index];
+	params->qdiv_ratio = p1;
+	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
 
-		switch (dco_central_freq[min_dco_index]) {
-		case 9600000000ULL:
-			wrpll_params->central_freq = 0;
-			break;
-		case 9000000000ULL:
-			wrpll_params->central_freq = 1;
-			break;
-		case 8400000000ULL:
-			wrpll_params->central_freq = 3;
-		}
+	dco_freq = p0 * p1 * p2 * afe_clock;
 
-		switch (candidate_p0[min_dco_index]) {
-		case 1:
-			wrpll_params->pdiv = 0;
-			break;
-		case 2:
-			wrpll_params->pdiv = 1;
-			break;
-		case 3:
-			wrpll_params->pdiv = 2;
-			break;
-		case 7:
-			wrpll_params->pdiv = 4;
-			break;
-		default:
-			WARN(1, "Incorrect PDiv\n");
-		}
+	/*
+	 * Intermediate values are in Hz.
+	 * Divide by MHz to match bspec; dco_fraction is the fractional
+	 * MHz expressed in 1/2^15 MHz units (hence the 0x8000 factor).
+	 */
+	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+	params->dco_fraction =
+		div_u64((div_u64(dco_freq, 24) -
+			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
 
-		switch (candidate_p2[min_dco_index]) {
-		case 5:
-			wrpll_params->kdiv = 0;
-			break;
-		case 2:
-			wrpll_params->kdiv = 1;
-			break;
-		case 3:
-			wrpll_params->kdiv = 2;
-			break;
-		case 1:
-			wrpll_params->kdiv = 3;
-			break;
-		default:
-			WARN(1, "Incorrect KDiv\n");
+static bool
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+			struct skl_wrpll_params *wrpll_params)
+{
+	uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+	uint64_t dco_central_freq[3] = {8400000000ULL,
+					9000000000ULL,
+					9600000000ULL};
+	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
+					     24, 28, 30, 32, 36, 40, 42, 44,
+					     48, 52, 54, 56, 60, 64, 66, 68,
+					     70, 72, 76, 78, 80, 84, 88, 90,
+					     92, 96, 98 };
+	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+	static const struct {
+		const int *list;
+		int n_dividers;
+	} dividers[] = {
+		{ even_dividers, ARRAY_SIZE(even_dividers) },
+		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
+	};
+	struct skl_wrpll_context ctx;
+	unsigned int dco, d, i;
+	unsigned int p0, p1, p2;
+
+	skl_wrpll_context_init(&ctx);
+
+	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+			for (i = 0; i < dividers[d].n_dividers; i++) {
+				unsigned int p = dividers[d].list[i];
+				uint64_t dco_freq = p * afe_clock;
+
+				skl_wrpll_try_divider(&ctx,
+						      dco_central_freq[dco],
+						      dco_freq,
+						      p);
+				/*
+				 * Skip the remaining dividers if we're sure to
+				 * have found the definitive divider; we can't
+				 * improve on a 0 deviation.
+				 */
+				if (ctx.min_deviation == 0)
+					goto skip_remaining_dividers;
+			}
 		}
 
-		wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
-		wrpll_params->qdiv_mode =
-			(wrpll_params->qdiv_ratio == 1) ? 0 : 1;
-
-		dco_freq = candidate_p0[min_dco_index] *
-			candidate_p1[min_dco_index] *
-			candidate_p2[min_dco_index] * afe_clock;
-
+skip_remaining_dividers:
 		/*
-		 * Intermediate values are in Hz.
-		 * Divide by MHz to match bsepc
+		 * If a solution is found with an even divider, prefer
+		 * this one.
 		 */
-		wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
-		wrpll_params->dco_fraction =
-			div_u64(((div_u64(dco_freq, 24) -
-				  wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+		if (d == 0 && ctx.p)
+			break;
+	}
 
+	if (!ctx.p) {
+		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+		return false;
 	}
-}
 
+	/*
+	 * gcc incorrectly analyses that these can be used without being
+	 * initialized. To be fair, it's hard to guess.
+	 */
+	p0 = p1 = p2 = 0;
+	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+				  p0, p1, p2);
+
+	return true;
+}
 
 static bool
 skl_ddi_pll_select(struct intel_crtc *intel_crtc,
@@ -1281,7 +1541,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
 
 		ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
-		skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+		if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+			return false;
 
 		cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
 			 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -1293,17 +1554,14 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
 			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
 			 wrpll_params.central_freq;
 	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
-		struct drm_encoder *encoder = &intel_encoder->base;
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		switch (intel_dp->link_bw) {
-		case DP_LINK_BW_1_62:
+		switch (crtc_state->port_clock / 2) {
+		case 81000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
 			break;
-		case DP_LINK_BW_2_7:
+		case 135000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
 			break;
-		case DP_LINK_BW_5_4:
+		case 270000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
 			break;
 		}
@@ -1334,6 +1592,7 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
 
 /* bxt clock parameters */
 struct bxt_clk_div {
+	int clock;
 	uint32_t p1;
 	uint32_t p2;
 	uint32_t m2_int;
@@ -1343,14 +1602,14 @@ struct bxt_clk_div {
 };
 
 /* pre-calculated values for DP linkrates */
-static struct bxt_clk_div bxt_dp_clk_val[7] = {
-	/* 162 */ {4, 2, 32, 1677722, 1, 1},
-	/* 270 */ {4, 1, 27,       0, 0, 1},
-	/* 540 */ {2, 1, 27,       0, 0, 1},
-	/* 216 */ {3, 2, 32, 1677722, 1, 1},
-	/* 243 */ {4, 1, 24, 1258291, 1, 1},
-	/* 324 */ {4, 1, 32, 1677722, 1, 1},
-	/* 432 */ {3, 1, 32, 1677722, 1, 1}
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+	{162000, 4, 2, 32, 1677722, 1, 1},
+	{270000, 4, 1, 27,       0, 0, 1},
+	{540000, 2, 1, 27,       0, 0, 1},
+	{216000, 3, 2, 32, 1677722, 1, 1},
+	{243000, 4, 1, 24, 1258291, 1, 1},
+	{324000, 4, 1, 32, 1677722, 1, 1},
+	{432000, 3, 1, 32, 1677722, 1, 1}
 };
 
 static bool
@@ -1363,7 +1622,7 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
 	struct bxt_clk_div clk_div = {0};
 	int vco = 0;
 	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
-	uint32_t dcoampovr_en_h, dco_amp, lanestagger;
+	uint32_t lanestagger;
 
 	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
 		intel_clock_t best_clock;
@@ -1390,29 +1649,19 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
 		vco = best_clock.vco;
 	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
 			intel_encoder->type == INTEL_OUTPUT_EDP) {
-		struct drm_encoder *encoder = &intel_encoder->base;
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		int i;
 
-		switch (intel_dp->link_bw) {
-		case DP_LINK_BW_1_62:
-			clk_div = bxt_dp_clk_val[0];
-			break;
-		case DP_LINK_BW_2_7:
-			clk_div = bxt_dp_clk_val[1];
-			break;
-		case DP_LINK_BW_5_4:
-			clk_div = bxt_dp_clk_val[2];
-			break;
-		default:
-			clk_div = bxt_dp_clk_val[0];
-			DRM_ERROR("Unknown link rate\n");
+		clk_div = bxt_dp_clk_val[0];
+		for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+			if (bxt_dp_clk_val[i].clock == clock) {
+				clk_div = bxt_dp_clk_val[i];
+				break;
+			}
 		}
 		vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
 	}
 
-	dco_amp = 15;
-	dcoampovr_en_h = 0;
-	if (vco >= 6200000 && vco <= 6480000) {
+	if (vco >= 6200000 && vco <= 6700000) {
 		prop_coef = 4;
 		int_coef = 9;
 		gain_ctl = 3;
@@ -1423,8 +1672,6 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
 		int_coef = 11;
 		gain_ctl = 3;
 		targ_cnt = 9;
-		if (vco >= 4800000 && vco < 5400000)
-			dcoampovr_en_h = 1;
 	} else if (vco == 5400000) {
 		prop_coef = 3;
 		int_coef = 8;
@@ -1466,10 +1713,13 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
 
 	crtc_state->dpll_hw_state.pll8 = targ_cnt;
 
-	if (dcoampovr_en_h)
-		crtc_state->dpll_hw_state.pll10 = PORT_PLL_DCO_AMP_OVR_EN_H;
+	crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
 
-	crtc_state->dpll_hw_state.pll10 |= PORT_PLL_DCO_AMP(dco_amp);
+	crtc_state->dpll_hw_state.pll10 =
+		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+		| PORT_PLL_DCO_AMP_OVR_EN_H;
+
+	crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
 
 	crtc_state->dpll_hw_state.pcsdw12 =
 		LANESTAGGER_STRAP_OVRD | lanestagger;
@@ -1799,8 +2049,65 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
 			   TRANS_CLK_SEL_DISABLED);
 }
 
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-			     enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
+			       enum port port, int type)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct ddi_buf_trans *ddi_translations;
+	uint8_t iboost;
+	uint8_t dp_iboost, hdmi_iboost;
+	int n_entries;
+	u32 reg;
+
+	/* VBT may override standard boost values */
+	dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+	hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+
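+	/*
+	 * Without a VBT override, I_boost comes from the translation
+	 * entry for the current vswing/pre-emphasis level.
+	 */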
+	if (type == INTEL_OUTPUT_DISPLAYPORT) {
+		if (dp_iboost) {
+			iboost = dp_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+			iboost = ddi_translations[level].i_boost;
+		}
+	} else if (type == INTEL_OUTPUT_EDP) {
+		if (dp_iboost) {
+			iboost = dp_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+			iboost = ddi_translations[level].i_boost;
+		}
+	} else if (type == INTEL_OUTPUT_HDMI) {
+		if (hdmi_iboost) {
+			iboost = hdmi_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+			iboost = ddi_translations[level].i_boost;
+		}
+	} else {
+		return;
+	}
+
+	/* Make sure that the requested I_boost is valid */
+	if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
+		DRM_ERROR("Invalid I_boost value %u\n", iboost);
+		return;
+	}
+
+	reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
+	reg &= ~BALANCE_LEG_MASK(port);
+	reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
+
+	if (iboost)
+		reg |= iboost << BALANCE_LEG_SHIFT(port);
+	else
+		reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+
+	I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+}
+
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+				    enum port port, int type)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct bxt_ddi_buf_trans *ddi_translations;
@@ -1860,6 +2167,73 @@ void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
 	I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
 }
 
+static uint32_t translate_signal_level(int signal_levels)
+{
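+	/* Map the vswing/pre-emphasis pair onto a translation table index. */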
+	uint32_t level;
+
+	switch (signal_levels) {
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+			      signal_levels);
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 0;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 1;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+		level = 2;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
+		level = 3;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 4;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 5;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+		level = 6;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 7;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 8;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 9;
+		break;
+	}
+
+	return level;
+}
+
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dport->base.base.dev;
+	struct intel_encoder *encoder = &dport->base;
+	uint8_t train_set = intel_dp->train_set[0];
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	enum port port = dport->port;
+	uint32_t level;
+
+	level = translate_signal_level(signal_levels);
+
+	if (IS_SKYLAKE(dev))
+		skl_ddi_set_iboost(dev, level, port, encoder->type);
+	else if (IS_BROXTON(dev))
+		bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+
+	return DDI_BUF_TRANS_SELECT(level);
+}
+
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
@@ -2404,7 +2778,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
 	temp = I915_READ(BXT_PORT_PLL(port, 9));
 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
-	temp |= (5 << 1);
+	temp |= pll->config.hw_state.pll9;
 	I915_WRITE(BXT_PORT_PLL(port, 9), temp);
 
 	temp = I915_READ(BXT_PORT_PLL(port, 10));
@@ -2417,8 +2791,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
 	temp |= PORT_PLL_RECALIBRATE;
 	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
-	/* Enable 10 bit clock */
-	temp |= PORT_PLL_10BIT_CLK_ENABLE;
+	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+	temp |= pll->config.hw_state.ebb4;
 	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
 
 	/* Enable PLL */
@@ -2469,13 +2843,38 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 		return false;
 
 	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
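+	/*
+	 * Keep only the bits we actually program, so the read-back state
+	 * matches the dpll_hw_state computed at modeset time.
+	 */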
+	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
 	hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+	hw_state->pll0 &= PORT_PLL_M2_MASK;
+
 	hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+	hw_state->pll1 &= PORT_PLL_N_MASK;
+
 	hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
 	hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
 	hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+			  PORT_PLL_INT_COEFF_MASK |
+			  PORT_PLL_GAIN_CTL_MASK;
+
 	hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+	hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
 	hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+			   PORT_PLL_DCO_AMP_MASK;
+
 	/*
 	 * While we write to the group register to program all lanes at once we
 	 * can read only lane registers. We configure all lanes the same way, so
@@ -2486,6 +2885,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
 				 hw_state->pcsdw12,
 				 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
 
 	return true;
 }
@@ -2510,7 +2910,6 @@ void intel_ddi_pll_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t val = I915_READ(LCPLL_CTL);
-	int cdclk_freq;
 
 	if (IS_SKYLAKE(dev))
 		skl_shared_dplls_init(dev_priv);
@@ -2519,10 +2918,10 @@ void intel_ddi_pll_init(struct drm_device *dev)
 	else
 		hsw_shared_dplls_init(dev_priv);
 
-	cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-	DRM_DEBUG_KMS("CDCLK running at %dKHz\n", cdclk_freq);
-
 	if (IS_SKYLAKE(dev)) {
+		int cdclk_freq;
+
+		cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
 		dev_priv->skl_boot_cdclk = cdclk_freq;
 		if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
 			DRM_ERROR("LCPLL1 is disabled\n");
@@ -2618,20 +3017,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
 	I915_WRITE(_FDI_RXA_CTL, val);
 }
 
-static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
-{
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-	int type = intel_dig_port->base.type;
-
-	if (type != INTEL_OUTPUT_DISPLAYPORT &&
-	    type != INTEL_OUTPUT_EDP &&
-	    type != INTEL_OUTPUT_UNKNOWN) {
-		return;
-	}
-
-	intel_dp_hot_plug(intel_encoder);
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config)
 {
@@ -2793,10 +3178,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
 	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
 	if (!init_dp && !init_hdmi) {
-		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
+		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
 			      port_name(port));
-		init_hdmi = true;
-		init_dp = true;
+		return;
 	}
 
 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
@@ -2825,14 +3209,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
-	intel_encoder->hot_plug = intel_ddi_hot_plug;
 
 	if (init_dp) {
 		if (!intel_ddi_init_dp_connector(intel_dig_port))
 			goto err;
 
 		intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-		dev_priv->hpd_irq_port[port] = intel_dig_port;
+		dev_priv->hotplug.irq_port[port] = intel_dig_port;
 	}
 
 	/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 87476ff181dd..ca9278be49f7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -86,9 +86,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config);
 
-static int intel_set_mode(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state,
-			  bool force_restore);
 static int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
 				  struct drm_mode_fb_cmd2 *mode_cmd,
@@ -105,22 +102,13 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config);
 static void chv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct drm_crtc *crtc);
-static void intel_finish_crtc_commit(struct drm_crtc *crtc);
+static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
 	struct intel_crtc_state *crtc_state);
 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
 			   int num_connectors);
-static void intel_crtc_enable_planes(struct drm_crtc *crtc);
-static void intel_crtc_disable_planes(struct drm_crtc *crtc);
-
-static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
-{
-	if (!connector->mst_port)
-		return connector->encoder;
-	else
-		return &connector->mst_port->mst_encoders[pipe]->base;
-}
+static void intel_modeset_setup_hw_state(struct drm_device *dev);
 
 typedef struct {
 	int	min, max;
@@ -413,7 +401,7 @@ static const intel_limit_t intel_limits_chv = {
 static const intel_limit_t intel_limits_bxt = {
 	/* FIXME: find real dot limits */
 	.dot = { .min = 0, .max = INT_MAX },
-	.vco = { .min = 4800000, .max = 6480000 },
+	.vco = { .min = 4800000, .max = 6700000 },
 	.n = { .min = 1, .max = 1 },
 	.m1 = { .min = 2, .max = 2 },
 	/* FIXME: find real m2 limits */
@@ -422,14 +410,10 @@ static const intel_limit_t intel_limits_bxt = {
 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
 };
 
-static void vlv_clock(int refclk, intel_clock_t *clock)
+static bool
+needs_modeset(struct drm_crtc_state *state)
 {
-	clock->m = clock->m1 * clock->m2;
-	clock->p = clock->p1 * clock->p2;
-	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return;
-	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
-	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+	return drm_atomic_crtc_needs_modeset(state);
 }
 
 /**
@@ -561,15 +545,25 @@ intel_limit(struct intel_crtc_state *crtc_state, int refclk)
 	return limit;
 }
 
+/*
+ * Platform specific helpers to calculate the port PLL loopback- (clock.m),
+ * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
+ * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
+ * The helpers' return value is the rate of the clock that is fed to the
+ * display engine's pipe which can be the above fast dot clock rate or a
+ * divided-down version of it.
+ */
 /* m1 is reserved as 0 in Pineview, n is a ring counter */
-static void pineview_clock(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
 	clock->m = clock->m2 + 2;
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return;
+		return 0;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot;
 }
 
 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -577,25 +571,41 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
 }
 
-static void i9xx_clock(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
 	clock->m = i9xx_dpll_compute_m(clock);
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
-		return;
+		return 0;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot;
 }
 
-static void chv_clock(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
 	clock->m = clock->m1 * clock->m2;
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return;
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot / 5;
+}
+
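+/* m2 carries 22 fractional bits on CHV (and the BXT port PLL). */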
+int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+{
+	clock->m = clock->m1 * clock->m2;
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return 0;
 	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
 			clock->n << 22);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot / 5;
 }
 
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -639,16 +649,12 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
 	return true;
 }
 
-static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
-		    struct intel_crtc_state *crtc_state,
-		    int target, int refclk, intel_clock_t *match_clock,
-		    intel_clock_t *best_clock)
+static int
+i9xx_select_p2_div(const intel_limit_t *limit,
+		   const struct intel_crtc_state *crtc_state,
+		   int target)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
-	intel_clock_t clock;
-	int err = target;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 
 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		/*
@@ -657,18 +663,31 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
 		 * single/dual channel state, if we even can.
 		 */
 		if (intel_is_dual_link_lvds(dev))
-			clock.p2 = limit->p2.p2_fast;
+			return limit->p2.p2_fast;
 		else
-			clock.p2 = limit->p2.p2_slow;
+			return limit->p2.p2_slow;
 	} else {
 		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
+			return limit->p2.p2_slow;
 		else
-			clock.p2 = limit->p2.p2_fast;
+			return limit->p2.p2_fast;
 	}
+}
+
+static bool
+i9xx_find_best_dpll(const intel_limit_t *limit,
+		    struct intel_crtc_state *crtc_state,
+		    int target, int refclk, intel_clock_t *match_clock,
+		    intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	intel_clock_t clock;
+	int err = target;
 
 	memset(best_clock, 0, sizeof(*best_clock));
 
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
 	     clock.m1++) {
 		for (clock.m2 = limit->m2.min;
@@ -681,7 +700,7 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
 					clock.p1 <= limit->p1.max; clock.p1++) {
 					int this_err;
 
-					i9xx_clock(refclk, &clock);
+					i9xx_calc_dpll_params(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
@@ -708,30 +727,14 @@ pnv_find_best_dpll(const intel_limit_t *limit,
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	intel_clock_t clock;
 	int err = target;
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		/*
-		 * For LVDS just rely on its current settings for dual-channel.
-		 * We haven't figured out how to reliably set up different
-		 * single/dual channel state, if we even can.
-		 */
-		if (intel_is_dual_link_lvds(dev))
-			clock.p2 = limit->p2.p2_fast;
-		else
-			clock.p2 = limit->p2.p2_slow;
-	} else {
-		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
-		else
-			clock.p2 = limit->p2.p2_fast;
-	}
-
 	memset(best_clock, 0, sizeof(*best_clock));
 
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
 	     clock.m1++) {
 		for (clock.m2 = limit->m2.min;
@@ -742,7 +745,7 @@ pnv_find_best_dpll(const intel_limit_t *limit,
 					clock.p1 <= limit->p1.max; clock.p1++) {
 					int this_err;
 
-					pineview_clock(refclk, &clock);
+					pnv_calc_dpll_params(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
@@ -769,28 +772,17 @@ g4x_find_best_dpll(const intel_limit_t *limit,
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	intel_clock_t clock;
 	int max_n;
-	bool found;
+	bool found = false;
 	/* approximately equals target * 0.00585 */
 	int err_most = (target >> 8) + (target >> 9);
-	found = false;
-
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_is_dual_link_lvds(dev))
-			clock.p2 = limit->p2.p2_fast;
-		else
-			clock.p2 = limit->p2.p2_slow;
-	} else {
-		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
-		else
-			clock.p2 = limit->p2.p2_fast;
-	}
 
 	memset(best_clock, 0, sizeof(*best_clock));
+
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
 	max_n = limit->n.max;
 	/* based on hardware requirement, prefer smaller n to precision */
 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
@@ -803,7 +795,7 @@ g4x_find_best_dpll(const intel_limit_t *limit,
 				     clock.p1 >= limit->p1.min; clock.p1--) {
 					int this_err;
 
-					i9xx_clock(refclk, &clock);
+					i9xx_calc_dpll_params(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
@@ -893,7 +885,7 @@ vlv_find_best_dpll(const intel_limit_t *limit,
 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
 								     refclk * clock.m1);
 
-					vlv_clock(refclk, &clock);
+					vlv_calc_dpll_params(refclk, &clock);
 
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
@@ -956,7 +948,7 @@ chv_find_best_dpll(const intel_limit_t *limit,
 
 			clock.m2 = m2;
 
-			chv_clock(refclk, &clock);
+			chv_calc_dpll_params(refclk, &clock);
 
 			if (!intel_PLL_is_valid(dev, limit, &clock))
 				continue;
@@ -1026,7 +1018,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 		line_mask = DSL_LINEMASK_GEN3;
 
 	line1 = I915_READ(reg) & line_mask;
-	mdelay(5);
+	msleep(5);
 	line2 = I915_READ(reg) & line_mask;
 
 	return line1 == line2;
@@ -1106,6 +1098,9 @@ bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
 		case PORT_D:
 			bit = SDE_PORTD_HOTPLUG_CPT;
 			break;
+		case PORT_E:
+			bit = SDE_PORTE_HOTPLUG_SPT;
+			break;
 		default:
 			return true;
 		}
@@ -1694,7 +1689,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
 	int count = 0;
 
 	for_each_intel_crtc(dev, crtc)
-		count += crtc->active &&
+		count += crtc->base.state->active &&
 			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
 
 	return count;
@@ -1775,7 +1770,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
 	/* Disable DVO 2x clock on both PLLs if necessary */
 	if (IS_I830(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
-	    intel_num_dvo_pipes(dev) == 1) {
+	    !intel_num_dvo_pipes(dev)) {
 		I915_WRITE(DPLL(PIPE_B),
 			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
 		I915_WRITE(DPLL(PIPE_A),
@@ -1790,13 +1785,13 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, pipe);
 
-	I915_WRITE(DPLL(pipe), 0);
+	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
 	POSTING_READ(DPLL(pipe));
 }
 
 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	u32 val = 0;
+	u32 val;
 
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, pipe);
@@ -1805,8 +1800,9 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 	 * Leave integrated clock source and reference clock enabled for pipe B.
 	 * The latter is needed for VGA hotplug / manual detection.
 	 */
+	val = DPLL_VGA_MODE_DIS;
 	if (pipe == PIPE_B)
-		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
+		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
 	I915_WRITE(DPLL(pipe), val);
 	POSTING_READ(DPLL(pipe));
 
@@ -1821,7 +1817,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 	assert_pipe_disabled(dev_priv, pipe);
 
 	/* Set PLL en = 0 */
-	val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
+	val = DPLL_SSC_REF_CLK_CHV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 	if (pipe != PIPE_A)
 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
 	I915_WRITE(DPLL(pipe), val);
@@ -1942,11 +1939,13 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
 	/* PCH only available on ILK+ */
-	BUG_ON(INTEL_INFO(dev)->gen < 5);
-	if (WARN_ON(pll == NULL))
-	       return;
+	if (INTEL_INFO(dev)->gen < 5)
+		return;
 
-	if (WARN_ON(pll->config.crtc_mask == 0))
+	if (pll == NULL)
+		return;
+
+	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
 		return;
 
 	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -2004,11 +2003,15 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 
 	if (HAS_PCH_IBX(dev_priv->dev)) {
 		/*
-		 * make the BPC in transcoder be consistent with
-		 * that in pipeconf reg.
+		 * Make the BPC in the transcoder consistent with
+		 * that in the pipeconf reg. For HDMI we must use
+		 * 8bpc here for both 8bpc and 12bpc.
 		 */
 		val &= ~PIPECONF_BPC_MASK;
-		val |= pipeconf_val & PIPECONF_BPC_MASK;
+		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+			val |= PIPECONF_8BPC;
+		else
+			val |= pipeconf_val & PIPECONF_BPC_MASK;
 	}
 
 	val &= ~TRANS_INTERLACE_MASK;
@@ -2122,6 +2125,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
 	int reg;
 	u32 val;
 
+	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+
 	assert_planes_disabled(dev_priv, pipe);
 	assert_cursor_disabled(dev_priv, pipe);
 	assert_sprites_disabled(dev_priv, pipe);
@@ -2181,6 +2186,8 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
 	int reg;
 	u32 val;
 
+	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+
 	/*
 	 * Make sure planes won't keep trying to pump pixels to us,
 	 * or we might hang the display.
@@ -2211,28 +2218,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
 		intel_wait_for_pipe_off(crtc);
 }
 
-/**
- * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
- * @plane:  plane to be enabled
- * @crtc: crtc for the plane
- *
- * Enable @plane on @crtc, making sure that the pipe is running first.
- */
-static void intel_enable_primary_hw_plane(struct drm_plane *plane,
-					  struct drm_crtc *crtc)
-{
-	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	/* If the pipe isn't enabled, we can't pump pixels and may hang */
-	assert_pipe_enabled(dev_priv, intel_crtc->pipe);
-	to_intel_plane_state(plane->state)->visible = true;
-
-	dev_priv->display.update_primary_plane(crtc, plane->fb,
-					       crtc->x, crtc->y);
-}
-
 static bool need_vtd_wa(struct drm_device *dev)
 {
 #ifdef CONFIG_INTEL_IOMMU
@@ -2302,6 +2287,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
 			const struct drm_plane_state *plane_state)
 {
 	struct intel_rotation_info *info = &view->rotation_info;
+	unsigned int tile_height, tile_pitch;
 
 	*view = i915_ggtt_view_normal;
 
@@ -2318,14 +2304,35 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
 	info->pitch = fb->pitches[0];
 	info->fb_modifier = fb->modifier[0];
 
+	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
+					fb->modifier[0]);
+	tile_pitch = PAGE_SIZE / tile_height;
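+	/* Footprint of the rotated view: tiles per row, tile rows, total size. */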
+	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
+	info->size = info->width_pages * info->height_pages * PAGE_SIZE;
+
 	return 0;
 }
 
+static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return 256 * 1024;
+	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
+		 IS_VALLEYVIEW(dev_priv))
+		return 128 * 1024;
+	else if (INTEL_INFO(dev_priv)->gen >= 4)
+		return 4 * 1024;
+	else
+		return 0;
+}
+
 int
 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			   struct drm_framebuffer *fb,
 			   const struct drm_plane_state *plane_state,
-			   struct intel_engine_cs *pipelined)
+			   struct intel_engine_cs *pipelined,
+			   struct drm_i915_gem_request **pipelined_request)
 {
 	struct drm_device *dev = fb->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2338,14 +2345,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 
 	switch (fb->modifier[0]) {
 	case DRM_FORMAT_MOD_NONE:
-		if (INTEL_INFO(dev)->gen >= 9)
-			alignment = 256 * 1024;
-		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
-			alignment = 128 * 1024;
-		else if (INTEL_INFO(dev)->gen >= 4)
-			alignment = 4 * 1024;
-		else
-			alignment = 64 * 1024;
+		alignment = intel_linear_alignment(dev_priv);
 		break;
 	case I915_FORMAT_MOD_X_TILED:
 		if (INTEL_INFO(dev)->gen >= 9)
@@ -2390,7 +2390,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 
 	dev_priv->mm.interruptible = false;
 	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
-						   &view);
+						   pipelined_request, &view);
 	if (ret)
 		goto err_interruptible;
 
@@ -2400,7 +2400,18 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 	 * a fence as the cost is not that onerous.
 	 */
 	ret = i915_gem_object_get_fence(obj);
-	if (ret)
+	if (ret == -EDEADLK) {
+		/*
+		 * -EDEADLK means there are no free fences
+		 * and no pending flips.
+		 *
+		 * This is propagated to atomic, but it uses
+		 * -EDEADLK to force a locking recovery, so
+		 * change the returned error to -EBUSY.
+		 */
+		ret = -EBUSY;
+		goto err_unpin;
+	} else if (ret)
 		goto err_unpin;
 
 	i915_gem_object_pin_fence(obj);
@@ -2435,7 +2446,8 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
 
 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
  * is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+					     int *x, int *y,
 					     unsigned int tiling_mode,
 					     unsigned int cpp,
 					     unsigned int pitch)
@@ -2451,12 +2463,13 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
 
 		return tile_rows * pitch * 8 + tiles * 4096;
 	} else {
+		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
 		unsigned int offset;
 
 		offset = *y * pitch + *x * cpp;
-		*y = 0;
-		*x = (offset & 4095) / cpp;
-		return offset & -4096;
+		*y = (offset & alignment) / pitch;
+		*x = ((offset & alignment) - *y * pitch) / cpp;
+		return offset & ~alignment;
 	}
 }
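+
+/*
+ * Worked example for the linear branch above (illustrative): with 4k
+ * alignment (mask 0xfff), pitch = 1024, cpp = 4, x = 10, y = 5, the full
+ * offset is 5 * 1024 + 10 * 4 = 5160.  The aligned base 4096 is returned
+ * and the coordinates are rewritten to y = 1, x = 10, so that
+ * base + y * pitch + x * cpp still equals 5160.
+ */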
 
@@ -2583,6 +2596,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	struct intel_crtc *i;
 	struct drm_i915_gem_object *obj;
 	struct drm_plane *primary = intel_crtc->base.primary;
+	struct drm_plane_state *plane_state = primary->state;
 	struct drm_framebuffer *fb;
 
 	if (!plane_config->fb)
@@ -2622,15 +2636,23 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	return;
 
 valid_fb:
+	plane_state->src_x = plane_state->src_y = 0;
+	plane_state->src_w = fb->width << 16;
+	plane_state->src_h = fb->height << 16;
+
+	plane_state->crtc_x = plane_state->crtc_y = 0;
+	plane_state->crtc_w = fb->width;
+	plane_state->crtc_h = fb->height;
+
 	obj = intel_fb_obj(fb);
 	if (obj->tiling_mode != I915_TILING_NONE)
 		dev_priv->preserve_bios_swizzle = true;
 
-	primary->fb = fb;
-	primary->state->crtc = &intel_crtc->base;
-	primary->crtc = &intel_crtc->base;
-	update_state_fb(primary);
-	obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+	drm_framebuffer_reference(fb);
+	primary->fb = primary->state->fb = fb;
+	primary->crtc = primary->state->crtc = &intel_crtc->base;
+	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
+	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
 }
 
 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2725,7 +2747,8 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		intel_crtc->dspaddr_offset =
-			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+			intel_gen4_compute_page_offset(dev_priv,
+						       &x, &y, obj->tiling_mode,
 						       pixel_size,
 						       fb->pitches[0]);
 		linear_offset -= intel_crtc->dspaddr_offset;
@@ -2826,7 +2849,8 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
 	intel_crtc->dspaddr_offset =
-		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
 					       pixel_size,
 					       fb->pitches[0]);
 	linear_offset -= intel_crtc->dspaddr_offset;
@@ -2904,32 +2928,32 @@ unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
 	return i915_gem_obj_ggtt_offset_view(obj, view);
 }
 
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+	DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
+		intel_crtc->base.base.id, intel_crtc->pipe, id);
+}
+
 /*
  * This function detaches (aka. unbinds) unused scalers in hardware
  */
-void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(struct intel_crtc *intel_crtc)
 {
-	struct drm_device *dev;
-	struct drm_i915_private *dev_priv;
 	struct intel_crtc_scaler_state *scaler_state;
 	int i;
 
-	if (!intel_crtc || !intel_crtc->config)
-		return;
-
-	dev = intel_crtc->base.dev;
-	dev_priv = dev->dev_private;
 	scaler_state = &intel_crtc->config->scaler_state;
 
 	/* loop through and disable scalers that aren't in use */
 	for (i = 0; i < intel_crtc->num_scalers; i++) {
-		if (!scaler_state->scalers[i].in_use) {
-			I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, i), 0);
-			I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, i), 0);
-			I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, i), 0);
-			DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
-				intel_crtc->base.base.id, intel_crtc->pipe, i);
-		}
+		if (!scaler_state->scalers[i].in_use)
+			skl_detach_scaler(intel_crtc, i);
 	}
 }
 
@@ -3132,8 +3156,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->display.disable_fbc)
-		dev_priv->display.disable_fbc(dev);
+	if (dev_priv->fbc.disable_fbc)
+		dev_priv->fbc.disable_fbc(dev_priv);
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -3176,24 +3200,8 @@ static void intel_update_primary_planes(struct drm_device *dev)
 	}
 }
 
-void intel_crtc_reset(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-	if (!crtc->active)
-		return;
-
-	intel_crtc_disable_planes(&crtc->base);
-	dev_priv->display.crtc_disable(&crtc->base);
-	dev_priv->display.crtc_enable(&crtc->base);
-	intel_crtc_enable_planes(&crtc->base);
-}
-
 void intel_prepare_reset(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc;
-
 	/* no reset support for gen2 */
 	if (IS_GEN2(dev))
 		return;
@@ -3203,18 +3211,11 @@ void intel_prepare_reset(struct drm_device *dev)
 		return;
 
 	drm_modeset_lock_all(dev);
-
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
 	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (!crtc->active)
-			continue;
-
-		intel_crtc_disable_planes(&crtc->base);
-		dev_priv->display.crtc_disable(&crtc->base);
-	}
+	intel_display_suspend(dev);
 }
 
 void intel_finish_reset(struct drm_device *dev)
@@ -3258,7 +3259,7 @@ void intel_finish_reset(struct drm_device *dev)
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_modeset_setup_hw_state(dev, true);
+	intel_display_resume(dev);
 
 	intel_hpd_init(dev_priv);
 
@@ -4200,34 +4201,16 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
-void intel_put_shared_dpll(struct intel_crtc *crtc)
-{
-	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
-	if (pll == NULL)
-		return;
-
-	if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
-		WARN(1, "bad %s crtc mask\n", pll->name);
-		return;
-	}
-
-	pll->config.crtc_mask &= ~(1 << crtc->pipe);
-	if (pll->config.crtc_mask == 0) {
-		WARN_ON(pll->on);
-		WARN_ON(pll->active);
-	}
-
-	crtc->config->shared_dpll = DPLL_ID_PRIVATE;
-}
-
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 						struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct intel_shared_dpll *pll;
+	struct intel_shared_dpll_config *shared_dpll;
 	enum intel_dpll_id i;
 
+	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
 	if (HAS_PCH_IBX(dev_priv->dev)) {
 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
 		i = (enum intel_dpll_id) crtc->pipe;
@@ -4236,7 +4219,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
 			      crtc->base.base.id, pll->name);
 
-		WARN_ON(pll->new_config->crtc_mask);
+		WARN_ON(shared_dpll[i].crtc_mask);
 
 		goto found;
 	}
@@ -4256,7 +4239,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 		pll = &dev_priv->shared_dplls[i];
 		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
 			crtc->base.base.id, pll->name);
-		WARN_ON(pll->new_config->crtc_mask);
+		WARN_ON(shared_dpll[i].crtc_mask);
 
 		goto found;
 	}
@@ -4265,15 +4248,15 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 		pll = &dev_priv->shared_dplls[i];
 
 		/* Only want to check enabled timings first */
-		if (pll->new_config->crtc_mask == 0)
+		if (shared_dpll[i].crtc_mask == 0)
 			continue;
 
 		if (memcmp(&crtc_state->dpll_hw_state,
-			   &pll->new_config->hw_state,
-			   sizeof(pll->new_config->hw_state)) == 0) {
+			   &shared_dpll[i].hw_state,
+			   sizeof(crtc_state->dpll_hw_state)) == 0) {
 			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
 				      crtc->base.base.id, pll->name,
-				      pll->new_config->crtc_mask,
+				      shared_dpll[i].crtc_mask,
 				      pll->active);
 			goto found;
 		}
@@ -4282,7 +4265,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 	/* Ok no matching timings, maybe there's a free one? */
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		pll = &dev_priv->shared_dplls[i];
-		if (pll->new_config->crtc_mask == 0) {
+		if (shared_dpll[i].crtc_mask == 0) {
 			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
 				      crtc->base.base.id, pll->name);
 			goto found;
@@ -4292,83 +4275,33 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 	return NULL;
 
 found:
-	if (pll->new_config->crtc_mask == 0)
-		pll->new_config->hw_state = crtc_state->dpll_hw_state;
+	if (shared_dpll[i].crtc_mask == 0)
+		shared_dpll[i].hw_state = crtc_state->dpll_hw_state;
 
 	crtc_state->shared_dpll = i;
 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
 			 pipe_name(crtc->pipe));
 
-	pll->new_config->crtc_mask |= 1 << crtc->pipe;
+	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
 
 	return pll;
 }
 
-/**
- * intel_shared_dpll_start_config - start a new PLL staged config
- * @dev_priv: DRM device
- * @clear_pipes: mask of pipes that will have their PLLs freed
- *
- * Starts a new PLL staged config, copying the current config but
- * releasing the references of pipes specified in clear_pipes.
- */
-static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
-					  unsigned clear_pipes)
-{
-	struct intel_shared_dpll *pll;
-	enum intel_dpll_id i;
-
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		pll = &dev_priv->shared_dplls[i];
-
-		pll->new_config = kmemdup(&pll->config, sizeof pll->config,
-					  GFP_KERNEL);
-		if (!pll->new_config)
-			goto cleanup;
-
-		pll->new_config->crtc_mask &= ~clear_pipes;
-	}
-
-	return 0;
-
-cleanup:
-	while (--i >= 0) {
-		pll = &dev_priv->shared_dplls[i];
-		kfree(pll->new_config);
-		pll->new_config = NULL;
-	}
-
-	return -ENOMEM;
-}
-
-static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
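+/*
+ * Commits the shared-DPLL configuration staged in the atomic state into
+ * the dev_priv bookkeeping; the dpll_set flag indicates whether this
+ * atomic state actually staged a new DPLL configuration.
+ */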
+static void intel_shared_dpll_commit(struct drm_atomic_state *state)
 {
+	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	struct intel_shared_dpll_config *shared_dpll;
 	struct intel_shared_dpll *pll;
 	enum intel_dpll_id i;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		pll = &dev_priv->shared_dplls[i];
-
-		WARN_ON(pll->new_config == &pll->config);
-
-		pll->config = *pll->new_config;
-		kfree(pll->new_config);
-		pll->new_config = NULL;
-	}
-}
-
-static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
-{
-	struct intel_shared_dpll *pll;
-	enum intel_dpll_id i;
+	if (!to_intel_atomic_state(state)->dpll_set)
+		return;
 
+	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		pll = &dev_priv->shared_dplls[i];
-
-		WARN_ON(pll->new_config == &pll->config);
-
-		kfree(pll->new_config);
-		pll->new_config = NULL;
+		pll->config = shared_dpll[i];
 	}
 }
 
@@ -4386,62 +4319,16 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 	}
 }
 
-/**
- * skl_update_scaler_users - Stages update to crtc's scaler state
- * @intel_crtc: crtc
- * @crtc_state: crtc_state
- * @plane: plane (NULL indicates crtc is requesting update)
- * @plane_state: plane's state
- * @force_detach: request unconditional detachment of scaler
- *
- * This function updates scaler state for requested plane or crtc.
- * To request scaler usage update for a plane, caller shall pass plane pointer.
- * To request scaler usage update for crtc, caller shall pass plane pointer
- * as NULL.
- *
- * Return
- *     0 - scaler_usage updated successfully
- *    error - requested scaling cannot be supported or other error condition
- */
-int
-skl_update_scaler_users(
-	struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state,
-	struct intel_plane *intel_plane, struct intel_plane_state *plane_state,
-	int force_detach)
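+/*
+ * Core helper shared by skl_update_scaler_crtc() and
+ * skl_update_scaler_plane() below: depending on the src/dst sizes and
+ * rotation it either stages a scaling request for @scaler_user in the
+ * crtc_state, or stages freeing of a previously assigned scaler.
+ */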
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
+		  int src_w, int src_h, int dst_w, int dst_h)
 {
+	struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(crtc_state->base.crtc);
 	int need_scaling;
-	int idx;
-	int src_w, src_h, dst_w, dst_h;
-	int *scaler_id;
-	struct drm_framebuffer *fb;
-	struct intel_crtc_scaler_state *scaler_state;
-	unsigned int rotation;
-
-	if (!intel_crtc || !crtc_state)
-		return 0;
-
-	scaler_state = &crtc_state->scaler_state;
-
-	idx = intel_plane ? drm_plane_index(&intel_plane->base) : SKL_CRTC_INDEX;
-	fb = intel_plane ? plane_state->base.fb : NULL;
-
-	if (intel_plane) {
-		src_w = drm_rect_width(&plane_state->src) >> 16;
-		src_h = drm_rect_height(&plane_state->src) >> 16;
-		dst_w = drm_rect_width(&plane_state->dst);
-		dst_h = drm_rect_height(&plane_state->dst);
-		scaler_id = &plane_state->scaler_id;
-		rotation = plane_state->base.rotation;
-	} else {
-		struct drm_display_mode *adjusted_mode =
-			&crtc_state->base.adjusted_mode;
-		src_w = crtc_state->pipe_src_w;
-		src_h = crtc_state->pipe_src_h;
-		dst_w = adjusted_mode->hdisplay;
-		dst_h = adjusted_mode->vdisplay;
-		scaler_id = &scaler_state->scaler_id;
-		rotation = DRM_ROTATE_0;
-	}
 
 	need_scaling = intel_rotation_90_or_270(rotation) ?
 		(src_h != dst_w || src_w != dst_h):
@@ -4457,17 +4344,14 @@ skl_update_scaler_users(
 	 * update to free the scaler is done in plane/panel-fit programming.
 	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
 	 */
-	if (force_detach || !need_scaling || (intel_plane &&
-		(!fb || !plane_state->visible))) {
+	if (force_detach || !need_scaling) {
 		if (*scaler_id >= 0) {
-			scaler_state->scaler_users &= ~(1 << idx);
+			scaler_state->scaler_users &= ~(1 << scaler_user);
 			scaler_state->scalers[*scaler_id].in_use = 0;
 
-			DRM_DEBUG_KMS("Staged freeing scaler id %d.%d from %s:%d "
-				"crtc_state = %p scaler_users = 0x%x\n",
-				intel_crtc->pipe, *scaler_id, intel_plane ? "PLANE" : "CRTC",
-				intel_plane ? intel_plane->base.base.id :
-				intel_crtc->base.base.id, crtc_state,
+			DRM_DEBUG_KMS("scaler_user index %u.%u: "
+				"Staged freeing scaler id %d scaler_users = 0x%x\n",
+				intel_crtc->pipe, scaler_user, *scaler_id,
 				scaler_state->scaler_users);
 			*scaler_id = -1;
 		}
@@ -4480,55 +4364,123 @@ skl_update_scaler_users(
 
 		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
 		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
-		DRM_DEBUG_KMS("%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u "
+		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
 			"size is out of scaler range\n",
-			intel_plane ? "PLANE" : "CRTC",
-			intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
-			intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h);
+			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
 		return -EINVAL;
 	}
 
+	/* mark this plane as a scaler user in crtc_state */
+	scaler_state->scaler_users |= (1 << scaler_user);
+	DRM_DEBUG_KMS("scaler_user index %u.%u: "
+		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+		scaler_state->scaler_users);
+
+	return 0;
+}
+
+/**
+ * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
+ *
+ * @state: crtc state being updated
+ *
+ * Return
+ *     0 - scaler_usage updated successfully
+ *     error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_crtc(struct intel_crtc_state *state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
+	struct drm_display_mode *adjusted_mode =
+		&state->base.adjusted_mode;
+
+	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
+		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+
+	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+		&state->scaler_state.scaler_id, DRM_ROTATE_0,
+		state->pipe_src_w, state->pipe_src_h,
+		adjusted_mode->hdisplay, adjusted_mode->vdisplay);
+}
+
+/**
+ * skl_update_scaler_plane - Stages update to scaler state for a given plane.
+ *
+ * @crtc_state: crtc state the plane belongs to
+ * @plane_state: atomic plane state to update
+ *
+ * Return
+ *     0 - scaler_usage updated successfully
+ *     error - requested scaling cannot be supported or other error condition
+ */
+static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+				   struct intel_plane_state *plane_state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_plane *intel_plane =
+		to_intel_plane(plane_state->base.plane);
+	struct drm_framebuffer *fb = plane_state->base.fb;
+	bool force_detach = !fb || !plane_state->visible;
+	int ret;
+
+	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
+		      intel_plane->base.base.id, intel_crtc->pipe,
+		      drm_plane_index(&intel_plane->base));
+
+	ret = skl_update_scaler(crtc_state, force_detach,
+				drm_plane_index(&intel_plane->base),
+				&plane_state->scaler_id,
+				plane_state->base.rotation,
+				drm_rect_width(&plane_state->src) >> 16,
+				drm_rect_height(&plane_state->src) >> 16,
+				drm_rect_width(&plane_state->dst),
+				drm_rect_height(&plane_state->dst));
+
+	if (ret || plane_state->scaler_id < 0)
+		return ret;
+
 	/* check colorkey */
-	if (WARN_ON(intel_plane &&
-		intel_plane->ckey.flags != I915_SET_COLORKEY_NONE)) {
-		DRM_DEBUG_KMS("PLANE:%d scaling %ux%u->%ux%u not allowed with colorkey",
-			intel_plane->base.base.id, src_w, src_h, dst_w, dst_h);
+	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
+		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
+			      intel_plane->base.base.id);
 		return -EINVAL;
 	}
 
 	/* Check src format */
-	if (intel_plane) {
-		switch (fb->pixel_format) {
-		case DRM_FORMAT_RGB565:
-		case DRM_FORMAT_XBGR8888:
-		case DRM_FORMAT_XRGB8888:
-		case DRM_FORMAT_ABGR8888:
-		case DRM_FORMAT_ARGB8888:
-		case DRM_FORMAT_XRGB2101010:
-		case DRM_FORMAT_XBGR2101010:
-		case DRM_FORMAT_YUYV:
-		case DRM_FORMAT_YVYU:
-		case DRM_FORMAT_UYVY:
-		case DRM_FORMAT_VYUY:
-			break;
-		default:
-			DRM_DEBUG_KMS("PLANE:%d FB:%d unsupported scaling format 0x%x\n",
-				intel_plane->base.base.id, fb->base.id, fb->pixel_format);
-			return -EINVAL;
-		}
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		break;
+	default:
+		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
+			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+		return -EINVAL;
 	}
 
-	/* mark this plane as a scaler user in crtc_state */
-	scaler_state->scaler_users |= (1 << idx);
-	DRM_DEBUG_KMS("%s:%d staged scaling request for %ux%u->%ux%u "
-		"crtc_state = %p scaler_users = 0x%x\n",
-		intel_plane ? "PLANE" : "CRTC",
-		intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
-		src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users);
 	return 0;
 }
 
-static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
+static void skylake_scaler_disable(struct intel_crtc *crtc)
+{
+	int i;
+
+	for (i = 0; i < crtc->num_scalers; i++)
+		skl_detach_scaler(crtc, i);
+}
+
+static void skylake_pfit_enable(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4538,13 +4490,6 @@ static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
 
 	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
 
-	/* To update pfit, first update scaler state */
-	skl_update_scaler_users(crtc, crtc->config, NULL, NULL, !enable);
-	intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config);
-	skl_detach_scalers(crtc);
-	if (!enable)
-		return;
-
 	if (crtc->config->pch_pfit.enabled) {
 		int id;
 
@@ -4584,20 +4529,6 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
 	}
 }
 
-static void intel_enable_sprite_planes(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	enum pipe pipe = to_intel_crtc(crtc)->pipe;
-	struct drm_plane *plane;
-	struct intel_plane *intel_plane;
-
-	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
-		intel_plane = to_intel_plane(plane);
-		if (intel_plane->pipe == pipe)
-			intel_plane_restore(&intel_plane->base);
-	}
-}
-
 void hsw_enable_ips(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -4668,7 +4599,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
 	bool reenable_ips = false;
 
 	/* The clocks have to be on to load the palette. */
-	if (!crtc->state->enable || !intel_crtc->active)
+	if (!crtc->state->active)
 		return;
 
 	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
@@ -4755,10 +4686,6 @@ intel_post_enable_primary(struct drm_crtc *crtc)
 	 */
 	hsw_enable_ips(intel_crtc);
 
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
-
 	/*
 	 * Gen2 reports pipe underruns whenever all planes are disabled.
 	 * So don't enable underrun reporting before at least some planes
@@ -4810,13 +4737,11 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
 	 * event which is after the vblank start event, so we need to have a
 	 * wait-for-vblank between disabling the plane and the pipe.
 	 */
-	if (HAS_GMCH_DISPLAY(dev))
+	if (HAS_GMCH_DISPLAY(dev)) {
 		intel_set_memory_cxsr(dev_priv, false);
-
-	mutex_lock(&dev->struct_mutex);
-	if (dev_priv->fbc.crtc == intel_crtc)
-		intel_fbc_disable(dev);
-	mutex_unlock(&dev->struct_mutex);
+		dev_priv->wm.vlv.cxsr = false;
+		intel_wait_for_vblank(dev, pipe);
+	}
 
 	/*
 	 * FIXME IPS should be fine as long as one plane is
@@ -4827,49 +4752,83 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
 	hsw_disable_ips(intel_crtc);
 }
 
-static void intel_crtc_enable_planes(struct drm_crtc *crtc)
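+/*
+ * Flushes the work staged in crtc->atomic once the plane update has been
+ * committed: vblank waits, frontbuffer flips, watermark and FBC updates
+ * and the primary plane post-enable hook.  The staged flags are cleared
+ * again at the end.
+ */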
+static void intel_post_plane_update(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
+	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_plane *plane;
 
-	intel_enable_primary_hw_plane(crtc->primary, crtc);
-	intel_enable_sprite_planes(crtc);
-	intel_crtc_update_cursor(crtc, true);
+	if (atomic->wait_vblank)
+		intel_wait_for_vblank(dev, crtc->pipe);
 
-	intel_post_enable_primary(crtc);
+	intel_frontbuffer_flip(dev, atomic->fb_bits);
 
-	/*
-	 * FIXME: Once we grow proper nuclear flip support out of this we need
-	 * to compute the mask of flip planes precisely. For the time being
-	 * consider this a flip to a NULL plane.
-	 */
-	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+	if (atomic->disable_cxsr)
+		crtc->wm.cxsr_allowed = true;
+
+	if (crtc->atomic.update_wm_post)
+		intel_update_watermarks(&crtc->base);
+
+	if (atomic->update_fbc)
+		intel_fbc_update(dev_priv);
+
+	if (atomic->post_enable_primary)
+		intel_post_enable_primary(&crtc->base);
+
+	drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
+		intel_update_sprite_watermarks(plane, &crtc->base,
+					       0, 0, 0, false, false);
+
+	memset(atomic, 0, sizeof(*atomic));
 }
 
-static void intel_crtc_disable_planes(struct drm_crtc *crtc)
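+/*
+ * Counterpart of intel_post_plane_update(): runs the work staged in
+ * crtc->atomic that must happen before the planes are touched, such as
+ * frontbuffer tracking for planes being disabled, waiting for pending
+ * flips, and disabling FBC, IPS and cxsr.
+ */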
+static void intel_pre_plane_update(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_plane *intel_plane;
-	int pipe = intel_crtc->pipe;
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+	struct drm_plane *p;
 
-	if (!intel_crtc->active)
-		return;
+	/* Track fb's for any planes being disabled */
+	drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
+		struct intel_plane *plane = to_intel_plane(p);
 
-	intel_crtc_wait_for_pending_flips(crtc);
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
+				  plane->frontbuffer_bit);
+		mutex_unlock(&dev->struct_mutex);
+	}
 
-	intel_pre_disable_primary(crtc);
+	if (atomic->wait_for_flips)
+		intel_crtc_wait_for_pending_flips(&crtc->base);
 
-	intel_crtc_dpms_overlay_disable(intel_crtc);
-	for_each_intel_plane(dev, intel_plane) {
-		if (intel_plane->pipe == pipe) {
-			struct drm_crtc *from = intel_plane->base.crtc;
+	if (atomic->disable_fbc)
+		intel_fbc_disable_crtc(crtc);
 
-			intel_plane->disable_plane(&intel_plane->base,
-						   from ?: crtc, true);
-		}
+	if (crtc->atomic.disable_ips)
+		hsw_disable_ips(crtc);
+
+	if (atomic->pre_disable_primary)
+		intel_pre_disable_primary(&crtc->base);
+
+	if (atomic->disable_cxsr) {
+		crtc->wm.cxsr_allowed = false;
+		intel_set_memory_cxsr(dev_priv, false);
 	}
+}
+
+static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_plane *p;
+	int pipe = intel_crtc->pipe;
+
+	intel_crtc_dpms_overlay_disable(intel_crtc);
+
+	drm_for_each_plane_mask(p, dev, plane_mask)
+		to_intel_plane(p)->disable_plane(p, crtc);
 
 	/*
 	 * FIXME: Once we grow proper nuclear flip support out of this we need
@@ -4887,9 +4846,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	if (intel_crtc->config->has_pch_encoder)
@@ -4956,46 +4913,17 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-/*
- * This implements the workaround described in the "notes" section of the mode
- * set sequence documentation. When going from no pipes or single pipe to
- * multiple pipes, and planes are enabled after the pipe, we need to wait at
- * least 2 vblanks on the first pipe before enabling planes on the second pipe.
- */
-static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
-
-	/* We want to get the other_active_crtc only if there's only 1 other
-	 * active crtc. */
-	for_each_intel_crtc(dev, crtc_it) {
-		if (!crtc_it->active || crtc_it == crtc)
-			continue;
-
-		if (other_active_crtc)
-			return;
-
-		other_active_crtc = crtc_it;
-	}
-	if (!other_active_crtc)
-		return;
-
-	intel_wait_for_vblank(dev, other_active_crtc->pipe);
-	intel_wait_for_vblank(dev, other_active_crtc->pipe);
-}
-
 static void haswell_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
-	int pipe = intel_crtc->pipe;
+	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc->state);
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	if (intel_crtc_to_shared_dpll(intel_crtc))
@@ -5036,7 +4964,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	intel_ddi_enable_pipe_clock(intel_crtc);
 
 	if (INTEL_INFO(dev)->gen == 9)
-		skylake_pfit_update(intel_crtc, 1);
+		skylake_pfit_enable(intel_crtc);
 	else if (INTEL_INFO(dev)->gen < 9)
 		ironlake_pfit_enable(intel_crtc);
 	else
@@ -5070,7 +4998,11 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
 	/* If we change the relative order between pipe/planes enabling, we need
 	 * to change the workaround. */
-	haswell_mode_set_planes_workaround(intel_crtc);
+	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
+		intel_wait_for_vblank(dev, hsw_workaround_pipe);
+		intel_wait_for_vblank(dev, hsw_workaround_pipe);
+	}
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -5097,9 +5029,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	u32 reg, temp;
 
-	if (!intel_crtc->active)
-		return;
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->disable(encoder);
 
@@ -5138,18 +5067,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 			I915_WRITE(PCH_DPLL_SEL, temp);
 		}
 
-		/* disable PCH DPLL */
-		intel_disable_shared_dpll(intel_crtc);
-
 		ironlake_fdi_pll_disable(intel_crtc);
 	}
 
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
-
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5160,9 +5082,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
-	if (!intel_crtc->active)
-		return;
-
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		intel_opregion_notify_encoder(encoder, false);
 		encoder->disable(encoder);
@@ -5182,7 +5101,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
 	if (INTEL_INFO(dev)->gen == 9)
-		skylake_pfit_update(intel_crtc, 0);
+		skylake_scaler_disable(intel_crtc);
 	else if (INTEL_INFO(dev)->gen < 9)
 		ironlake_pfit_disable(intel_crtc);
 	else
@@ -5201,22 +5120,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
-
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (intel_crtc_to_shared_dpll(intel_crtc))
-		intel_disable_shared_dpll(intel_crtc);
 }
 
-static void ironlake_crtc_off(struct drm_crtc *crtc)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	intel_put_shared_dpll(intel_crtc);
-}
-
-
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -5252,6 +5157,8 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
 		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
 	case PORT_D:
 		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+	case PORT_E:
+		return POWER_DOMAIN_PORT_DDI_E_2_LANES;
 	default:
 		WARN_ON_ONCE(1);
 		return POWER_DOMAIN_PORT_OTHER;
@@ -5298,6 +5205,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
 	unsigned long mask;
 	enum transcoder transcoder;
 
+	if (!crtc->state->active)
+		return 0;
+
 	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 
 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
@@ -5312,45 +5222,131 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
 	return mask;
 }
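+
+/*
+ * Grabs references on the power domains the crtc needs in its new state
+ * and returns the set of domains that are no longer needed, so the
+ * caller can drop them once the modeset has been committed.
+ */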
 
+static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum intel_display_power_domain domain;
+	unsigned long domains, new_domains, old_domains;
+
+	old_domains = intel_crtc->enabled_power_domains;
+	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
+
+	domains = new_domains & ~old_domains;
+
+	for_each_power_domain(domain, domains)
+		intel_display_power_get(dev_priv, domain);
+
+	return old_domains & ~new_domains;
+}
+
+static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
+				      unsigned long domains)
+{
+	enum intel_display_power_domain domain;
+
+	for_each_power_domain(domain, domains)
+		intel_display_power_put(dev_priv, domain);
+}
+
 static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
-	struct intel_crtc *crtc;
+	unsigned long put_domains[I915_MAX_PIPES] = {};
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int i;
 
-	/*
-	 * First get all needed power domains, then put all unneeded, to avoid
-	 * any unnecessary toggling of the power wells.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		enum intel_display_power_domain domain;
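+	/*
+	 * First get all needed power domains, then put all unneeded, to
+	 * avoid any unnecessary toggling of the power wells.
+	 */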
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (needs_modeset(crtc->state))
+			put_domains[to_intel_crtc(crtc)->pipe] =
+				modeset_get_crtc_power_domains(crtc);
+	}
 
-		if (!crtc->base.state->enable)
-			continue;
+	if (dev_priv->display.modeset_commit_cdclk) {
+		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
+
+		if (cdclk != dev_priv->cdclk_freq &&
+		    !WARN_ON(!state->allow_modeset))
+			dev_priv->display.modeset_commit_cdclk(state);
+	}
+
+	for (i = 0; i < I915_MAX_PIPES; i++)
+		if (put_domains[i])
+			modeset_put_power_domains(dev_priv, put_domains[i]);
+}
+
+static void intel_update_max_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
+	if (IS_SKYLAKE(dev)) {
+		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
 
-		for_each_power_domain(domain, pipe_domains[crtc->pipe])
-			intel_display_power_get(dev_priv, domain);
+		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
+			dev_priv->max_cdclk_freq = 675000;
+		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
+			dev_priv->max_cdclk_freq = 540000;
+		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
+			dev_priv->max_cdclk_freq = 450000;
+		else
+			dev_priv->max_cdclk_freq = 337500;
+	} else if (IS_BROADWELL(dev)) {
+		/*
+		 * FIXME with extra cooling we can allow
+		 * 540 MHz for ULX and 675 MHz for ULT.
+		 * How can we know if extra cooling is
+		 * available? PCI ID, VTB, something else?
+		 */
+		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+			dev_priv->max_cdclk_freq = 450000;
+		else if (IS_BDW_ULX(dev))
+			dev_priv->max_cdclk_freq = 450000;
+		else if (IS_BDW_ULT(dev))
+			dev_priv->max_cdclk_freq = 540000;
+		else
+			dev_priv->max_cdclk_freq = 675000;
+	} else if (IS_CHERRYVIEW(dev)) {
+		dev_priv->max_cdclk_freq = 320000;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->max_cdclk_freq = 400000;
+	} else {
+		/* otherwise assume cdclk is fixed */
+		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
 	}
 
-	if (dev_priv->display.modeset_global_resources)
-		dev_priv->display.modeset_global_resources(state);
+	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
+			 dev_priv->max_cdclk_freq);
+}
 
-	for_each_intel_crtc(dev, crtc) {
-		enum intel_display_power_domain domain;
+static void intel_update_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-		for_each_power_domain(domain, crtc->enabled_power_domains)
-			intel_display_power_put(dev_priv, domain);
+	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+			 dev_priv->cdclk_freq);
 
-		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+	/*
+	 * Program the gmbus_freq based on the cdclk frequency.
+	 * BSpec erroneously claims we should aim for 4MHz, but
+	 * in fact 1MHz is the correct frequency.
+	 */
+	if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
 	}
 
-	intel_display_set_init_power(dev_priv, false);
+	if (dev_priv->max_cdclk_freq == 0)
+		intel_update_max_cdclk(dev);
 }
 
-void broxton_set_cdclk(struct drm_device *dev, int frequency)
+static void broxton_set_cdclk(struct drm_device *dev, int frequency)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t divider;
@@ -5466,7 +5462,7 @@ void broxton_set_cdclk(struct drm_device *dev, int frequency)
 		return;
 	}
 
-	dev_priv->cdclk_freq = frequency;
+	intel_update_cdclk(dev);
 }
 
 void broxton_init_cdclk(struct drm_device *dev)
@@ -5641,6 +5637,7 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
 {
+	struct drm_device *dev = dev_priv->dev;
 	u32 freq_select, pcu_ack;
 
 	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
@@ -5681,6 +5678,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
 	mutex_lock(&dev_priv->rps.hw_lock);
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
 	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_update_cdclk(dev);
 }
 
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
@@ -5714,16 +5713,13 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
 	/* enable PG1 and Misc I/O */
 	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 
-	/* DPLL0 already enabed !? */
-	if (I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE) {
-		DRM_DEBUG_DRIVER("DPLL0 already running\n");
-		return;
+	/* DPLL0 not enabled (happens on early BIOS versions) */
+	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
+		/* enable DPLL0 */
+		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
+		skl_dpll0_enable(dev_priv, required_vco);
 	}
 
-	/* enable DPLL0 */
-	required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
-	skl_dpll0_enable(dev_priv, required_vco);
-
 	/* set CDCLK to the frequency the BIOS chose */
 	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
 
@@ -5751,22 +5747,6 @@ static int valleyview_get_vco(struct drm_i915_private *dev_priv)
 	return vco_freq[hpll_freq] * 1000;
 }
 
-static void vlv_update_cdclk(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
-			 dev_priv->cdclk_freq);
-
-	/*
-	 * Program the gmbus_freq based on the cdclk frequency.
-	 * BSpec erroneously claims we should aim for 4MHz, but
-	 * in fact 1MHz is the correct frequency.
-	 */
-	I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-}
-
 /* Adjust CDclk dividers to allow high res or save power if possible */
 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 {
@@ -5830,7 +5810,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 
 	mutex_unlock(&dev_priv->sb_lock);
 
-	vlv_update_cdclk(dev);
+	intel_update_cdclk(dev);
 }
 
 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -5871,7 +5851,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	vlv_update_cdclk(dev);
+	intel_update_cdclk(dev);
 }
 
 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -5934,11 +5914,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
 	int max_pixclk = 0;
 
 	for_each_intel_crtc(dev, intel_crtc) {
-		if (state)
-			crtc_state =
-				intel_atomic_get_crtc_state(state, intel_crtc);
-		else
-			crtc_state = intel_crtc->config;
+		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
 		if (IS_ERR(crtc_state))
 			return PTR_ERR(crtc_state);
 
@@ -5952,39 +5928,32 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
 	return max_pixclk;
 }
 
-static int valleyview_modeset_global_pipes(struct drm_atomic_state *state)
+static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->dev);
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	int max_pixclk = intel_mode_max_pixclk(state->dev, state);
-	int cdclk, i;
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int max_pixclk = intel_mode_max_pixclk(dev, state);
 
 	if (max_pixclk < 0)
 		return max_pixclk;
 
-	if (IS_VALLEYVIEW(dev_priv))
-		cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
-	else
-		cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+	to_intel_atomic_state(state)->cdclk =
+		valleyview_calc_cdclk(dev_priv, max_pixclk);
 
-	if (cdclk == dev_priv->cdclk_freq)
-		return 0;
+	return 0;
+}
 
-	/* add all active pipes to the state */
-	for_each_crtc(state->dev, crtc) {
-		if (!crtc->state->enable)
-			continue;
+static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int max_pixclk = intel_mode_max_pixclk(dev, state);
 
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-	}
+	if (max_pixclk < 0)
+		return max_pixclk;
 
-	/* disable/enable all currently active pipes while we change cdclk */
-	for_each_crtc_in_state(state, crtc, crtc_state, i)
-		if (crtc_state->enable)
-			crtc_state->mode_changed = true;
+	to_intel_atomic_state(state)->cdclk =
+		broxton_calc_cdclk(dev_priv, max_pixclk);
 
 	return 0;
 }
@@ -6001,7 +5970,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
 	if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
 		/* CHV suggested value is 31 or 63 */
 		if (IS_CHERRYVIEW(dev_priv))
-			credits = PFI_CREDIT_31;
+			credits = PFI_CREDIT_63;
 		else
 			credits = PFI_CREDIT(15);
 	} else {
@@ -6025,41 +5994,31 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
 	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
 }
 
-static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state)
+static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int max_pixclk = intel_mode_max_pixclk(dev, NULL);
-	int req_cdclk;
 
-	/* The path in intel_mode_max_pixclk() with a NULL atomic state should
-	 * never fail. */
-	if (WARN_ON(max_pixclk < 0))
-		return;
-
-	req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
-
-	if (req_cdclk != dev_priv->cdclk_freq) {
-		/*
-		 * FIXME: We can end up here with all power domains off, yet
-		 * with a CDCLK frequency other than the minimum. To account
-		 * for this take the PIPE-A power domain, which covers the HW
-		 * blocks needed for the following programming. This can be
-		 * removed once it's guaranteed that we get here either with
-		 * the minimum CDCLK set, or the required power domains
-		 * enabled.
-		 */
-		intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+	/*
+	 * FIXME: We can end up here with all power domains off, yet
+	 * with a CDCLK frequency other than the minimum. To account
+	 * for this take the PIPE-A power domain, which covers the HW
+	 * blocks needed for the following programming. This can be
+	 * removed once it's guaranteed that we get here either with
+	 * the minimum CDCLK set, or the required power domains
+	 * enabled.
+	 */
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 
-		if (IS_CHERRYVIEW(dev))
-			cherryview_set_cdclk(dev, req_cdclk);
-		else
-			valleyview_set_cdclk(dev, req_cdclk);
+	if (IS_CHERRYVIEW(dev))
+		cherryview_set_cdclk(dev, req_cdclk);
+	else
+		valleyview_set_cdclk(dev, req_cdclk);
 
-		vlv_program_pfi_credits(dev_priv);
+	vlv_program_pfi_credits(dev_priv);
 
-		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
-	}
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -6071,9 +6030,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	bool is_dsi;
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
@@ -6122,7 +6079,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
 	intel_crtc_load_lut(crtc);
 
-	intel_update_watermarks(crtc);
 	intel_enable_pipe(intel_crtc);
 
 	assert_vblank_disabled(crtc);
@@ -6149,9 +6105,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	i9xx_set_pll_dividers(intel_crtc);
@@ -6211,9 +6165,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	if (!intel_crtc->active)
-		return;
-
 	/*
 	 * On gen2 planes are double buffered but the pipe isn't, so we must
 	 * wait for planes to fully turn off before disabling the pipe.
@@ -6250,88 +6201,89 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
-
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
 }
 
-static void i9xx_crtc_off(struct drm_crtc *crtc)
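+/*
+ * Turns off a crtc directly, bypassing a full atomic commit; the name
+ * suggests this is meant for callers such as hw state sanitization,
+ * where the software state is already known to be stale.
+ */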
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 {
-}
-
-/* Master function to enable/disable CRTC and corresponding power wells */
-void intel_crtc_control(struct drm_crtc *crtc, bool enable)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	enum intel_display_power_domain domain;
 	unsigned long domains;
 
-	if (enable) {
-		if (!intel_crtc->active) {
-			domains = get_crtc_power_domains(crtc);
-			for_each_power_domain(domain, domains)
-				intel_display_power_get(dev_priv, domain);
-			intel_crtc->enabled_power_domains = domains;
-
-			dev_priv->display.crtc_enable(crtc);
-			intel_crtc_enable_planes(crtc);
-		}
-	} else {
-		if (intel_crtc->active) {
-			intel_crtc_disable_planes(crtc);
-			dev_priv->display.crtc_disable(crtc);
+	if (!intel_crtc->active)
+		return;
 
-			domains = intel_crtc->enabled_power_domains;
-			for_each_power_domain(domain, domains)
-				intel_display_power_put(dev_priv, domain);
-			intel_crtc->enabled_power_domains = 0;
-		}
+	if (to_intel_plane_state(crtc->primary->state)->visible) {
+		intel_crtc_wait_for_pending_flips(crtc);
+		intel_pre_disable_primary(crtc);
 	}
+
+	intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
+	dev_priv->display.crtc_disable(crtc);
+	intel_disable_shared_dpll(intel_crtc);
+
+	domains = intel_crtc->enabled_power_domains;
+	for_each_power_domain(domain, domains)
+		intel_display_power_put(dev_priv, domain);
+	intel_crtc->enabled_power_domains = 0;
 }
 
-/**
- * Sets the power management mode of the pipe and plane.
+/*
+ * Turn all crtcs off, but do not adjust state.
+ * This has to be paired with a call to intel_modeset_setup_hw_state.
  */
-void intel_crtc_update_dpms(struct drm_crtc *crtc)
+int intel_display_suspend(struct drm_device *dev)
 {
-	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *intel_encoder;
-	bool enable = false;
-
-	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
-		enable |= intel_encoder->connectors_active;
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+	struct drm_atomic_state *state;
+	struct drm_crtc *crtc;
+	unsigned crtc_mask = 0;
+	int ret = 0;
 
-	intel_crtc_control(crtc, enable);
+	if (WARN_ON(!ctx))
+		return 0;
 
-	crtc->state->active = enable;
-}
+	lockdep_assert_held(&ctx->ww_ctx);
+	state = drm_atomic_state_alloc(dev);
+	if (WARN_ON(!state))
+		return -ENOMEM;
 
-static void intel_crtc_disable(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_connector *connector;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	state->acquire_ctx = ctx;
+	state->allow_modeset = true;
 
-	intel_crtc_disable_planes(crtc);
-	dev_priv->display.crtc_disable(crtc);
-	dev_priv->display.off(crtc);
+	for_each_crtc(dev, crtc) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_crtc_state(state, crtc);
 
-	drm_plane_helper_disable(crtc->primary);
+		ret = PTR_ERR_OR_ZERO(crtc_state);
+		if (ret)
+			goto free;
 
-	/* Update computed state. */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder || !connector->encoder->crtc)
+		if (!crtc_state->active)
 			continue;
 
-		if (connector->encoder->crtc != crtc)
-			continue;
+		crtc_state->active = false;
+		crtc_mask |= 1 << drm_crtc_index(crtc);
+	}
 
-		connector->dpms = DRM_MODE_DPMS_OFF;
-		to_intel_encoder(connector->encoder)->connectors_active = false;
+	if (crtc_mask) {
+		ret = drm_atomic_commit(state);
+
+		if (!ret) {
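+			/*
+			 * The commit turned the crtcs off; set active again
+			 * in the software state so that the paired
+			 * intel_modeset_setup_hw_state() call knows which
+			 * crtcs to re-enable.
+			 */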
+			for_each_crtc(dev, crtc)
+				if (crtc_mask & (1 << drm_crtc_index(crtc)))
+					crtc->state->active = true;
+
+			return ret;
+		}
 	}
+
+free:
+	if (ret)
+		DRM_ERROR("Suspending crtcs failed with %i\n", ret);
+	drm_atomic_state_free(state);
+	return ret;
 }
 
 void intel_encoder_destroy(struct drm_encoder *encoder)
@@ -6342,62 +6294,42 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
 	kfree(intel_encoder);
 }
 
-/* Simple dpms helper for encoders with just one connector, no cloning and only
- * one kind of off state. It clamps all !ON modes to fully OFF and changes the
- * state of the entire output pipe. */
-static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
-{
-	if (mode == DRM_MODE_DPMS_ON) {
-		encoder->connectors_active = true;
-
-		intel_crtc_update_dpms(encoder->base.crtc);
-	} else {
-		encoder->connectors_active = false;
-
-		intel_crtc_update_dpms(encoder->base.crtc);
-	}
-}
-
 /* Cross check the actual hw state with our own modeset state tracking (and it's
  * internal consistency). */
 static void intel_connector_check_state(struct intel_connector *connector)
 {
+	struct drm_crtc *crtc = connector->base.state->crtc;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.base.id,
+		      connector->base.name);
+
 	if (connector->get_hw_state(connector)) {
-		struct intel_encoder *encoder = connector->encoder;
-		struct drm_crtc *crtc;
-		bool encoder_enabled;
-		enum pipe pipe;
+		struct drm_encoder *encoder = &connector->encoder->base;
+		struct drm_connector_state *conn_state = connector->base.state;
 
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-			      connector->base.base.id,
-			      connector->base.name);
+		I915_STATE_WARN(!crtc,
+			 "connector enabled without attached crtc\n");
 
-		/* there is no real hw state for MST connectors */
-		if (connector->mst_port)
+		if (!crtc)
 			return;
 
-		I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
-		     "wrong connector dpms state\n");
-		I915_STATE_WARN(connector->base.encoder != &encoder->base,
-		     "active connector not linked to encoder\n");
-
-		if (encoder) {
-			I915_STATE_WARN(!encoder->connectors_active,
-			     "encoder->connectors_active not set\n");
+		I915_STATE_WARN(!crtc->state->active,
+		      "connector is active, but attached crtc isn't\n");
 
-			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
-			I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
-			if (I915_STATE_WARN_ON(!encoder->base.crtc))
-				return;
+		if (!encoder)
+			return;
 
-			crtc = encoder->base.crtc;
+		I915_STATE_WARN(conn_state->best_encoder != encoder,
+			"atomic encoder doesn't match attached encoder\n");
 
-			I915_STATE_WARN(!crtc->state->enable,
-					"crtc not enabled\n");
-			I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
-			I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
-			     "encoder active on the wrong pipe\n");
-		}
+		I915_STATE_WARN(conn_state->crtc != encoder->crtc,
+			"attached encoder crtc differs from connector crtc\n");
+	} else {
+		I915_STATE_WARN(crtc && crtc->state->active,
+			"attached crtc is active, but connector isn't\n");
+		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
+			"best encoder set without crtc!\n");
 	}
 }
 
@@ -6429,26 +6361,6 @@ struct intel_connector *intel_connector_alloc(void)
 	return connector;
 }
 
-/* Even simpler default implementation, if there's really no special case to
- * consider. */
-void intel_connector_dpms(struct drm_connector *connector, int mode)
-{
-	/* All the simple cases only support two dpms states. */
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	if (connector->encoder)
-		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
-
-	intel_modeset_check_state(connector->dev);
-}
-
 /* Simple connector->get_hw_state implementation for encoders that support only
  * one connector and no cloning and hence the encoder state determines the state
  * of the connector. */
@@ -6586,12 +6498,36 @@ retry:
 	return ret;
 }
 
+static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
+				     struct intel_crtc_state *pipe_config)
+{
+	if (pipe_config->pipe_bpp > 24)
+		return false;
+
+	/* HSW can handle pixel rate up to cdclk? */
+	if (IS_HASWELL(dev_priv->dev))
+		return true;
+
+	/*
+	 * We compare against max which means we must take
+	 * the increased cdclk requirement into account when
+	 * calculating the new cdclk.
+	 *
+	 * Should measure whether using a lower cdclk w/o IPS is a net win.
+	 */
+	return ilk_pipe_pixel_rate(pipe_config) <=
+		dev_priv->max_cdclk_freq * 95 / 100;
+}
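+
+/*
+ * Example (illustrative): with max_cdclk_freq = 450000 kHz, a non-HSW
+ * pipe may use IPS as long as its pixel rate does not exceed
+ * 450000 * 95 / 100 = 427500 kHz.
+ */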
+
 static void hsw_compute_ips_config(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config)
 {
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	pipe_config->ips_enabled = i915.enable_ips &&
-				   hsw_crtc_supports_ips(crtc) &&
-				   pipe_config->pipe_bpp <= 24;
+		hsw_crtc_supports_ips(crtc) &&
+		pipe_config_supports_ips(dev_priv, pipe_config);
 }
 
 static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -6600,12 +6536,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	int ret;
 
 	/* FIXME should check pixel clock limits on all platforms */
 	if (INTEL_INFO(dev)->gen < 4) {
-		int clock_limit =
-			dev_priv->display.get_display_clock_speed(dev);
+		int clock_limit = dev_priv->max_cdclk_freq;
 
 		/*
 		 * Enable pixel doubling when the dot clock
@@ -6647,14 +6581,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	if (pipe_config->has_pch_encoder)
 		return ironlake_fdi_compute_config(crtc, pipe_config);
 
-	/* FIXME: remove below call once atomic mode set is place and all crtc
-	 * related checks called from atomic_crtc_check function */
-	ret = 0;
-	DRM_DEBUG_KMS("intel_crtc = %p drm_state (pipe_config->base.state) = %p\n",
-		crtc, pipe_config->base.state);
-	ret = intel_atomic_setup_scalers(dev, crtc, pipe_config);
-
-	return ret;
+	return 0;
 }
 
 static int skylake_get_display_clock_speed(struct drm_device *dev)
@@ -6664,10 +6591,8 @@ static int skylake_get_display_clock_speed(struct drm_device *dev)
 	uint32_t cdctl = I915_READ(CDCLK_CTL);
 	uint32_t linkrate;
 
-	if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
-		WARN(1, "LCPLL1 not enabled\n");
+	if (!(lcpll1 & LCPLL_PLL_ENABLE))
 		return 24000; /* 24MHz is the cd freq with NSSC ref */
-	}
 
 	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
 		return 540000;
@@ -6706,6 +6631,34 @@ static int skylake_get_display_clock_speed(struct drm_device *dev)
 	return 24000;
 }
 
+static int broxton_get_display_clock_speed(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	uint32_t cdctl = I915_READ(CDCLK_CTL);
+	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
+	int cdclk;
+
+	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
+		return 19200;
+
+	cdclk = 19200 * pll_ratio / 2;
+
+	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+	case BXT_CDCLK_CD2X_DIV_SEL_1:
+		return cdclk;  /* 576MHz or 624MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+		return cdclk * 2 / 3; /* 384MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_2:
+		return cdclk / 2; /* 288MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_4:
+		return cdclk / 4; /* 144MHz */
+	}
+
+	/* error case, treat it as if the DE PLL isn't enabled */
+	return 19200;
+}
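
The read-out above reduces to VCO = 19.2 MHz * pll_ratio, cdclk = (VCO / 2) / CD2X divider, where the divider is 1, 1.5, 2 or 4. A standalone sketch of that arithmetic; the doubled-divider encoding is a convenience of this sketch so 1.5 stays in integer math, not the register layout:

#include <stdio.h>

/* cd2x_div_times_2 is 2x the real CD2X divisor (2, 3, 4 or 8) */
static int bxt_cdclk_khz(int pll_ratio, int cd2x_div_times_2)
{
	int vco_by_2 = 19200 * pll_ratio / 2;	/* kHz */
	return vco_by_2 * 2 / cd2x_div_times_2;
}

int main(void)
{
	printf("%d\n", bxt_cdclk_khz(60, 2)); /* 576000: divider 1 */
	printf("%d\n", bxt_cdclk_khz(60, 3)); /* 384000: divider 1.5 */
	printf("%d\n", bxt_cdclk_khz(60, 4)); /* 288000: divider 2 */
	printf("%d\n", bxt_cdclk_khz(60, 8)); /* 144000: divider 4 */
	return 0;
}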
+
 static int broadwell_get_display_clock_speed(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6834,20 +6787,37 @@ static int i865_get_display_clock_speed(struct drm_device *dev)
 	return 266667;
 }
 
-static int i855_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_device *dev)
 {
 	u16 hpllcc = 0;
+
+	/*
+	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
+	 * encoding is different :(
+	 * FIXME is this the right way to detect 852GM/852GMV?
+	 */
+	if (dev->pdev->revision == 0x1)
+		return 133333;
+
+	pci_bus_read_config_word(dev->pdev->bus,
+				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+
 	/* Assume that the hardware is in the high speed state.  This
 	 * should be the default.
 	 */
 	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
 	case GC_CLOCK_133_200:
+	case GC_CLOCK_133_200_2:
 	case GC_CLOCK_100_200:
 		return 200000;
 	case GC_CLOCK_166_250:
 		return 250000;
 	case GC_CLOCK_100_133:
 		return 133333;
+	case GC_CLOCK_133_266:
+	case GC_CLOCK_133_266_2:
+	case GC_CLOCK_166_266:
+		return 266667;
 	}
 
 	/* Shouldn't happen */
@@ -6859,6 +6829,175 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
 	return 133333;
 }
 
+static unsigned int intel_hpll_vco(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	static const unsigned int blb_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+		[4] = 6400000,
+	};
+	static const unsigned int pnv_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+		[4] = 2666667,
+	};
+	static const unsigned int cl_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 6400000,
+		[4] = 3333333,
+		[5] = 3566667,
+		[6] = 4266667,
+	};
+	static const unsigned int elk_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+	};
+	static const unsigned int ctg_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 6400000,
+		[4] = 2666667,
+		[5] = 4266667,
+	};
+	const unsigned int *vco_table;
+	unsigned int vco;
+	uint8_t tmp = 0;
+
+	/* FIXME other chipsets? */
+	if (IS_GM45(dev))
+		vco_table = ctg_vco;
+	else if (IS_G4X(dev))
+		vco_table = elk_vco;
+	else if (IS_CRESTLINE(dev))
+		vco_table = cl_vco;
+	else if (IS_PINEVIEW(dev))
+		vco_table = pnv_vco;
+	else if (IS_G33(dev))
+		vco_table = blb_vco;
+	else
+		return 0;
+
+	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+
+	vco = vco_table[tmp & 0x7];
+	if (vco == 0)
+		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+	else
+		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+
+	return vco;
+}
+
+static int gm45_get_display_clock_speed(struct drm_device *dev)
+{
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = (tmp >> 12) & 0x1;
+
+	switch (vco) {
+	case 2666667:
+	case 4000000:
+	case 5333333:
+		return cdclk_sel ? 333333 : 222222;
+	case 3200000:
+		return cdclk_sel ? 320000 : 228571;
+	default:
+		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
+		return 222222;
+	}
+}
+
+static int i965gm_get_display_clock_speed(struct drm_device *dev)
+{
+	static const uint8_t div_3200[] = { 16, 10,  8 };
+	static const uint8_t div_4000[] = { 20, 12, 10 };
+	static const uint8_t div_5333[] = { 24, 16, 14 };
+	const uint8_t *div_table;
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
+
+	if (cdclk_sel >= ARRAY_SIZE(div_3200))
+		goto fail;
+
+	switch (vco) {
+	case 3200000:
+		div_table = div_3200;
+		break;
+	case 4000000:
+		div_table = div_4000;
+		break;
+	case 5333333:
+		div_table = div_5333;
+		break;
+	default:
+		goto fail;
+	}
+
+	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
+	return 200000;
+}
+
+static int g33_get_display_clock_speed(struct drm_device *dev)
+{
+	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
+	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
+	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
+	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
+	const uint8_t *div_table;
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = (tmp >> 4) & 0x7;
+
+	if (cdclk_sel >= ARRAY_SIZE(div_3200))
+		goto fail;
+
+	switch (vco) {
+	case 3200000:
+		div_table = div_3200;
+		break;
+	case 4000000:
+		div_table = div_4000;
+		break;
+	case 4800000:
+		div_table = div_4800;
+		break;
+	case 5333333:
+		div_table = div_5333;
+		break;
+	default:
+		goto fail;
+	}
+
+	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
+	return 190476;
+}
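
gm45, i965gm and g33 above all share one recipe: read the HPLL VCO, pick the divider table for that VCO, index it with a GCFGC field, and divide with rounding. A compact userspace sketch of the g33 variant, with the tables truncated to two VCOs and the GCFGC decoding replaced by a plain parameter:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int g33_cdclk_khz(unsigned int vco_khz, unsigned int cdclk_sel)
{
	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
	const uint8_t *table;

	if (cdclk_sel >= sizeof(div_3200))
		return 190476;			/* fallback, as in the driver */

	switch (vco_khz) {
	case 3200000: table = div_3200; break;
	case 4000000: table = div_4000; break;
	default:      return 190476;
	}
	return DIV_ROUND_CLOSEST(vco_khz, table[cdclk_sel]);
}

int main(void)
{
	printf("%d\n", g33_cdclk_khz(3200000, 2)); /* 3200000/8 = 400000 */
	return 0;
}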
+
 static void
 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 {
@@ -7064,8 +7203,8 @@ void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
 		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
 }
 
-static void vlv_update_pll(struct intel_crtc *crtc,
-			   struct intel_crtc_state *pipe_config)
+static void vlv_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *pipe_config)
 {
 	u32 dpll, dpll_md;
 
@@ -7074,8 +7213,8 @@ static void vlv_update_pll(struct intel_crtc *crtc,
 	 * clock for pipe B, since VGA hotplug / manual detection depends
 	 * on it.
 	 */
-	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
-		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
+		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
 	/* We should never disable this, set it here for state tracking */
 	if (crtc->pipe == PIPE_B)
 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7178,11 +7317,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
-static void chv_update_pll(struct intel_crtc *crtc,
-			   struct intel_crtc_state *pipe_config)
+static void chv_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *pipe_config)
 {
-	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
-		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
 		DPLL_VCO_ENABLE;
 	if (crtc->pipe != PIPE_A)
 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7318,11 +7457,11 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
 	};
 
 	if (IS_CHERRYVIEW(dev)) {
-		chv_update_pll(crtc, &pipe_config);
+		chv_compute_dpll(crtc, &pipe_config);
 		chv_prepare_pll(crtc, &pipe_config);
 		chv_enable_pll(crtc, &pipe_config);
 	} else {
-		vlv_update_pll(crtc, &pipe_config);
+		vlv_compute_dpll(crtc, &pipe_config);
 		vlv_prepare_pll(crtc, &pipe_config);
 		vlv_enable_pll(crtc, &pipe_config);
 	}
@@ -7344,10 +7483,10 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
 		vlv_disable_pll(to_i915(dev), pipe);
 }
 
-static void i9xx_update_pll(struct intel_crtc *crtc,
-			    struct intel_crtc_state *crtc_state,
-			    intel_clock_t *reduced_clock,
-			    int num_connectors)
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+			      struct intel_crtc_state *crtc_state,
+			      intel_clock_t *reduced_clock,
+			      int num_connectors)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7421,10 +7560,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
 	}
 }
 
-static void i8xx_update_pll(struct intel_crtc *crtc,
-			    struct intel_crtc_state *crtc_state,
-			    intel_clock_t *reduced_clock,
-			    int num_connectors)
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+			      struct intel_crtc_state *crtc_state,
+			      intel_clock_t *reduced_clock,
+			      int num_connectors)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7584,9 +7723,14 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
 
 	mode->flags = pipe_config->base.adjusted_mode.flags;
+	mode->type = DRM_MODE_TYPE_DRIVER;
 
 	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
 	mode->flags |= pipe_config->base.adjusted_mode.flags;
+
+	mode->hsync = drm_mode_hsync(mode);
+	mode->vrefresh = drm_mode_vrefresh(mode);
+	drm_mode_set_name(mode);
 }
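
drm_mode_hsync() and drm_mode_vrefresh() derive the fields filled in above from the raw timings; to first order vrefresh = clock / (htotal * vtotal) and hsync = clock / htotal, ignoring interlace and doublescan corrections. A worked sketch for a standard 1920x1080@60 CEA timing:

#include <stdio.h>

int main(void)
{
	int clock_khz = 148500, htotal = 2200, vtotal = 1125;

	int vrefresh = clock_khz * 1000 / (htotal * vtotal);	/* Hz */
	int hsync    = clock_khz / htotal;			/* kHz */

	printf("vrefresh=%d Hz hsync=%d kHz\n", vrefresh, hsync); /* 60, 67 */
	return 0;
}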
 
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -7658,9 +7802,9 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int refclk, num_connectors = 0;
-	intel_clock_t clock, reduced_clock;
-	bool ok, has_reduced_clock = false;
-	bool is_lvds = false, is_dsi = false;
+	intel_clock_t clock;
+	bool ok;
+	bool is_dsi = false;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	struct drm_atomic_state *state = crtc_state->base.state;
@@ -7678,9 +7822,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 		encoder = to_intel_encoder(connector_state->best_encoder);
 
 		switch (encoder->type) {
-		case INTEL_OUTPUT_LVDS:
-			is_lvds = true;
-			break;
 		case INTEL_OUTPUT_DSI:
 			is_dsi = true;
 			break;
@@ -7712,19 +7853,6 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 			return -EINVAL;
 		}
 
-		if (is_lvds && dev_priv->lvds_downclock_avail) {
-			/*
-			 * Ensure we match the reduced clock's P to the target
-			 * clock.  If the clocks don't match, we can't switch
-			 * the display clock by using the FP0/FP1. In such case
-			 * we will disable the LVDS downclock feature.
-			 */
-			has_reduced_clock =
-				dev_priv->display.find_dpll(limit, crtc_state,
-							    dev_priv->lvds_downclock,
-							    refclk, &clock,
-							    &reduced_clock);
-		}
 		/* Compat-code for transition, will disappear. */
 		crtc_state->dpll.n = clock.n;
 		crtc_state->dpll.m1 = clock.m1;
@@ -7734,17 +7862,15 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 	}
 
 	if (IS_GEN2(dev)) {
-		i8xx_update_pll(crtc, crtc_state,
-				has_reduced_clock ? &reduced_clock : NULL,
-				num_connectors);
+		i8xx_compute_dpll(crtc, crtc_state, NULL,
+				  num_connectors);
 	} else if (IS_CHERRYVIEW(dev)) {
-		chv_update_pll(crtc, crtc_state);
+		chv_compute_dpll(crtc, crtc_state);
 	} else if (IS_VALLEYVIEW(dev)) {
-		vlv_update_pll(crtc, crtc_state);
+		vlv_compute_dpll(crtc, crtc_state);
 	} else {
-		i9xx_update_pll(crtc, crtc_state,
-				has_reduced_clock ? &reduced_clock : NULL,
-				num_connectors);
+		i9xx_compute_dpll(crtc, crtc_state, NULL,
+				  num_connectors);
 	}
 
 	return 0;
@@ -7804,10 +7930,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
 
-	vlv_clock(refclk, &clock);
-
-	/* clock.dot is the fast clock */
-	pipe_config->port_clock = clock.dot / 5;
+	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
 }
 
 static void
@@ -7906,10 +8029,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
 
-	chv_clock(refclk, &clock);
-
-	/* clock.dot is the fast clock */
-	pipe_config->port_clock = clock.dot / 5;
+	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
 }
 
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
@@ -8558,9 +8678,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int refclk;
 	const intel_limit_t *limit;
-	bool ret, is_lvds = false;
-
-	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
+	bool ret;
 
 	refclk = ironlake_get_refclk(crtc_state);
 
@@ -8576,20 +8694,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 	if (!ret)
 		return false;
 
-	if (is_lvds && dev_priv->lvds_downclock_avail) {
-		/*
-		 * Ensure we match the reduced clock's P to the target clock.
-		 * If the clocks don't match, we can't switch the display clock
-		 * by using the FP0/FP1. In such case we will disable the LVDS
-		 * downclock feature.
-		*/
-		*has_reduced_clock =
-			dev_priv->display.find_dpll(limit, crtc_state,
-						    dev_priv->lvds_downclock,
-						    refclk, clock,
-						    reduced_clock);
-	}
-
 	return true;
 }
 
@@ -9297,6 +9401,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 	}
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_update_cdclk(dev_priv->dev);
 }
 
 /*
@@ -9358,21 +9463,160 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 	intel_prepare_ddi(dev);
 }
 
-static void broxton_modeset_global_resources(struct drm_atomic_state *old_state)
+static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+
+	broxton_set_cdclk(dev, req_cdclk);
+}
+
+/* compute the max pixel rate for the new configuration */
+static int ilk_max_pixel_rate(struct drm_atomic_state *state)
+{
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc_state *crtc_state;
+	int max_pixel_rate = 0;
+
+	for_each_intel_crtc(state->dev, intel_crtc) {
+		int pixel_rate;
+
+		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state->base.enable)
+			continue;
+
+		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
+
+		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+		max_pixel_rate = max(max_pixel_rate, pixel_rate);
+	}
+
+	return max_pixel_rate;
+}
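
The DIV_ROUND_UP(pixel_rate * 100, 95) above is the inverse of the 95% check in pipe_config_supports_ips(): an IPS pipe is booked at the cdclk it would actually require. Worked out in a sketch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int pixel_rate = 450000;	/* kHz */
	int inflated = DIV_ROUND_UP(pixel_rate * 100, 95);

	/* 450000 * 100 / 95 = 473685 rounded up: a 450 MHz pipe with
	 * IPS effectively demands ~473.7 MHz of cdclk. */
	printf("%d\n", inflated);
	return 0;
}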
+
+static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
+{
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int max_pixclk = intel_mode_max_pixclk(dev, NULL);
-	int req_cdclk;
+	uint32_t val, data;
+	int ret;
 
-	/* see the comment in valleyview_modeset_global_resources */
-	if (WARN_ON(max_pixclk < 0))
+	if (WARN((I915_READ(LCPLL_CTL) &
+		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+		 "trying to change cdclk frequency with cdclk not enabled\n"))
 		return;
 
-	req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = sandybridge_pcode_write(dev_priv,
+				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	if (ret) {
+		DRM_ERROR("failed to inform pcode about cdclk change\n");
+		return;
+	}
+
+	val = I915_READ(LCPLL_CTL);
+	val |= LCPLL_CD_SOURCE_FCLK;
+	I915_WRITE(LCPLL_CTL, val);
+
+	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
+		DRM_ERROR("Switching to FCLK failed\n");
+
+	val = I915_READ(LCPLL_CTL);
+	val &= ~LCPLL_CLK_FREQ_MASK;
+
+	switch (cdclk) {
+	case 450000:
+		val |= LCPLL_CLK_FREQ_450;
+		data = 0;
+		break;
+	case 540000:
+		val |= LCPLL_CLK_FREQ_54O_BDW;
+		data = 1;
+		break;
+	case 337500:
+		val |= LCPLL_CLK_FREQ_337_5_BDW;
+		data = 2;
+		break;
+	case 675000:
+		val |= LCPLL_CLK_FREQ_675_BDW;
+		data = 3;
+		break;
+	default:
+		WARN(1, "invalid cdclk frequency\n");
+		return;
+	}
+
+	I915_WRITE(LCPLL_CTL, val);
+
+	val = I915_READ(LCPLL_CTL);
+	val &= ~LCPLL_CD_SOURCE_FCLK;
+	I915_WRITE(LCPLL_CTL, val);
 
-	if (req_cdclk != dev_priv->cdclk_freq)
-		broxton_set_cdclk(dev, req_cdclk);
+	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+		DRM_ERROR("Switching back to LCPLL failed\n");
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_update_cdclk(dev);
+
+	WARN(cdclk != dev_priv->cdclk_freq,
+	     "cdclk requested %d kHz but got %d kHz\n",
+	     cdclk, dev_priv->cdclk_freq);
+}
+
+static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	int max_pixclk = ilk_max_pixel_rate(state);
+	int cdclk;
+
+	/*
+	 * FIXME should also account for plane ratio
+	 * once 64bpp pixel formats are supported.
+	 */
+	if (max_pixclk > 540000)
+		cdclk = 675000;
+	else if (max_pixclk > 450000)
+		cdclk = 540000;
+	else if (max_pixclk > 337500)
+		cdclk = 450000;
+	else
+		cdclk = 337500;
+
+	/*
+	 * FIXME: move the cdclk calculation to
+	 * compute_config() so we can fail gracefully.
+	 */
+	if (cdclk > dev_priv->max_cdclk_freq) {
+		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+			  cdclk, dev_priv->max_cdclk_freq);
+		cdclk = dev_priv->max_cdclk_freq;
+	}
+
+	to_intel_atomic_state(state)->cdclk = cdclk;
+
+	return 0;
+}
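
The calc step above quantizes the aggregate pixel rate up to the next legal Broadwell cdclk (337.5, 450, 540 or 675 MHz) and clamps at the platform maximum. A sketch of the selection:

#include <stdio.h>

static int bdw_calc_cdclk(int max_pixclk_khz, int max_cdclk_khz)
{
	int cdclk;

	if (max_pixclk_khz > 540000)
		cdclk = 675000;
	else if (max_pixclk_khz > 450000)
		cdclk = 540000;
	else if (max_pixclk_khz > 337500)
		cdclk = 450000;
	else
		cdclk = 337500;

	return cdclk > max_cdclk_khz ? max_cdclk_khz : cdclk;
}

int main(void)
{
	printf("%d\n", bdw_calc_cdclk(473685, 675000)); /* 540000 */
	return 0;
}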
+
+static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+	struct drm_device *dev = old_state->dev;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+
+	broadwell_set_cdclk(dev, req_cdclk);
 }
 
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
@@ -9889,7 +10133,7 @@ static struct drm_framebuffer *
 mode_fits_in_fbdev(struct drm_device *dev,
 		   struct drm_display_mode *mode)
 {
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_framebuffer *fb;
@@ -9978,7 +10222,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 retry:
 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
 	if (ret)
-		goto fail_unlock;
+		goto fail;
 
 	/*
 	 * Algorithm gets a little messy:
@@ -9996,10 +10240,10 @@ retry:
 
 		ret = drm_modeset_lock(&crtc->mutex, ctx);
 		if (ret)
-			goto fail_unlock;
+			goto fail;
 		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
 		if (ret)
-			goto fail_unlock;
+			goto fail;
 
 		old->dpms_mode = connector->dpms;
 		old->load_detect_temp = false;
@@ -10018,9 +10262,6 @@ retry:
 			continue;
 		if (possible_crtc->state->enable)
 			continue;
-		/* This can occur when applying the pipe A quirk on resume. */
-		if (to_intel_crtc(possible_crtc)->new_enabled)
-			continue;
 
 		crtc = possible_crtc;
 		break;
@@ -10031,20 +10272,17 @@ retry:
 	 */
 	if (!crtc) {
 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
-		goto fail_unlock;
+		goto fail;
 	}
 
 	ret = drm_modeset_lock(&crtc->mutex, ctx);
 	if (ret)
-		goto fail_unlock;
+		goto fail;
 	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
 	if (ret)
-		goto fail_unlock;
-	intel_encoder->new_crtc = to_intel_crtc(crtc);
-	to_intel_connector(connector)->new_encoder = intel_encoder;
+		goto fail;
 
 	intel_crtc = to_intel_crtc(crtc);
-	intel_crtc->new_enabled = true;
 	old->dpms_mode = connector->dpms;
 	old->load_detect_temp = true;
 	old->release_fb = NULL;
@@ -10100,7 +10338,7 @@ retry:
 
 	drm_mode_copy(&crtc_state->base.mode, mode);
 
-	if (intel_set_mode(crtc, state, true)) {
+	if (drm_atomic_commit(state)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
@@ -10112,9 +10350,7 @@ retry:
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 	return true;
 
- fail:
-	intel_crtc->new_enabled = crtc->state->enable;
-fail_unlock:
+fail:
 	drm_atomic_state_free(state);
 	state = NULL;
 
@@ -10160,10 +10396,6 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 		if (IS_ERR(crtc_state))
 			goto fail;
 
-		to_intel_connector(connector)->new_encoder = NULL;
-		intel_encoder->new_crtc = NULL;
-		intel_crtc->new_enabled = false;
-
 		connector_state->best_encoder = NULL;
 		connector_state->crtc = NULL;
 
@@ -10174,7 +10406,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 		if (ret)
 			goto fail;
 
-		ret = intel_set_mode(crtc, state, true);
+		ret = drm_atomic_commit(state);
 		if (ret)
 			goto fail;
 
@@ -10222,6 +10454,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 	u32 dpll = pipe_config->dpll_hw_state.dpll;
 	u32 fp;
 	intel_clock_t clock;
+	int port_clock;
 	int refclk = i9xx_pll_refclk(dev, pipe_config);
 
 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
@@ -10262,9 +10495,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 		}
 
 		if (IS_PINEVIEW(dev))
-			pineview_clock(refclk, &clock);
+			port_clock = pnv_calc_dpll_params(refclk, &clock);
 		else
-			i9xx_clock(refclk, &clock);
+			port_clock = i9xx_calc_dpll_params(refclk, &clock);
 	} else {
 		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
@@ -10290,7 +10523,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 				clock.p2 = 2;
 		}
 
-		i9xx_clock(refclk, &clock);
+		port_clock = i9xx_calc_dpll_params(refclk, &clock);
 	}
 
 	/*
@@ -10298,7 +10531,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 	 * port_clock to compute adjusted_mode.crtc_clock in the
 	 * encoder's get_config() function.
 	 */
-	pipe_config->port_clock = clock.dot;
+	pipe_config->port_clock = port_clock;
 }
 
 int intel_dotclock_calculate(int link_freq,
@@ -10387,42 +10620,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	return mode;
 }
 
-static void intel_decrease_pllclock(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	if (!HAS_GMCH_DISPLAY(dev))
-		return;
-
-	if (!dev_priv->lvds_downclock_avail)
-		return;
-
-	/*
-	 * Since this is called by a timer, we should never get here in
-	 * the manual case.
-	 */
-	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
-		int pipe = intel_crtc->pipe;
-		int dpll_reg = DPLL(pipe);
-		int dpll;
-
-		DRM_DEBUG_DRIVER("downclocking LVDS\n");
-
-		assert_panel_unlocked(dev_priv, pipe);
-
-		dpll = I915_READ(dpll_reg);
-		dpll |= DISPLAY_RATE_SELECT_FPA1;
-		I915_WRITE(dpll_reg, dpll);
-		intel_wait_for_vblank(dev, pipe);
-		dpll = I915_READ(dpll_reg);
-		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
-			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
-	}
-
-}
-
 void intel_mark_busy(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10440,20 +10637,12 @@ void intel_mark_busy(struct drm_device *dev)
 void intel_mark_idle(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
 
 	if (!dev_priv->mm.busy)
 		return;
 
 	dev_priv->mm.busy = false;
 
-	for_each_crtc(dev, crtc) {
-		if (!crtc->primary->fb)
-			continue;
-
-		intel_decrease_pllclock(crtc);
-	}
-
 	if (INTEL_INFO(dev)->gen >= 6)
 		gen6_rps_idle(dev->dev_private);
 
@@ -10485,24 +10674,23 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 {
 	struct intel_unpin_work *work =
 		container_of(__work, struct intel_unpin_work, work);
-	struct drm_device *dev = work->crtc->dev;
-	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_plane *primary = crtc->base.primary;
 
 	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
+	intel_unpin_fb_obj(work->old_fb, primary->state);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 
-	intel_fbc_update(dev);
-
 	if (work->flip_queued_req)
 		i915_gem_request_assign(&work->flip_queued_req, NULL);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
 	drm_framebuffer_unreference(work->old_fb);
 
-	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
-	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
+	atomic_dec(&crtc->unpin_work_count);
 
 	kfree(work);
 }
@@ -10635,14 +10823,15 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -10662,7 +10851,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, 0); /* aux display base address, unused */
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10670,14 +10858,15 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -10694,7 +10883,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, MI_NOOP);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10702,15 +10890,16 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -10733,7 +10922,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10741,15 +10929,16 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -10769,7 +10958,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10777,9 +10965,10 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
@@ -10821,11 +11010,11 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * then do the cacheline alignment, and finally emit the
 	 * MI_DISPLAY_FLIP.
 	 */
-	ret = intel_ring_cacheline_align(ring);
+	ret = intel_ring_cacheline_align(req);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, len);
+	ret = intel_ring_begin(req, len);
 	if (ret)
 		return ret;
 
@@ -10864,7 +11053,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, (MI_NOOP));
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10973,12 +11161,11 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
 static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
-	bool atomic_update;
 	u32 start_vbl_count;
 
 	intel_mark_page_flip_active(intel_crtc);
 
-	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+	intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
 	if (INTEL_INFO(dev)->gen >= 9)
 		skl_do_mmio_flip(intel_crtc);
@@ -10986,8 +11173,7 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 		/* use_mmio_flip() restricts MMIO flips to ilk+ */
 		ilk_do_mmio_flip(intel_crtc);
 
-	if (atomic_update)
-		intel_pipe_update_end(intel_crtc, start_vbl_count);
+	intel_pipe_update_end(intel_crtc, start_vbl_count);
 }
 
 static void intel_mmio_flip_work_func(struct work_struct *work)
@@ -11034,7 +11220,7 @@ static int intel_default_queue_flip(struct drm_device *dev,
 				    struct drm_crtc *crtc,
 				    struct drm_framebuffer *fb,
 				    struct drm_i915_gem_object *obj,
-				    struct intel_engine_cs *ring,
+				    struct drm_i915_gem_request *req,
 				    uint32_t flags)
 {
 	return -ENODEV;
@@ -11120,6 +11306,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_unpin_work *work;
 	struct intel_engine_cs *ring;
 	bool mmio_flip;
+	struct drm_i915_gem_request *request = NULL;
 	int ret;
 
 	/*
@@ -11226,7 +11413,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 */
 	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
 					 crtc->primary->state,
-					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring);
+					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
 	if (ret)
 		goto cleanup_pending;
 
@@ -11242,31 +11429,34 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		i915_gem_request_assign(&work->flip_queued_req,
 					obj->last_write_req);
 	} else {
-		if (obj->last_write_req) {
-			ret = i915_gem_check_olr(obj->last_write_req);
+		if (!request) {
+			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
 			if (ret)
 				goto cleanup_unpin;
 		}
 
-		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
 						   page_flip_flags);
 		if (ret)
 			goto cleanup_unpin;
 
-		i915_gem_request_assign(&work->flip_queued_req,
-					intel_ring_get_request(ring));
+		i915_gem_request_assign(&work->flip_queued_req, request);
 	}
 
+	if (request)
+		i915_add_request_no_flush(request);
+
 	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
 	work->enable_stall_check = true;
 
 	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
-			  INTEL_FRONTBUFFER_PRIMARY(pipe));
-
-	intel_fbc_disable(dev);
-	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+			  to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
 
+	intel_fbc_disable_crtc(intel_crtc);
+	intel_frontbuffer_flip_prepare(dev,
+				       to_intel_plane(primary)->frontbuffer_bit);
+
 	trace_i915_flip_request(intel_crtc->plane, obj);
 
 	return 0;
@@ -11274,6 +11464,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_unpin:
 	intel_unpin_fb_obj(fb, crtc->primary->state);
 cleanup_pending:
+	if (request)
+		i915_gem_request_cancel(request);
 	atomic_dec(&intel_crtc->unpin_work_count);
 	mutex_unlock(&dev->struct_mutex);
 cleanup:
@@ -11292,8 +11484,35 @@ free_work:
 	kfree(work);
 
 	if (ret == -EIO) {
+		struct drm_atomic_state *state;
+		struct drm_plane_state *plane_state;
+
 out_hang:
-		ret = intel_plane_restore(primary);
+		state = drm_atomic_state_alloc(dev);
+		if (!state)
+			return -ENOMEM;
+		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
+retry:
+		plane_state = drm_atomic_get_plane_state(state, primary);
+		ret = PTR_ERR_OR_ZERO(plane_state);
+		if (!ret) {
+			drm_atomic_set_fb_for_plane(plane_state, fb);
+
+			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+			if (!ret)
+				ret = drm_atomic_commit(state);
+		}
+
+		if (ret == -EDEADLK) {
+			drm_modeset_backoff(state->acquire_ctx);
+			drm_atomic_state_clear(state);
+			goto retry;
+		}
+
+		if (ret)
+			drm_atomic_state_free(state);
+
 		if (ret == 0 && event) {
 			spin_lock_irq(&dev->event_lock);
 			drm_send_vblank_event(dev, pipe, event);
@@ -11303,44 +11522,274 @@ out_hang:
 	return ret;
 }
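
The out_hang path above follows the standard atomic retry idiom: on -EDEADLK, back off all modeset locks, clear the state, and retry from the top. A toy model of the loop; try_commit() and backoff() are stand-ins for drm_atomic_commit() and drm_modeset_backoff() plus drm_atomic_state_clear():

#include <stdio.h>

#define EDEADLK 35

/* toy stand-ins: the second attempt succeeds */
static int attempts;
static int try_commit(void) { return ++attempts < 2 ? -EDEADLK : 0; }
static void backoff(void)   { /* drop and re-take locks in the real code */ }

int main(void)
{
	int ret;
retry:
	ret = try_commit();
	if (ret == -EDEADLK) {
		backoff();
		goto retry;
	}
	printf("committed after %d attempt(s), ret=%d\n", attempts, ret);
	return 0;
}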
 
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
-	.mode_set_base_atomic = intel_pipe_set_base_atomic,
-	.load_lut = intel_crtc_load_lut,
-	.atomic_begin = intel_begin_crtc_commit,
-	.atomic_flush = intel_finish_crtc_commit,
-};
 
 /**
- * intel_modeset_update_staged_output_state
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
  *
- * Updates the staged output configuration state, e.g. after we've read out the
- * current hw state.
+ * Returns: true if the watermarks need to be recalculated, false otherwise.
  */
-static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+static bool intel_wm_need_update(struct drm_plane *plane,
+				 struct drm_plane_state *state)
+{
+	/* Update watermarks on tiling or rotation changes. */
+	if (!plane->state->fb || !state->fb ||
+	    plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+	    plane->state->rotation != state->rotation)
+		return true;
+
+	if (plane->state->crtc_w != state->crtc_w)
+		return true;
+
+	return false;
+}
+
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+				    struct drm_plane_state *plane_state)
+{
+	struct drm_crtc *crtc = crtc_state->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_plane *plane = plane_state->plane;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane_state *old_plane_state =
+		to_intel_plane_state(plane->state);
+	int idx = intel_crtc->base.base.id, ret;
+	int i = drm_plane_index(plane);
+	bool mode_changed = needs_modeset(crtc_state);
+	bool was_crtc_enabled = crtc->state->active;
+	bool is_crtc_enabled = crtc_state->active;
+
+	bool turn_off, turn_on, visible, was_visible;
+	struct drm_framebuffer *fb = plane_state->fb;
+
+	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
+	    plane->type != DRM_PLANE_TYPE_CURSOR) {
+		ret = skl_update_scaler_plane(
+			to_intel_crtc_state(crtc_state),
+			to_intel_plane_state(plane_state));
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Disabling a plane is always okay; we just need to update
+	 * fb tracking in a special way since cleanup_fb() won't
+	 * get called by the plane helpers.
+	 */
+	if (old_plane_state->base.fb && !fb)
+		intel_crtc->atomic.disabled_planes |= 1 << i;
+
+	was_visible = old_plane_state->visible;
+	visible = to_intel_plane_state(plane_state)->visible;
+
+	if (!was_crtc_enabled && WARN_ON(was_visible))
+		was_visible = false;
+
+	if (!is_crtc_enabled && WARN_ON(visible))
+		visible = false;
+
+	if (!was_visible && !visible)
+		return 0;
+
+	turn_off = was_visible && (!visible || mode_changed);
+	turn_on = visible && (!was_visible || mode_changed);
+
+	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
+			 plane->base.id, fb ? fb->base.id : -1);
+
+	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
+			 plane->base.id, was_visible, visible,
+			 turn_off, turn_on, mode_changed);
+
+	if (turn_on) {
+		intel_crtc->atomic.update_wm_pre = true;
+		/* must disable cxsr around plane enable/disable */
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			intel_crtc->atomic.disable_cxsr = true;
+			/* to potentially re-enable cxsr */
+			intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.update_wm_post = true;
+		}
+	} else if (turn_off) {
+		intel_crtc->atomic.update_wm_post = true;
+		/* must disable cxsr around plane enable/disable */
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			if (is_crtc_enabled)
+				intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.disable_cxsr = true;
+		}
+	} else if (intel_wm_need_update(plane, plane_state)) {
+		intel_crtc->atomic.update_wm_pre = true;
+	}
+
+	if (visible)
+		intel_crtc->atomic.fb_bits |=
+			to_intel_plane(plane)->frontbuffer_bit;
+
+	switch (plane->type) {
+	case DRM_PLANE_TYPE_PRIMARY:
+		intel_crtc->atomic.wait_for_flips = true;
+		intel_crtc->atomic.pre_disable_primary = turn_off;
+		intel_crtc->atomic.post_enable_primary = turn_on;
+
+		if (turn_off) {
+			/*
+			 * FIXME: Actually, if we still have any other plane
+			 * enabled on the pipe we could leave IPS enabled, but
+			 * for now consider that when we make the primary
+			 * plane invisible by setting DSPCNTR to 0 in the
+			 * update_primary_plane function, IPS needs to be
+			 * disabled.
+			 */
+			intel_crtc->atomic.disable_ips = true;
+
+			intel_crtc->atomic.disable_fbc = true;
+		}
+
+		/*
+		 * FBC does not work on some platforms for rotated
+		 * planes, so disable it when rotation is not 0 and
+		 * update it when rotation is set back to 0.
+		 *
+		 * FIXME: This is redundant with the fbc update done in
+		 * the primary plane enable function except that that
+		 * one is done too late. We eventually need to unify
+		 * this.
+		 */
+
+		if (visible &&
+		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+		    dev_priv->fbc.crtc == intel_crtc &&
+		    plane_state->rotation != BIT(DRM_ROTATE_0))
+			intel_crtc->atomic.disable_fbc = true;
+
+		/*
+		 * BDW signals flip done immediately if the plane
+		 * is disabled, even if the plane enable is already
+		 * armed to occur at the next vblank :(
+		 */
+		if (turn_on && IS_BROADWELL(dev))
+			intel_crtc->atomic.wait_vblank = true;
+
+		intel_crtc->atomic.update_fbc |= visible || mode_changed;
+		break;
+	case DRM_PLANE_TYPE_CURSOR:
+		break;
+	case DRM_PLANE_TYPE_OVERLAY:
+		if (turn_off && !mode_changed) {
+			intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.update_sprite_watermarks |=
+				1 << i;
+		}
+	}
+	return 0;
+}
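
The turn_off/turn_on derivation above compresses the plane transitions into two booleans; note that a modeset on a plane that stays visible reports both, since the plane is torn down and brought back. A sketch of the truth logic:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool was_visible = true, visible = true, mode_changed = true;

	bool turn_off = was_visible && (!visible || mode_changed);
	bool turn_on  = visible && (!was_visible || mode_changed);

	/* a full modeset turns a still-visible plane off and back on */
	printf("off=%d on=%d\n", turn_off, turn_on); /* off=1 on=1 */
	return 0;
}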
+
+static bool encoders_cloneable(const struct intel_encoder *a,
+			       const struct intel_encoder *b)
+{
+	/* masks could be asymmetric, so check both ways */
+	return a == b || (a->cloneable & (1 << b->type) &&
+			  b->cloneable & (1 << a->type));
+}
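
encoders_cloneable() treats the cloneable field as a bitmask of encoder types this encoder tolerates on the same pipe, checked both ways because the masks need not be symmetric. A self-contained sketch with hypothetical type values standing in for INTEL_OUTPUT_*:

#include <stdbool.h>
#include <stdio.h>

enum { TYPE_DAC = 1, TYPE_TMDS = 2, TYPE_LVDS = 3 }; /* hypothetical */

struct encoder {
	int type;
	unsigned int cloneable;	/* bitmask of types we can clone with */
};

static bool cloneable(const struct encoder *a, const struct encoder *b)
{
	return a == b || ((a->cloneable & (1 << b->type)) &&
			  (b->cloneable & (1 << a->type)));
}

int main(void)
{
	struct encoder dac  = { TYPE_DAC,  1 << TYPE_TMDS };
	struct encoder tmds = { TYPE_TMDS, 1 << TYPE_DAC };
	struct encoder lvds = { TYPE_LVDS, 0 };

	printf("%d\n", cloneable(&dac, &tmds)); /* 1: both masks agree */
	printf("%d\n", cloneable(&dac, &lvds)); /* 0: LVDS clones nothing */
	return 0;
}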
+
+static bool check_single_encoder_cloning(struct drm_atomic_state *state,
+					 struct intel_crtc *crtc,
+					 struct intel_encoder *encoder)
+{
+	struct intel_encoder *source_encoder;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		source_encoder =
+			to_intel_encoder(connector_state->best_encoder);
+		if (!encoders_cloneable(encoder, source_encoder))
+			return false;
+	}
+
+	return true;
+}
+
+static bool check_encoder_cloning(struct drm_atomic_state *state,
+				  struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
-	struct intel_connector *connector;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
 
-	for_each_intel_connector(dev, connector) {
-		connector->new_encoder =
-			to_intel_encoder(connector->base.encoder);
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+		if (!check_single_encoder_cloning(state, crtc, encoder))
+			return false;
 	}
 
-	for_each_intel_encoder(dev, encoder) {
-		encoder->new_crtc =
-			to_intel_crtc(encoder->base.crtc);
+	return true;
+}
+
+static int intel_crtc_atomic_check(struct drm_crtc *crtc,
+				   struct drm_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc_state);
+	struct drm_atomic_state *state = crtc_state->state;
+	int ret;
+	bool mode_changed = needs_modeset(crtc_state);
+
+	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
+		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+		return -EINVAL;
 	}
 
-	for_each_intel_crtc(dev, crtc) {
-		crtc->new_enabled = crtc->base.state->enable;
+	if (mode_changed && !crtc_state->active)
+		intel_crtc->atomic.update_wm_post = true;
+
+	if (mode_changed && crtc_state->enable &&
+	    dev_priv->display.crtc_compute_clock &&
+	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
+		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+							   pipe_config);
+		if (ret)
+			return ret;
+	}
+
+	ret = 0;
+	if (INTEL_INFO(dev)->gen >= 9) {
+		if (mode_changed)
+			ret = skl_update_scaler_crtc(pipe_config);
+
+		if (!ret)
+			ret = intel_atomic_setup_scalers(dev, intel_crtc,
+							 pipe_config);
 	}
+
+	return ret;
 }
 
-/* Transitional helper to copy current connector/encoder state to
- * connector->state. This is needed so that code that is partially
- * converted to atomic does the right thing.
- */
+static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+	.mode_set_base_atomic = intel_pipe_set_base_atomic,
+	.load_lut = intel_crtc_load_lut,
+	.atomic_begin = intel_begin_crtc_commit,
+	.atomic_flush = intel_finish_crtc_commit,
+	.atomic_check = intel_crtc_atomic_check,
+};
+
 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
 {
 	struct intel_connector *connector;
@@ -11358,39 +11807,6 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
 	}
 }
 
-/* Fixup legacy state after an atomic state swap.
- */
-static void intel_modeset_fixup_state(struct drm_atomic_state *state)
-{
-	struct intel_crtc *crtc;
-	struct intel_encoder *encoder;
-	struct intel_connector *connector;
-
-	for_each_intel_connector(state->dev, connector) {
-		connector->base.encoder = connector->base.state->best_encoder;
-		if (connector->base.encoder)
-			connector->base.encoder->crtc =
-				connector->base.state->crtc;
-	}
-
-	/* Update crtc of disabled encoders */
-	for_each_intel_encoder(state->dev, encoder) {
-		int num_connectors = 0;
-
-		for_each_intel_connector(state->dev, connector)
-			if (connector->base.encoder == &encoder->base)
-				num_connectors++;
-
-		if (num_connectors == 0)
-			encoder->base.crtc = NULL;
-	}
-
-	for_each_intel_crtc(state->dev, crtc) {
-		crtc->base.enabled = crtc->base.state->enable;
-		crtc->config = to_intel_crtc_state(crtc->base.state);
-	}
-}
-
 static void
 connected_sink_compute_bpp(struct intel_connector *connector,
 			   struct intel_crtc_state *pipe_config)
@@ -11526,17 +11942,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 
 	if (IS_BROXTON(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, "
+		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
-			      "pll6: 0x%x, pll8: 0x%x, pcsdw12: 0x%x\n",
+			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
 			      pipe_config->ddi_pll_sel,
 			      pipe_config->dpll_hw_state.ebb0,
+			      pipe_config->dpll_hw_state.ebb4,
 			      pipe_config->dpll_hw_state.pll0,
 			      pipe_config->dpll_hw_state.pll1,
 			      pipe_config->dpll_hw_state.pll2,
 			      pipe_config->dpll_hw_state.pll3,
 			      pipe_config->dpll_hw_state.pll6,
 			      pipe_config->dpll_hw_state.pll8,
+			      pipe_config->dpll_hw_state.pll9,
+			      pipe_config->dpll_hw_state.pll10,
 			      pipe_config->dpll_hw_state.pcsdw12);
 	} else if (IS_SKYLAKE(dev)) {
 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
@@ -11593,56 +12012,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 	}
 }
 
-static bool encoders_cloneable(const struct intel_encoder *a,
-			       const struct intel_encoder *b)
-{
-	/* masks could be asymmetric, so check both ways */
-	return a == b || (a->cloneable & (1 << b->type) &&
-			  b->cloneable & (1 << a->type));
-}
-
-static bool check_single_encoder_cloning(struct drm_atomic_state *state,
-					 struct intel_crtc *crtc,
-					 struct intel_encoder *encoder)
-{
-	struct intel_encoder *source_encoder;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int i;
-
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != &crtc->base)
-			continue;
-
-		source_encoder =
-			to_intel_encoder(connector_state->best_encoder);
-		if (!encoders_cloneable(encoder, source_encoder))
-			return false;
-	}
-
-	return true;
-}
-
-static bool check_encoder_cloning(struct drm_atomic_state *state,
-				  struct intel_crtc *crtc)
-{
-	struct intel_encoder *encoder;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int i;
-
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != &crtc->base)
-			continue;
-
-		encoder = to_intel_encoder(connector_state->best_encoder);
-		if (!check_single_encoder_cloning(state, crtc, encoder))
-			return false;
-	}
-
-	return true;
-}
-
 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -11696,6 +12065,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 	struct intel_dpll_hw_state dpll_hw_state;
 	enum intel_dpll_id shared_dpll;
 	uint32_t ddi_pll_sel;
+	bool force_thru;
 
 	/* FIXME: before the switch to atomic started, a new pipe_config was
 	 * kzalloc'd. Code that depends on any field being zero should be
@@ -11707,6 +12077,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 	shared_dpll = crtc_state->shared_dpll;
 	dpll_hw_state = crtc_state->dpll_hw_state;
 	ddi_pll_sel = crtc_state->ddi_pll_sel;
+	force_thru = crtc_state->pch_pfit.force_thru;
 
 	memset(crtc_state, 0, sizeof *crtc_state);
 
@@ -11715,13 +12086,14 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
 	crtc_state->shared_dpll = shared_dpll;
 	crtc_state->dpll_hw_state = dpll_hw_state;
 	crtc_state->ddi_pll_sel = ddi_pll_sel;
+	crtc_state->pch_pfit.force_thru = force_thru;
 }
 
 static int
 intel_modeset_pipe_config(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state,
 			  struct intel_crtc_state *pipe_config)
 {
+	struct drm_atomic_state *state = pipe_config->base.state;
 	struct intel_encoder *encoder;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
@@ -11729,16 +12101,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 	int i;
 	bool retry = true;
 
-	if (!check_encoder_cloning(state, to_intel_crtc(crtc))) {
-		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-		return -EINVAL;
-	}
-
-	if (!check_digital_port_conflicts(state)) {
-		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
-		return -EINVAL;
-	}
-
 	clear_intel_crtc_state(pipe_config);
 
 	pipe_config->cpu_transcoder =
@@ -11832,90 +12194,27 @@ encoder_retry:
 	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
-	return 0;
 fail:
 	return ret;
 }
 
-static bool intel_crtc_in_use(struct drm_crtc *crtc)
-{
-	struct drm_encoder *encoder;
-	struct drm_device *dev = crtc->dev;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		if (encoder->crtc == crtc)
-			return true;
-
-	return false;
-}
-
-static bool
-needs_modeset(struct drm_crtc_state *state)
-{
-	return state->mode_changed || state->active_changed;
-}
-
 static void
-intel_modeset_update_state(struct drm_atomic_state *state)
+intel_modeset_update_crtc_state(struct drm_atomic_state *state)
 {
-	struct drm_device *dev = state->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	struct drm_connector *connector;
 	int i;
 
-	intel_shared_dpll_commit(dev_priv);
-
-	for_each_intel_encoder(dev, intel_encoder) {
-		if (!intel_encoder->base.crtc)
-			continue;
-
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			if (crtc != intel_encoder->base.crtc)
-				continue;
-
-			if (crtc_state->enable && needs_modeset(crtc_state))
-				intel_encoder->connectors_active = false;
-
-			break;
-		}
-	}
-
-	drm_atomic_helper_swap_state(state->dev, state);
-	intel_modeset_fixup_state(state);
-
 	/* Double check state. */
-	for_each_crtc(dev, crtc) {
-		WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc));
-	}
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder || !connector->encoder->crtc)
-			continue;
-
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			if (crtc != connector->encoder->crtc)
-				continue;
-
-			if (crtc->state->enable && needs_modeset(crtc->state)) {
-				struct drm_property *dpms_property =
-					dev->mode_config.dpms_property;
-
-				connector->dpms = DRM_MODE_DPMS_ON;
-				drm_object_property_set_value(&connector->base,
-								 dpms_property,
-								 DRM_MODE_DPMS_ON);
-
-				intel_encoder = to_intel_encoder(connector->encoder);
-				intel_encoder->connectors_active = true;
-			}
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
 
-			break;
-		}
+		/* Update hwmode for vblank functions */
+		if (crtc->state->active)
+			crtc->hwmode = crtc->state->adjusted_mode;
+		else
+			crtc->hwmode.crtc_clock = 0;
 	}
-
 }
 
 static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11942,27 +12241,133 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
 			    base.head) \
 		if (mask & (1 <<(intel_crtc)->pipe))
 
+
+static bool
+intel_compare_m_n(unsigned int m, unsigned int n,
+		  unsigned int m2, unsigned int n2,
+		  bool exact)
+{
+	if (m == m2 && n == n2)
+		return true;
+
+	if (exact || !m || !n || !m2 || !n2)
+		return false;
+
+	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
+
+	if (m > m2) {
+		while (m > m2) {
+			m2 <<= 1;
+			n2 <<= 1;
+		}
+	} else if (m < m2) {
+		while (m < m2) {
+			m <<= 1;
+			n <<= 1;
+		}
+	}
+
+	return m == m2 && n == n2;
+}
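
intel_compare_m_n() above accepts two M/N pairs as equivalent when they encode the same ratio at a different power-of-two scale, which is how a hardware read-out can legitimately differ from the computed values. A userspace sketch plus a quick check:

#include <stdbool.h>
#include <stdio.h>

static bool compare_m_n(unsigned int m, unsigned int n,
			unsigned int m2, unsigned int n2, bool exact)
{
	if (m == m2 && n == n2)
		return true;
	if (exact || !m || !n || !m2 || !n2)
		return false;

	while (m > m2) { m2 <<= 1; n2 <<= 1; }	/* scale the smaller pair up */
	while (m < m2) { m  <<= 1; n  <<= 1; }

	return m == m2 && n == n2;
}

int main(void)
{
	/* 0x1234/0x5678 and its exact double encode the same ratio */
	printf("%d\n", compare_m_n(0x1234, 0x5678, 0x2468, 0xacf0, false)); /* 1 */
	printf("%d\n", compare_m_n(0x1234, 0x5678, 0x2468, 0xacf1, false)); /* 0 */
	return 0;
}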
+
+static bool
+intel_compare_link_m_n(const struct intel_link_m_n *m_n,
+		       struct intel_link_m_n *m2_n2,
+		       bool adjust)
+{
+	if (m_n->tu == m2_n2->tu &&
+	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
+			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
+	    intel_compare_m_n(m_n->link_m, m_n->link_n,
+			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
+		if (adjust)
+			*m2_n2 = *m_n;
+
+		return true;
+	}
+
+	return false;
+}
+
 static bool
 intel_pipe_config_compare(struct drm_device *dev,
 			  struct intel_crtc_state *current_config,
-			  struct intel_crtc_state *pipe_config)
+			  struct intel_crtc_state *pipe_config,
+			  bool adjust)
 {
+	bool ret = true;
+
+#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
+	do { \
+		if (!adjust) \
+			DRM_ERROR(fmt, ##__VA_ARGS__); \
+		else \
+			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
+	} while (0)
+
 #define PIPE_CONF_CHECK_X(name)	\
 	if (current_config->name != pipe_config->name) { \
-		DRM_ERROR("mismatch in " #name " " \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 			  "(expected 0x%08x, found 0x%08x)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
-		return false; \
+		ret = false; \
 	}
 
 #define PIPE_CONF_CHECK_I(name)	\
 	if (current_config->name != pipe_config->name) { \
-		DRM_ERROR("mismatch in " #name " " \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 			  "(expected %i, found %i)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
-		return false; \
+		ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_M_N(name) \
+	if (!intel_compare_link_m_n(&current_config->name, \
+				    &pipe_config->name,\
+				    adjust)) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected tu %i gmch %i/%i link %i/%i, " \
+			  "found tu %i, gmch %i/%i link %i/%i)\n", \
+			  current_config->name.tu, \
+			  current_config->name.gmch_m, \
+			  current_config->name.gmch_n, \
+			  current_config->name.link_m, \
+			  current_config->name.link_n, \
+			  pipe_config->name.tu, \
+			  pipe_config->name.gmch_m, \
+			  pipe_config->name.gmch_n, \
+			  pipe_config->name.link_m, \
+			  pipe_config->name.link_n); \
+		ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
+	if (!intel_compare_link_m_n(&current_config->name, \
+				    &pipe_config->name, adjust) && \
+	    !intel_compare_link_m_n(&current_config->alt_name, \
+				    &pipe_config->name, adjust)) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected tu %i gmch %i/%i link %i/%i, " \
+			  "or tu %i gmch %i/%i link %i/%i, " \
+			  "found tu %i, gmch %i/%i link %i/%i)\n", \
+			  current_config->name.tu, \
+			  current_config->name.gmch_m, \
+			  current_config->name.gmch_n, \
+			  current_config->name.link_m, \
+			  current_config->name.link_n, \
+			  current_config->alt_name.tu, \
+			  current_config->alt_name.gmch_m, \
+			  current_config->alt_name.gmch_n, \
+			  current_config->alt_name.link_m, \
+			  current_config->alt_name.link_n, \
+			  pipe_config->name.tu, \
+			  pipe_config->name.gmch_m, \
+			  pipe_config->name.gmch_n, \
+			  pipe_config->name.link_m, \
+			  pipe_config->name.link_n); \
+		ret = false; \
 	}
 
 /* This is required for BDW+ where there is only one set of registers for
@@ -11973,30 +12378,30 @@ intel_pipe_config_compare(struct drm_device *dev,
 #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
 	if ((current_config->name != pipe_config->name) && \
 		(current_config->alt_name != pipe_config->name)) { \
-			DRM_ERROR("mismatch in " #name " " \
+			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 				  "(expected %i or %i, found %i)\n", \
 				  current_config->name, \
 				  current_config->alt_name, \
 				  pipe_config->name); \
-			return false; \
+			ret = false; \
 	}
 
 #define PIPE_CONF_CHECK_FLAGS(name, mask)	\
 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
-		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
 			  "(expected %i, found %i)\n", \
 			  current_config->name & (mask), \
 			  pipe_config->name & (mask)); \
-		return false; \
+		ret = false; \
 	}
 
 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
-		DRM_ERROR("mismatch in " #name " " \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 			  "(expected %i, found %i)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
-		return false; \
+		ret = false; \
 	}
 
 #define PIPE_CONF_QUIRK(quirk)	\
@@ -12006,35 +12411,18 @@ intel_pipe_config_compare(struct drm_device *dev,
 
 	PIPE_CONF_CHECK_I(has_pch_encoder);
 	PIPE_CONF_CHECK_I(fdi_lanes);
-	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
-	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
-	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
-	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
-	PIPE_CONF_CHECK_I(fdi_m_n.tu);
+	PIPE_CONF_CHECK_M_N(fdi_m_n);
 
 	PIPE_CONF_CHECK_I(has_dp_encoder);
 
 	if (INTEL_INFO(dev)->gen < 8) {
-		PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
-		PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
-		PIPE_CONF_CHECK_I(dp_m_n.link_m);
-		PIPE_CONF_CHECK_I(dp_m_n.link_n);
-		PIPE_CONF_CHECK_I(dp_m_n.tu);
-
-		if (current_config->has_drrs) {
-			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
-			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
-			PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
-			PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
-			PIPE_CONF_CHECK_I(dp_m2_n2.tu);
-		}
-	} else {
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
-	}
+		PIPE_CONF_CHECK_M_N(dp_m_n);
+
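+		/* dp_m2_n2 carries the link M/N for the DRRS low refresh
+		 * rate and is only programmed when DRRS is in use. */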
+		PIPE_CONF_CHECK_I(has_drrs);
+		if (current_config->has_drrs)
+			PIPE_CONF_CHECK_M_N(dp_m2_n2);
+	} else
+		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12076,21 +12464,11 @@ intel_pipe_config_compare(struct drm_device *dev,
 	PIPE_CONF_CHECK_I(pipe_src_w);
 	PIPE_CONF_CHECK_I(pipe_src_h);
 
-	/*
-	 * FIXME: BIOS likes to set up a cloned config with lvds+external
-	 * screen. Since we don't yet re-compute the pipe config when moving
-	 * just the lvds port away to another pipe the sw tracking won't match.
-	 *
-	 * Proper atomic modesets with recomputed global state will fix this.
-	 * Until then just don't check gmch state for inherited modes.
-	 */
-	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
-		PIPE_CONF_CHECK_I(gmch_pfit.control);
-		/* pfit ratios are autocomputed by the hw on gen4+ */
-		if (INTEL_INFO(dev)->gen < 4)
-			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
-	}
+	PIPE_CONF_CHECK_I(gmch_pfit.control);
+	/* pfit ratios are autocomputed by the hw on gen4+ */
+	if (INTEL_INFO(dev)->gen < 4)
+		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
 
 	PIPE_CONF_CHECK_I(pch_pfit.enabled);
 	if (current_config->pch_pfit.enabled) {
@@ -12130,8 +12508,9 @@ intel_pipe_config_compare(struct drm_device *dev,
 #undef PIPE_CONF_CHECK_FLAGS
 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
 #undef PIPE_CONF_QUIRK
+#undef INTEL_ERR_OR_DBG_KMS
 
-	return true;
+	return ret;
 }
 
 static void check_wm_state(struct drm_device *dev)
@@ -12185,17 +12564,23 @@ static void check_wm_state(struct drm_device *dev)
 }
 
 static void
-check_connector_state(struct drm_device *dev)
+check_connector_state(struct drm_device *dev,
+		      struct drm_atomic_state *old_state)
 {
-	struct intel_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *connector;
+	int i;
+
+	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+		struct drm_encoder *encoder = connector->encoder;
+		struct drm_connector_state *state = connector->state;
 
-	for_each_intel_connector(dev, connector) {
 		/* This also checks the encoder/connector hw state with the
 		 * ->get_hw_state callbacks. */
-		intel_connector_check_state(connector);
+		intel_connector_check_state(to_intel_connector(connector));
 
-		I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
-		     "connector's staged encoder doesn't match current encoder\n");
+		I915_STATE_WARN(state->best_encoder != encoder,
+		     "connector's atomic encoder doesn't match legacy encoder\n");
 	}
 }
 
@@ -12207,124 +12592,106 @@ check_encoder_state(struct drm_device *dev)
 
 	for_each_intel_encoder(dev, encoder) {
 		bool enabled = false;
-		bool active = false;
-		enum pipe pipe, tracked_pipe;
+		enum pipe pipe;
 
 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
 			      encoder->base.base.id,
 			      encoder->base.name);
 
-		I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
-		     "encoder's stage crtc doesn't match current crtc\n");
-		I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
-		     "encoder's active_connectors set, but no crtc\n");
-
 		for_each_intel_connector(dev, connector) {
-			if (connector->base.encoder != &encoder->base)
+			if (connector->base.state->best_encoder != &encoder->base)
 				continue;
 			enabled = true;
-			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
-				active = true;
+
+			I915_STATE_WARN(connector->base.state->crtc !=
+					encoder->base.crtc,
+			     "connector's crtc doesn't match encoder crtc\n");
 		}
-		/*
-		 * for MST connectors if we unplug the connector is gone
-		 * away but the encoder is still connected to a crtc
-		 * until a modeset happens in response to the hotplug.
-		 */
-		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
-			continue;
 
 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
 		     "encoder's enabled state mismatch "
 		     "(expected %i, found %i)\n",
 		     !!encoder->base.crtc, enabled);
-		I915_STATE_WARN(active && !encoder->base.crtc,
-		     "active encoder with no crtc\n");
-
-		I915_STATE_WARN(encoder->connectors_active != active,
-		     "encoder's computed active state doesn't match tracked active state "
-		     "(expected %i, found %i)\n", active, encoder->connectors_active);
-
-		active = encoder->get_hw_state(encoder, &pipe);
-		I915_STATE_WARN(active != encoder->connectors_active,
-		     "encoder's hw state doesn't match sw tracking "
-		     "(expected %i, found %i)\n",
-		     encoder->connectors_active, active);
-
-		if (!encoder->base.crtc)
-			continue;
 
-		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
-		I915_STATE_WARN(active && pipe != tracked_pipe,
-		     "active encoder's pipe doesn't match"
-		     "(expected %i, found %i)\n",
-		     tracked_pipe, pipe);
+		if (!encoder->base.crtc) {
+			bool active;
 
+			active = encoder->get_hw_state(encoder, &pipe);
+			I915_STATE_WARN(active,
+			     "encoder detached but still enabled on pipe %c.\n",
+			     pipe_name(pipe));
+		}
 	}
 }
 
 static void
-check_crtc_state(struct drm_device *dev)
+check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
-	struct intel_crtc_state pipe_config;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc *crtc;
+	int i;
 
-	for_each_intel_crtc(dev, crtc) {
-		bool enabled = false;
-		bool active = false;
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		struct intel_crtc_state *pipe_config, *sw_config;
+		bool active;
+
+		if (!needs_modeset(crtc->state))
+			continue;
 
-		memset(&pipe_config, 0, sizeof(pipe_config));
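+		/* The old crtc state isn't needed anymore; reuse its memory
+		 * to hold the config read back from the hw. */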
+		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
+		pipe_config = to_intel_crtc_state(old_crtc_state);
+		memset(pipe_config, 0, sizeof(*pipe_config));
+		pipe_config->base.crtc = crtc;
+		pipe_config->base.state = old_state;
 
 		DRM_DEBUG_KMS("[CRTC:%d]\n",
-			      crtc->base.base.id);
-
-		I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
-		     "active crtc, but not enabled in sw tracking\n");
+			      crtc->base.id);
 
-		for_each_intel_encoder(dev, encoder) {
-			if (encoder->base.crtc != &crtc->base)
-				continue;
-			enabled = true;
-			if (encoder->connectors_active)
-				active = true;
-		}
+		active = dev_priv->display.get_pipe_config(intel_crtc,
+							   pipe_config);
 
-		I915_STATE_WARN(active != crtc->active,
-		     "crtc's computed active state doesn't match tracked active state "
-		     "(expected %i, found %i)\n", active, crtc->active);
-		I915_STATE_WARN(enabled != crtc->base.state->enable,
-		     "crtc's computed enabled state doesn't match tracked enabled state "
-		     "(expected %i, found %i)\n", enabled,
-				crtc->base.state->enable);
+		/* hw state is inconsistent with the pipe quirk */
+		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+			active = crtc->state->active;
 
-		active = dev_priv->display.get_pipe_config(crtc,
-							   &pipe_config);
+		I915_STATE_WARN(crtc->state->active != active,
+		     "crtc active state doesn't match with hw state "
+		     "(expected %i, found %i)\n", crtc->state->active, active);
 
-		/* hw state is inconsistent with the pipe quirk */
-		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
-		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
-			active = crtc->active;
+		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
+		     "transitional active state does not match atomic hw state "
+		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
 
-		for_each_intel_encoder(dev, encoder) {
+		for_each_encoder_on_crtc(dev, crtc, encoder) {
 			enum pipe pipe;
-			if (encoder->base.crtc != &crtc->base)
-				continue;
-			if (encoder->get_hw_state(encoder, &pipe))
-				encoder->get_config(encoder, &pipe_config);
+
+			active = encoder->get_hw_state(encoder, &pipe);
+			I915_STATE_WARN(active != crtc->state->active,
+				"[ENCODER:%i] active %i with crtc active %i\n",
+				encoder->base.base.id, active, crtc->state->active);
+
+			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+					"Encoder connected to wrong pipe %c\n",
+					pipe_name(pipe));
+
+			if (active)
+				encoder->get_config(encoder, pipe_config);
 		}
 
-		I915_STATE_WARN(crtc->active != active,
-		     "crtc active state doesn't match with hw state "
-		     "(expected %i, found %i)\n", crtc->active, active);
+		if (!crtc->state->active)
+			continue;
 
-		if (active &&
-		    !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
+		sw_config = to_intel_crtc_state(crtc->state);
+		if (!intel_pipe_config_compare(dev, sw_config,
+					       pipe_config, false)) {
 			I915_STATE_WARN(1, "pipe state doesn't match!\n");
-			intel_dump_pipe_config(crtc, &pipe_config,
+			intel_dump_pipe_config(intel_crtc, pipe_config,
 					       "[hw state]");
-			intel_dump_pipe_config(crtc, crtc->config,
+			intel_dump_pipe_config(intel_crtc, sw_config,
 					       "[sw state]");
 		}
 	}
@@ -12379,13 +12746,14 @@ check_shared_dpll_state(struct drm_device *dev)
 	}
 }
 
-void
-intel_modeset_check_state(struct drm_device *dev)
+static void
+intel_modeset_check_state(struct drm_device *dev,
+			  struct drm_atomic_state *old_state)
 {
 	check_wm_state(dev);
-	check_connector_state(dev);
+	check_connector_state(dev, old_state);
 	check_encoder_state(dev);
-	check_crtc_state(dev);
+	check_crtc_state(dev, old_state);
 	check_shared_dpll_state(dev);
 }
 
@@ -12439,519 +12807,390 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 		crtc->scanline_offset = 1;
 }
 
-static struct intel_crtc_state *
-intel_modeset_compute_config(struct drm_crtc *crtc,
-			     struct drm_atomic_state *state)
-{
-	struct intel_crtc_state *pipe_config;
-	int ret = 0;
-
-	ret = drm_atomic_add_affected_connectors(state, crtc);
-	if (ret)
-		return ERR_PTR(ret);
-
-	ret = drm_atomic_helper_check_modeset(state->dev, state);
-	if (ret)
-		return ERR_PTR(ret);
-
-	/*
-	 * Note this needs changes when we start tracking multiple modes
-	 * and crtcs.  At that point we'll need to compute the whole config
-	 * (i.e. one pipe_config for each crtc) rather than just the one
-	 * for this crtc.
-	 */
-	pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
-	if (IS_ERR(pipe_config))
-		return pipe_config;
-
-	if (!pipe_config->base.enable)
-		return pipe_config;
-
-	ret = intel_modeset_pipe_config(crtc, state, pipe_config);
-	if (ret)
-		return ERR_PTR(ret);
-
-	/* Check things that can only be changed through modeset */
-	if (pipe_config->has_audio !=
-	    to_intel_crtc(crtc)->config->has_audio)
-		pipe_config->base.mode_changed = true;
-
-	/*
-	 * Note we have an issue here with infoframes: current code
-	 * only updates them on the full mode set path per hw
-	 * requirements.  So here we should be checking for any
-	 * required changes and forcing a mode set.
-	 */
-
-	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,"[modeset]");
-
-	ret = drm_atomic_helper_check_planes(state->dev, state);
-	if (ret)
-		return ERR_PTR(ret);
-
-	return pipe_config;
-}
-
-static int __intel_set_mode_setup_plls(struct drm_atomic_state *state)
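+/* Release the shared dpll reference of every crtc undergoing a modeset by
+ * resetting its dpll id and clearing its bit in the dpll's crtc_mask. */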
+static void intel_modeset_clear_plls(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned clear_pipes = 0;
+	struct intel_shared_dpll_config *shared_dpll = NULL;
 	struct intel_crtc *intel_crtc;
 	struct intel_crtc_state *intel_crtc_state;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	int ret = 0;
 	int i;
 
 	if (!dev_priv->display.crtc_compute_clock)
-		return 0;
-
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		intel_crtc = to_intel_crtc(crtc);
-		intel_crtc_state = to_intel_crtc_state(crtc_state);
-
-		if (needs_modeset(crtc_state)) {
-			clear_pipes |= 1 << intel_crtc->pipe;
-			intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
-		}
-	}
-
-	ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
-	if (ret)
-		goto done;
+		return;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc_state) || !crtc_state->enable)
-			continue;
+		int dpll;
 
 		intel_crtc = to_intel_crtc(crtc);
 		intel_crtc_state = to_intel_crtc_state(crtc_state);
+		dpll = intel_crtc_state->shared_dpll;
 
-		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
-							   intel_crtc_state);
-		if (ret) {
-			intel_shared_dpll_abort_config(dev_priv);
-			goto done;
-		}
-	}
+		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
+			continue;
 
-done:
-	return ret;
-}
+		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
 
-/* Code that should eventually be part of atomic_check() */
-static int __intel_set_mode_checks(struct drm_atomic_state *state)
-{
-	struct drm_device *dev = state->dev;
-	int ret;
+		if (!shared_dpll)
+			shared_dpll = intel_atomic_get_shared_dpll_state(state);
 
-	/*
-	 * See if the config requires any additional preparation, e.g.
-	 * to adjust global state with pipes off.  We need to do this
-	 * here so we can get the modeset_pipe updated config for the new
-	 * mode set on this crtc.  For other crtcs we need to use the
-	 * adjusted_mode bits in the crtc directly.
-	 */
-	if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
-		ret = valleyview_modeset_global_pipes(state);
-		if (ret)
-			return ret;
+		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
 	}
-
-	ret = __intel_set_mode_setup_plls(state);
-	if (ret)
-		return ret;
-
-	return 0;
 }
 
-static int __intel_set_mode(struct drm_crtc *modeset_crtc,
-			    struct intel_crtc_state *pipe_config)
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or a single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
 {
-	struct drm_device *dev = modeset_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_atomic_state *state = pipe_config->base.state;
-	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	int ret = 0;
+	struct intel_crtc *intel_crtc;
+	struct drm_crtc *crtc;
+	struct intel_crtc_state *first_crtc_state = NULL;
+	struct intel_crtc_state *other_crtc_state = NULL;
+	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
 	int i;
 
-	ret = __intel_set_mode_checks(state);
-	if (ret < 0)
-		return ret;
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
+	/* look at all crtc's that are going to be enabled during the modeset */
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc_state))
+		intel_crtc = to_intel_crtc(crtc);
+
+		if (!crtc_state->active || !needs_modeset(crtc_state))
 			continue;
 
-		if (!crtc_state->enable) {
-			if (crtc->state->enable)
-				intel_crtc_disable(crtc);
-		} else if (crtc->state->enable) {
-			intel_crtc_disable_planes(crtc);
-			dev_priv->display.crtc_disable(crtc);
+		if (first_crtc_state) {
+			other_crtc_state = to_intel_crtc_state(crtc_state);
+			break;
+		} else {
+			first_crtc_state = to_intel_crtc_state(crtc_state);
+			first_pipe = intel_crtc->pipe;
 		}
 	}
 
-	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
-	 * to set it here already despite that we pass it down the callchain.
-	 *
-	 * Note we'll need to fix this up when we start tracking multiple
-	 * pipes; here we assume a single modeset_pipe and only track the
-	 * single crtc and mode.
-	 */
-	if (pipe_config->base.enable && needs_modeset(&pipe_config->base)) {
-		modeset_crtc->mode = pipe_config->base.mode;
-
-		/*
-		 * Calculate and store various constants which
-		 * are later needed by vblank and swap-completion
-		 * timestamping. They are derived from true hwmode.
-		 */
-		drm_calc_timestamping_constants(modeset_crtc,
-						&pipe_config->base.adjusted_mode);
-	}
+	/* No workaround needed? */
+	if (!first_crtc_state)
+		return 0;
 
-	/* Only after disabling all output pipelines that will be changed can we
-	 * update the the output configuration. */
-	intel_modeset_update_state(state);
+	/* w/a possibly needed, check how many crtc's are already enabled. */
+	for_each_intel_crtc(state->dev, intel_crtc) {
+		struct intel_crtc_state *pipe_config;
 
-	/* The state has been swaped above, so state actually contains the
-	 * old state now. */
+		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(pipe_config))
+			return PTR_ERR(pipe_config);
 
-	modeset_update_crtc_power_domains(state);
+		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
 
-	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc->state) || !crtc->state->enable) {
-			drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+		if (!pipe_config->base.active ||
+		    needs_modeset(&pipe_config->base))
 			continue;
-		}
 
-		update_scanline_offset(to_intel_crtc(crtc));
+		/* 2 or more enabled crtcs means no need for w/a */
+		if (enabled_pipe != INVALID_PIPE)
+			return 0;
 
-		dev_priv->display.crtc_enable(crtc);
-		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+		enabled_pipe = intel_crtc->pipe;
 	}
 
-	/* FIXME: add subpixel order */
-
-	drm_atomic_helper_cleanup_planes(dev, state);
-
-	drm_atomic_state_free(state);
+	if (enabled_pipe != INVALID_PIPE)
+		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
+	else if (other_crtc_state)
+		other_crtc_state->hsw_workaround_pipe = first_pipe;
 
 	return 0;
 }
 
-static int intel_set_mode_with_config(struct drm_crtc *crtc,
-				      struct intel_crtc_state *pipe_config,
-				      bool force_restore)
+static int intel_modeset_all_pipes(struct drm_atomic_state *state)
 {
-	int ret;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0;
 
-	ret = __intel_set_mode(crtc, pipe_config);
+	/* add all active pipes to the state */
+	for_each_crtc(state->dev, crtc) {
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
 
-	if (ret == 0 && force_restore) {
-		intel_modeset_update_staged_output_state(crtc->dev);
-		intel_modeset_check_state(crtc->dev);
-	}
+		if (!crtc_state->active || needs_modeset(crtc_state))
+			continue;
 
-	return ret;
-}
+		crtc_state->mode_changed = true;
 
-static int intel_set_mode(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state,
-			  bool force_restore)
-{
-	struct intel_crtc_state *pipe_config;
-	int ret = 0;
+		ret = drm_atomic_add_affected_connectors(state, crtc);
+		if (ret)
+			break;
 
-	pipe_config = intel_modeset_compute_config(crtc, state);
-	if (IS_ERR(pipe_config)) {
-		ret = PTR_ERR(pipe_config);
-		goto out;
+		ret = drm_atomic_add_affected_planes(state, crtc);
+		if (ret)
+			break;
 	}
 
-	ret = intel_set_mode_with_config(crtc, pipe_config, force_restore);
-	if (ret)
-		goto out;
-
-out:
 	return ret;
 }
 
-void intel_crtc_restore_mode(struct drm_crtc *crtc)
+
+static int intel_modeset_checks(struct drm_atomic_state *state)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_atomic_state *state;
-	struct intel_encoder *encoder;
-	struct intel_connector *connector;
-	struct drm_connector_state *connector_state;
-	struct intel_crtc_state *crtc_state;
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	state = drm_atomic_state_alloc(dev);
-	if (!state) {
-		DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
-			      crtc->base.id);
-		return;
-	}
-
-	state->acquire_ctx = dev->mode_config.acquire_ctx;
-
-	/* The force restore path in the HW readout code relies on the staged
-	 * config still keeping the user requested config while the actual
-	 * state has been overwritten by the configuration read from HW. We
-	 * need to copy the staged config to the atomic state, otherwise the
-	 * mode set will just reapply the state the HW is already in. */
-	for_each_intel_encoder(dev, encoder) {
-		if (&encoder->new_crtc->base != crtc)
-			continue;
-
-		for_each_intel_connector(dev, connector) {
-			if (connector->new_encoder != encoder)
-				continue;
-
-			connector_state = drm_atomic_get_connector_state(state, &connector->base);
-			if (IS_ERR(connector_state)) {
-				DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
-					      connector->base.base.id,
-					      connector->base.name,
-					      PTR_ERR(connector_state));
-				continue;
-			}
-
-			connector_state->crtc = crtc;
-			connector_state->best_encoder = &encoder->base;
-		}
-	}
-
-	crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
-	if (IS_ERR(crtc_state)) {
-		DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
-			      crtc->base.id, PTR_ERR(crtc_state));
-		drm_atomic_state_free(state);
-		return;
+	if (!check_digital_port_conflicts(state)) {
+		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+		return -EINVAL;
 	}
 
-	crtc_state->base.active = crtc_state->base.enable =
-		to_intel_crtc(crtc)->new_enabled;
-
-	drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
+	/*
+	 * See if the config requires any additional preparation, e.g.
+	 * to adjust global state with pipes off.  We need to do this
+	 * here so we can get the updated config for the new mode being set
+	 * on this crtc.  For other crtcs we need to use the
+	 * adjusted_mode bits in the crtc directly.
+	 */
+	if (dev_priv->display.modeset_calc_cdclk) {
+		unsigned int cdclk;
 
-	intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
-					crtc->primary->fb, crtc->x, crtc->y);
+		ret = dev_priv->display.modeset_calc_cdclk(state);
 
-	ret = intel_set_mode(crtc, state, false);
-	if (ret)
-		drm_atomic_state_free(state);
-}
+		cdclk = to_intel_atomic_state(state)->cdclk;
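+		/* Changing cdclk affects every pipe, so force a full modeset
+		 * on all active pipes when the frequency changes. */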
+		if (!ret && cdclk != dev_priv->cdclk_freq)
+			ret = intel_modeset_all_pipes(state);
 
-#undef for_each_intel_crtc_masked
+		if (ret < 0)
+			return ret;
+	} else
+		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
 
-static bool intel_connector_in_mode_set(struct intel_connector *connector,
-					struct drm_mode_set *set)
-{
-	int ro;
+	intel_modeset_clear_plls(state);
 
-	for (ro = 0; ro < set->num_connectors; ro++)
-		if (set->connectors[ro] == &connector->base)
-			return true;
+	if (IS_HASWELL(dev))
+		return haswell_mode_set_planes_workaround(state);
 
-	return false;
+	return 0;
 }
 
-static int
-intel_modeset_stage_output_state(struct drm_device *dev,
-				 struct drm_mode_set *set,
-				 struct drm_atomic_state *state)
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @state: state to validate
+ */
+static int intel_atomic_check(struct drm_device *dev,
+			      struct drm_atomic_state *state)
 {
-	struct intel_connector *connector;
-	struct drm_connector *drm_connector;
-	struct drm_connector_state *connector_state;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	int i, ret;
-
-	/* The upper layers ensure that we either disable a crtc or have a list
-	 * of connectors. For paranoia, double-check this. */
-	WARN_ON(!set->fb && (set->num_connectors != 0));
-	WARN_ON(set->fb && (set->num_connectors == 0));
+	int ret, i;
+	bool any_ms = false;
 
-	for_each_intel_connector(dev, connector) {
-		bool in_mode_set = intel_connector_in_mode_set(connector, set);
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
 
-		if (!in_mode_set && connector->base.state->crtc != set->crtc)
-			continue;
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct intel_crtc_state *pipe_config =
+			to_intel_crtc_state(crtc_state);
 
-		connector_state =
-			drm_atomic_get_connector_state(state, &connector->base);
-		if (IS_ERR(connector_state))
-			return PTR_ERR(connector_state);
+		/* Catch I915_MODE_FLAG_INHERITED */
+		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
+			crtc_state->mode_changed = true;
 
-		if (in_mode_set) {
-			int pipe = to_intel_crtc(set->crtc)->pipe;
-			connector_state->best_encoder =
-				&intel_find_encoder(connector, pipe)->base;
+		if (!crtc_state->enable) {
+			if (needs_modeset(crtc_state))
+				any_ms = true;
+			continue;
 		}
 
-		if (connector->base.state->crtc != set->crtc)
+		if (!needs_modeset(crtc_state))
 			continue;
 
-		/* If we disable the crtc, disable all its connectors. Also, if
-		 * the connector is on the changing crtc but not on the new
-		 * connector list, disable it. */
-		if (!set->fb || !in_mode_set) {
-			connector_state->best_encoder = NULL;
-
-			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
-				connector->base.base.id,
-				connector->base.name);
-		}
-	}
-	/* connector->new_encoder is now updated for all connectors. */
+		/* FIXME: If only active_changed is set we shouldn't need to
+		 * do any state recomputation at all. */
 
-	for_each_connector_in_state(state, drm_connector, connector_state, i) {
-		connector = to_intel_connector(drm_connector);
+		ret = drm_atomic_add_affected_connectors(state, crtc);
+		if (ret)
+			return ret;
 
-		if (!connector_state->best_encoder) {
-			ret = drm_atomic_set_crtc_for_connector(connector_state,
-								NULL);
-			if (ret)
-				return ret;
+		ret = intel_modeset_pipe_config(crtc, pipe_config);
+		if (ret)
+			return ret;
 
-			continue;
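+		/* With fastboot, downgrade to a fastset when the computed
+		 * state fuzzily matches the current one. */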
+		if (i915.fastboot &&
+		    intel_pipe_config_compare(state->dev,
+					to_intel_crtc_state(crtc->state),
+					pipe_config, true)) {
+			crtc_state->mode_changed = false;
 		}
 
-		if (intel_connector_in_mode_set(connector, set)) {
-			struct drm_crtc *crtc = connector->base.state->crtc;
-
-			/* If this connector was in a previous crtc, add it
-			 * to the state. We might need to disable it. */
-			if (crtc) {
-				crtc_state =
-					drm_atomic_get_crtc_state(state, crtc);
-				if (IS_ERR(crtc_state))
-					return PTR_ERR(crtc_state);
-			}
+		if (needs_modeset(crtc_state)) {
+			any_ms = true;
 
-			ret = drm_atomic_set_crtc_for_connector(connector_state,
-								set->crtc);
+			ret = drm_atomic_add_affected_planes(state, crtc);
 			if (ret)
 				return ret;
 		}
 
-		/* Make sure the new CRTC will work with the encoder */
-		if (!drm_encoder_crtc_ok(connector_state->best_encoder,
-					 connector_state->crtc)) {
-			return -EINVAL;
-		}
-
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
-			connector->base.base.id,
-			connector->base.name,
-			connector_state->crtc->base.id);
-
-		if (connector_state->best_encoder != &connector->encoder->base)
-			connector->encoder =
-				to_intel_encoder(connector_state->best_encoder);
+		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+				       needs_modeset(crtc_state) ?
+				       "[modeset]" : "[fastset]");
 	}
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		bool has_connectors;
+	if (any_ms) {
+		ret = intel_modeset_checks(state);
 
-		ret = drm_atomic_add_affected_connectors(state, crtc);
 		if (ret)
 			return ret;
+	} else
+		to_intel_atomic_state(state)->cdclk =
+			to_i915(state->dev)->cdclk_freq;
+
+	return drm_atomic_helper_check_planes(state->dev, state);
+}
 
-		has_connectors = !!drm_atomic_connectors_for_crtc(state, crtc);
-		if (has_connectors != crtc_state->enable)
-			crtc_state->enable =
-			crtc_state->active = has_connectors;
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
+ * we can only handle plane-related operations and do not yet support
+ * asynchronous commit.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+			       struct drm_atomic_state *state,
+			       bool async)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0;
+	int i;
+	bool any_ms = false;
+
+	if (async) {
+		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+		return -EINVAL;
 	}
 
-	ret = intel_modeset_setup_plane_state(state, set->crtc, set->mode,
-					      set->fb, set->x, set->y);
+	ret = drm_atomic_helper_prepare_planes(dev, state);
 	if (ret)
 		return ret;
 
-	crtc_state = drm_atomic_get_crtc_state(state, set->crtc);
-	if (IS_ERR(crtc_state))
-		return PTR_ERR(crtc_state);
+	drm_atomic_helper_swap_state(dev, state);
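+
+	/* From here on crtc->state holds the new state, while the crtc_state
+	 * entries in 'state' are the old ones. */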
 
-	if (set->mode)
-		drm_mode_copy(&crtc_state->mode, set->mode);
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	if (set->num_connectors)
-		crtc_state->active = true;
+		if (!needs_modeset(crtc->state))
+			continue;
 
-	return 0;
-}
+		any_ms = true;
+		intel_pre_plane_update(intel_crtc);
 
-static int intel_crtc_set_config(struct drm_mode_set *set)
-{
-	struct drm_device *dev;
-	struct drm_atomic_state *state = NULL;
-	struct intel_crtc_state *pipe_config;
-	int ret;
+		if (crtc_state->active) {
+			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
+			dev_priv->display.crtc_disable(crtc);
+			intel_crtc->active = false;
+			intel_disable_shared_dpll(intel_crtc);
+		}
+	}
+
+	/* Only after disabling all output pipelines that will be changed can we
+	 * update the output configuration. */
+	intel_modeset_update_crtc_state(state);
 
-	BUG_ON(!set);
-	BUG_ON(!set->crtc);
-	BUG_ON(!set->crtc->helper_private);
+	if (any_ms) {
+		intel_shared_dpll_commit(state);
 
-	/* Enforce sane interface api - has been abused by the fb helper. */
-	BUG_ON(!set->mode && set->fb);
-	BUG_ON(set->fb && set->num_connectors == 0);
+		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+		modeset_update_crtc_power_domains(state);
+	}
 
-	if (set->fb) {
-		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
-				set->crtc->base.id, set->fb->base.id,
-				(int)set->num_connectors, set->x, set->y);
-	} else {
-		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		bool modeset = needs_modeset(crtc->state);
+
+		if (modeset && crtc->state->active) {
+			update_scanline_offset(to_intel_crtc(crtc));
+			dev_priv->display.crtc_enable(crtc);
+		}
+
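+		/* Crtcs undergoing a modeset had pre_plane_update done in the
+		 * disable loop above; do it here for the fastset case. */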
+		if (!modeset)
+			intel_pre_plane_update(intel_crtc);
+
+		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+		intel_post_plane_update(intel_crtc);
 	}
 
-	dev = set->crtc->dev;
+	/* FIXME: add subpixel order */
 
-	state = drm_atomic_state_alloc(dev);
-	if (!state)
-		return -ENOMEM;
+	drm_atomic_helper_wait_for_vblanks(dev, state);
+	drm_atomic_helper_cleanup_planes(dev, state);
 
-	state->acquire_ctx = dev->mode_config.acquire_ctx;
+	if (any_ms)
+		intel_modeset_check_state(dev, state);
 
-	ret = intel_modeset_stage_output_state(dev, set, state);
-	if (ret)
-		goto out;
+	drm_atomic_state_free(state);
+
+	return 0;
+}
 
-	pipe_config = intel_modeset_compute_config(set->crtc, state);
-	if (IS_ERR(pipe_config)) {
-		ret = PTR_ERR(pipe_config);
-		goto out;
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	int ret;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state) {
+		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
+			      crtc->base.id);
+		return;
 	}
 
-	intel_update_pipe_size(to_intel_crtc(set->crtc));
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 
-	ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
+retry:
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	ret = PTR_ERR_OR_ZERO(crtc_state);
+	if (!ret) {
+		if (!crtc_state->active)
+			goto out;
 
-	if (ret) {
-		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
-			      set->crtc->base.id, ret);
+		crtc_state->mode_changed = true;
+		ret = drm_atomic_commit(state);
+	}
+
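+	/* On lock contention, drop all locks, wait for the contending lock
+	 * to be released and retry the commit from scratch. */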
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(state->acquire_ctx);
+		goto retry;
 	}
 
-out:
 	if (ret)
+out:
 		drm_atomic_state_free(state);
-	return ret;
 }
 
+#undef for_each_intel_crtc_masked
+
 static const struct drm_crtc_funcs intel_crtc_funcs = {
 	.gamma_set = intel_crtc_gamma_set,
-	.set_config = intel_crtc_set_config,
+	.set_config = drm_atomic_helper_set_config,
 	.destroy = intel_crtc_destroy,
 	.page_flip = intel_crtc_page_flip,
 	.atomic_duplicate_state = intel_crtc_duplicate_state,
@@ -13048,6 +13287,8 @@ static void intel_shared_dpll_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	intel_update_cdclk(dev);
+
 	if (HAS_DDI(dev))
 		intel_ddi_pll_init(dev);
 	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -13059,28 +13300,6 @@ static void intel_shared_dpll_init(struct drm_device *dev)
 }
 
 /**
- * intel_wm_need_update - Check whether watermarks need updating
- * @plane: drm plane
- * @state: new plane state
- *
- * Check current plane state versus the new one to determine whether
- * watermarks need to be recalculated.
- *
- * Returns true or false.
- */
-bool intel_wm_need_update(struct drm_plane *plane,
-			  struct drm_plane_state *state)
-{
-	/* Update watermarks on tiling changes. */
-	if (!plane->state->fb || !state->fb ||
-	    plane->state->fb->modifier[0] != state->fb->modifier[0] ||
-	    plane->state->rotation != state->rotation)
-		return true;
-
-	return false;
-}
-
-/**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
  * @plane: drm plane to prepare for
  * @fb: framebuffer to prepare for presentation
@@ -13099,27 +13318,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 {
 	struct drm_device *dev = plane->dev;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
-	enum pipe pipe = intel_plane->pipe;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
-	unsigned frontbuffer_bits = 0;
 	int ret = 0;
 
 	if (!obj)
 		return 0;
 
-	switch (plane->type) {
-	case DRM_PLANE_TYPE_PRIMARY:
-		frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
-		break;
-	case DRM_PLANE_TYPE_CURSOR:
-		frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
-		break;
-	case DRM_PLANE_TYPE_OVERLAY:
-		frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
-		break;
-	}
-
 	mutex_lock(&dev->struct_mutex);
 
 	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
@@ -13129,11 +13334,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		if (ret)
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 	} else {
-		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
+		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
 	}
 
 	if (ret == 0)
-		i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
+		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -13180,7 +13385,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
 	dev = intel_crtc->base.dev;
 	dev_priv = dev->dev_private;
 	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
-	cdclk = dev_priv->display.get_display_clock_speed(dev);
+	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
 
 	if (!crtc_clock || !cdclk)
 		return DRM_PLANE_HELPER_NO_SCALING;
@@ -13198,112 +13403,28 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
 
 static int
 intel_check_primary_plane(struct drm_plane *plane,
+			  struct intel_crtc_state *crtc_state,
 			  struct intel_plane_state *state)
 {
-	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = state->base.crtc;
-	struct intel_crtc *intel_crtc;
-	struct intel_crtc_state *crtc_state;
 	struct drm_framebuffer *fb = state->base.fb;
-	struct drm_rect *dest = &state->dst;
-	struct drm_rect *src = &state->src;
-	const struct drm_rect *clip = &state->clip;
-	bool can_position = false;
-	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
-	int ret;
-
-	crtc = crtc ? crtc : plane->crtc;
-	intel_crtc = to_intel_crtc(crtc);
-	crtc_state = state->base.state ?
-		intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
+	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+	bool can_position = false;
 
-	if (INTEL_INFO(dev)->gen >= 9) {
-		/* use scaler when colorkey is not required */
-		if (to_intel_plane(plane)->ckey.flags == I915_SET_COLORKEY_NONE) {
-			min_scale = 1;
-			max_scale = skl_max_scale(intel_crtc, crtc_state);
-		}
+	/* use scaler when colorkey is not required */
+	if (INTEL_INFO(plane->dev)->gen >= 9 &&
+	    state->ckey.flags == I915_SET_COLORKEY_NONE) {
+		min_scale = 1;
+		max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
 		can_position = true;
 	}
 
-	ret = drm_plane_helper_check_update(plane, crtc, fb,
-					    src, dest, clip,
-					    min_scale,
-					    max_scale,
-					    can_position, true,
-					    &state->visible);
-	if (ret)
-		return ret;
-
-	if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
-		struct intel_plane_state *old_state =
-			to_intel_plane_state(plane->state);
-
-		intel_crtc->atomic.wait_for_flips = true;
-
-		/*
-		 * FBC does not work on some platforms for rotated
-		 * planes, so disable it when rotation is not 0 and
-		 * update it when rotation is set back to 0.
-		 *
-		 * FIXME: This is redundant with the fbc update done in
-		 * the primary plane enable function except that that
-		 * one is done too late. We eventually need to unify
-		 * this.
-		 */
-		if (state->visible &&
-		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-		    dev_priv->fbc.crtc == intel_crtc &&
-		    state->base.rotation != BIT(DRM_ROTATE_0)) {
-			intel_crtc->atomic.disable_fbc = true;
-		}
-
-		if (state->visible && !old_state->visible) {
-			/*
-			 * BDW signals flip done immediately if the plane
-			 * is disabled, even if the plane enable is already
-			 * armed to occur at the next vblank :(
-			 */
-			if (IS_BROADWELL(dev))
-				intel_crtc->atomic.wait_vblank = true;
-
-			if (crtc_state)
-				intel_crtc->atomic.post_enable_primary = true;
-		}
-
-		/*
-		 * FIXME: Actually if we will still have any other plane enabled
-		 * on the pipe we could let IPS enabled still, but for
-		 * now lets consider that when we make primary invisible
-		 * by setting DSPCNTR to 0 on update_primary_plane function
-		 * IPS needs to be disable.
-		 */
-		if (!state->visible || !fb)
-			intel_crtc->atomic.disable_ips = true;
-
-		if (!state->visible && old_state->visible &&
-		    crtc_state && !needs_modeset(&crtc_state->base))
-			intel_crtc->atomic.pre_disable_primary = true;
-
-		intel_crtc->atomic.fb_bits |=
-			INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
-
-		intel_crtc->atomic.update_fbc = true;
-
-		if (intel_wm_need_update(plane, &state->base))
-			intel_crtc->atomic.update_wm = true;
-	}
-
-	if (INTEL_INFO(dev)->gen >= 9) {
-		ret = skl_update_scaler_users(intel_crtc, crtc_state,
-			to_intel_plane(plane), state, 0);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
+	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+					     &state->dst, &state->clip,
+					     min_scale, max_scale,
+					     can_position, true,
+					     &state->visible);
 }
 
 static void
@@ -13324,20 +13445,19 @@ intel_commit_primary_plane(struct drm_plane *plane,
 	crtc->x = src->x1 >> 16;
 	crtc->y = src->y1 >> 16;
 
-	if (intel_crtc->active) {
-		if (state->visible)
-			/* FIXME: kill this fastboot hack */
-			intel_update_pipe_size(intel_crtc);
+	if (!crtc->state->active)
+		return;
 
-		dev_priv->display.update_primary_plane(crtc, plane->fb,
-						       crtc->x, crtc->y);
-	}
+	if (state->visible)
+		/* FIXME: kill this fastboot hack */
+		intel_update_pipe_size(intel_crtc);
+
+	dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y);
 }
 
 static void
 intel_disable_primary_plane(struct drm_plane *plane,
-			    struct drm_crtc *crtc,
-			    bool force)
+			    struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13345,96 +13465,30 @@ intel_disable_primary_plane(struct drm_plane *plane,
 	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
 }
 
-static void intel_begin_crtc_commit(struct drm_crtc *crtc)
+static void intel_begin_crtc_commit(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_plane *intel_plane;
-	struct drm_plane *p;
-	unsigned fb_bits = 0;
 
-	/* Track fb's for any planes being disabled */
-	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
-		intel_plane = to_intel_plane(p);
-
-		if (intel_crtc->atomic.disabled_planes &
-		    (1 << drm_plane_index(p))) {
-			switch (p->type) {
-			case DRM_PLANE_TYPE_PRIMARY:
-				fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
-				break;
-			case DRM_PLANE_TYPE_CURSOR:
-				fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
-				break;
-			case DRM_PLANE_TYPE_OVERLAY:
-				fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
-				break;
-			}
-
-			mutex_lock(&dev->struct_mutex);
-			i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
-			mutex_unlock(&dev->struct_mutex);
-		}
-	}
-
-	if (intel_crtc->atomic.wait_for_flips)
-		intel_crtc_wait_for_pending_flips(crtc);
-
-	if (intel_crtc->atomic.disable_fbc)
-		intel_fbc_disable(dev);
-
-	if (intel_crtc->atomic.disable_ips)
-		hsw_disable_ips(intel_crtc);
-
-	if (intel_crtc->atomic.pre_disable_primary)
-		intel_pre_disable_primary(crtc);
-
-	if (intel_crtc->atomic.update_wm)
+	if (intel_crtc->atomic.update_wm_pre)
 		intel_update_watermarks(crtc);
 
-	intel_runtime_pm_get(dev_priv);
-
 	/* Perform vblank evasion around commit operation */
-	if (intel_crtc->active)
-		intel_crtc->atomic.evade =
-			intel_pipe_update_start(intel_crtc,
-						&intel_crtc->atomic.start_vbl_count);
+	if (crtc->state->active)
+		intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count);
+
+	if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9)
+		skl_detach_scalers(intel_crtc);
 }
 
-static void intel_finish_crtc_commit(struct drm_crtc *crtc)
+static void intel_finish_crtc_commit(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_plane *p;
-
-	if (intel_crtc->atomic.evade)
-		intel_pipe_update_end(intel_crtc,
-				      intel_crtc->atomic.start_vbl_count);
-
-	intel_runtime_pm_put(dev_priv);
-
-	if (intel_crtc->atomic.wait_vblank)
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
-
-	intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);
-
-	if (intel_crtc->atomic.update_fbc) {
-		mutex_lock(&dev->struct_mutex);
-		intel_fbc_update(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
 
-	if (intel_crtc->atomic.post_enable_primary)
-		intel_post_enable_primary(crtc);
-
-	drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
-		if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p))
-			intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
-						       false, false);
-
-	memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
+	if (crtc->state->active)
+		intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count);
 }
 
 /**
@@ -13469,7 +13523,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
 	struct intel_plane *primary;
 	struct intel_plane_state *state;
 	const uint32_t *intel_primary_formats;
-	int num_formats;
+	unsigned int num_formats;
 
 	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
 	if (primary == NULL)
@@ -13490,10 +13544,10 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
 	}
 	primary->pipe = pipe;
 	primary->plane = pipe;
+	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
 	primary->check_plane = intel_check_primary_plane;
 	primary->commit_plane = intel_commit_primary_plane;
 	primary->disable_plane = intel_disable_primary_plane;
-	primary->ckey.flags = I915_SET_COLORKEY_NONE;
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
 		primary->plane = !pipe;
 
@@ -13541,37 +13595,29 @@ void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *
 
 static int
 intel_check_cursor_plane(struct drm_plane *plane,
+			 struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *state)
 {
-	struct drm_crtc *crtc = state->base.crtc;
-	struct drm_device *dev = plane->dev;
+	struct drm_crtc *crtc = crtc_state->base.crtc;
 	struct drm_framebuffer *fb = state->base.fb;
-	struct drm_rect *dest = &state->dst;
-	struct drm_rect *src = &state->src;
-	const struct drm_rect *clip = &state->clip;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc;
 	unsigned stride;
 	int ret;
 
-	crtc = crtc ? crtc : plane->crtc;
-	intel_crtc = to_intel_crtc(crtc);
-
-	ret = drm_plane_helper_check_update(plane, crtc, fb,
-					    src, dest, clip,
+	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+					    &state->dst, &state->clip,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    true, true, &state->visible);
 	if (ret)
 		return ret;
 
-
 	/* if we want to turn off the cursor ignore width and height */
 	if (!obj)
-		goto finish;
+		return 0;
 
 	/* Check for which cursor types we support */
-	if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
+	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
 			  state->base.crtc_w, state->base.crtc_h);
 		return -EINVAL;
@@ -13585,34 +13631,16 @@ intel_check_cursor_plane(struct drm_plane *plane,
 
 	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
-		ret = -EINVAL;
-	}
-
-finish:
-	if (intel_crtc->active) {
-		if (plane->state->crtc_w != state->base.crtc_w)
-			intel_crtc->atomic.update_wm = true;
-
-		intel_crtc->atomic.fb_bits |=
-			INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
+		return -EINVAL;
 	}
 
-	return ret;
+	return 0;
 }
 
 static void
 intel_disable_cursor_plane(struct drm_plane *plane,
-			   struct drm_crtc *crtc,
-			   bool force)
+			   struct drm_crtc *crtc)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	if (!force) {
-		plane->fb = NULL;
-		intel_crtc->cursor_bo = NULL;
-		intel_crtc->cursor_addr = 0;
-	}
-
 	intel_crtc_update_cursor(crtc, false);
 }
 
@@ -13645,9 +13673,9 @@ intel_commit_cursor_plane(struct drm_plane *plane,
 
 	intel_crtc->cursor_addr = addr;
 	intel_crtc->cursor_bo = obj;
-update:
 
-	if (intel_crtc->active)
+update:
+	if (crtc->state->active)
 		intel_crtc_update_cursor(crtc, state->visible);
 }
 
@@ -13672,6 +13700,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
 	cursor->max_downscale = 1;
 	cursor->pipe = pipe;
 	cursor->plane = pipe;
+	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
 	cursor->check_plane = intel_check_cursor_plane;
 	cursor->commit_plane = intel_commit_cursor_plane;
 	cursor->disable_plane = intel_disable_cursor_plane;
@@ -13712,8 +13741,6 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
 	for (i = 0; i < intel_crtc->num_scalers; i++) {
 		intel_scaler = &scaler_state->scalers[i];
 		intel_scaler->in_use = 0;
-		intel_scaler->id = i;
-
 		intel_scaler->mode = PS_SCALER_MODE_DYN;
 	}
 
@@ -13785,6 +13812,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	intel_crtc->cursor_cntl = ~0;
 	intel_crtc->cursor_size = ~0;
 
+	intel_crtc->wm.cxsr_allowed = true;
+
 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
@@ -13919,8 +13948,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 		 */
 		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
 		/* WaIgnoreDDIAStrap: skl */
-		if (found ||
-		    (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
+		if (found || IS_SKYLAKE(dev))
 			intel_ddi_init(dev, PORT_A);
 
 		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -13933,6 +13961,15 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_ddi_init(dev, PORT_C);
 		if (found & SFUSE_STRAP_DDID_DETECTED)
 			intel_ddi_init(dev, PORT_D);
+		/*
+		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
+		 */
+		if (IS_SKYLAKE(dev) &&
+		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
+		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
+		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
+			intel_ddi_init(dev, PORT_E);
+
 	} else if (HAS_PCH_SPLIT(dev)) {
 		int found;
 		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
@@ -13996,18 +14033,18 @@ static void intel_setup_outputs(struct drm_device *dev)
 		}
 
 		intel_dsi_init(dev);
-	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
 		bool found = false;
 
 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
 			DRM_DEBUG_KMS("probing SDVOB\n");
 			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
-			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+			if (!found && IS_G4X(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
 				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
 			}
 
-			if (!found && SUPPORTS_INTEGRATED_DP(dev))
+			if (!found && IS_G4X(dev))
 				intel_dp_init(dev, DP_B, PORT_B);
 		}
 
@@ -14020,15 +14057,15 @@ static void intel_setup_outputs(struct drm_device *dev)
 
 		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
 
-			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+			if (IS_G4X(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
 				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
 			}
-			if (SUPPORTS_INTEGRATED_DP(dev))
+			if (IS_G4X(dev))
 				intel_dp_init(dev, DP_C, PORT_C);
 		}
 
-		if (SUPPORTS_INTEGRATED_DP(dev) &&
+		if (IS_G4X(dev) &&
 		    (I915_READ(DP_D) & DP_DETECTED))
 			intel_dp_init(dev, DP_D, PORT_D);
 	} else if (IS_GEN2(dev))
@@ -14073,9 +14110,27 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
 	return drm_gem_handle_create(file, &obj->base, handle);
 }
 
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+					struct drm_file *file,
+					unsigned flags, unsigned color,
+					struct drm_clip_rect *clips,
+					unsigned num_clips)
+{
+	struct drm_device *dev = fb->dev;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+
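+	/* Userspace wrote to the frontbuffer; flush it so consumers of the
+	 * frontbuffer tracking (FBC, PSR, ...) pick up the new contents. */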
+	mutex_lock(&dev->struct_mutex);
+	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static const struct drm_framebuffer_funcs intel_fb_funcs = {
 	.destroy = intel_user_framebuffer_destroy,
 	.create_handle = intel_user_framebuffer_create_handle,
+	.dirty = intel_user_framebuffer_dirty,
 };
 
 static
@@ -14270,7 +14325,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
 	return intel_framebuffer_create(dev, mode_cmd, obj);
 }
 
-#ifndef CONFIG_DRM_I915_FBDEV
+#ifndef CONFIG_DRM_FBDEV_EMULATION
 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 }
@@ -14281,6 +14336,8 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
 	.output_poll_changed = intel_fbdev_output_poll_changed,
 	.atomic_check = intel_atomic_check,
 	.atomic_commit = intel_atomic_commit,
+	.atomic_state_alloc = intel_atomic_state_alloc,
+	.atomic_state_clear = intel_atomic_state_clear,
 };
 
 /* Set up chip specific display functions */
@@ -14307,7 +14364,6 @@ static void intel_init_display(struct drm_device *dev)
 			haswell_crtc_compute_clock;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_primary_plane =
 			skylake_update_primary_plane;
 	} else if (HAS_DDI(dev)) {
@@ -14318,7 +14374,6 @@ static void intel_init_display(struct drm_device *dev)
 			haswell_crtc_compute_clock;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_primary_plane =
 			ironlake_update_primary_plane;
 	} else if (HAS_PCH_SPLIT(dev)) {
@@ -14329,7 +14384,6 @@ static void intel_init_display(struct drm_device *dev)
 			ironlake_crtc_compute_clock;
 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
-		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_primary_plane =
 			ironlake_update_primary_plane;
 	} else if (IS_VALLEYVIEW(dev)) {
@@ -14339,7 +14393,6 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-		dev_priv->display.off = i9xx_crtc_off;
 		dev_priv->display.update_primary_plane =
 			i9xx_update_primary_plane;
 	} else {
@@ -14349,7 +14402,6 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-		dev_priv->display.off = i9xx_crtc_off;
 		dev_priv->display.update_primary_plane =
 			i9xx_update_primary_plane;
 	}
@@ -14358,6 +14410,9 @@ static void intel_init_display(struct drm_device *dev)
 	if (IS_SKYLAKE(dev))
 		dev_priv->display.get_display_clock_speed =
 			skylake_get_display_clock_speed;
+	else if (IS_BROXTON(dev))
+		dev_priv->display.get_display_clock_speed =
+			broxton_get_display_clock_speed;
 	else if (IS_BROADWELL(dev))
 		dev_priv->display.get_display_clock_speed =
 			broadwell_get_display_clock_speed;
@@ -14371,9 +14426,21 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.get_display_clock_speed =
 			ilk_get_display_clock_speed;
 	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
-		 IS_GEN6(dev) || IS_IVYBRIDGE(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
 		dev_priv->display.get_display_clock_speed =
 			i945_get_display_clock_speed;
+	else if (IS_GM45(dev))
+		dev_priv->display.get_display_clock_speed =
+			gm45_get_display_clock_speed;
+	else if (IS_CRESTLINE(dev))
+		dev_priv->display.get_display_clock_speed =
+			i965gm_get_display_clock_speed;
+	else if (IS_PINEVIEW(dev))
+		dev_priv->display.get_display_clock_speed =
+			pnv_get_display_clock_speed;
+	else if (IS_G33(dev) || IS_G4X(dev))
+		dev_priv->display.get_display_clock_speed =
+			g33_get_display_clock_speed;
 	else if (IS_I915G(dev))
 		dev_priv->display.get_display_clock_speed =
 			i915_get_display_clock_speed;
@@ -14391,10 +14458,12 @@ static void intel_init_display(struct drm_device *dev)
 			i865_get_display_clock_speed;
 	else if (IS_I85X(dev))
 		dev_priv->display.get_display_clock_speed =
-			i855_get_display_clock_speed;
-	else /* 852, 830 */
+			i85x_get_display_clock_speed;
+	else { /* 830 */
+		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
 		dev_priv->display.get_display_clock_speed =
 			i830_get_display_clock_speed;
+	}
 
 	if (IS_GEN5(dev)) {
 		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
@@ -14405,12 +14474,22 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+		if (IS_BROADWELL(dev)) {
+			dev_priv->display.modeset_commit_cdclk =
+				broadwell_modeset_commit_cdclk;
+			dev_priv->display.modeset_calc_cdclk =
+				broadwell_modeset_calc_cdclk;
+		}
 	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.modeset_global_resources =
-			valleyview_modeset_global_resources;
+		dev_priv->display.modeset_commit_cdclk =
+			valleyview_modeset_commit_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			valleyview_modeset_calc_cdclk;
 	} else if (IS_BROXTON(dev)) {
-		dev_priv->display.modeset_global_resources =
-			broxton_modeset_global_resources;
+		dev_priv->display.modeset_commit_cdclk =
+			broxton_modeset_commit_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			broxton_modeset_calc_cdclk;
 	}
 
 	switch (INTEL_INFO(dev)->gen) {
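
Note: the old all-in-one modeset_global_resources hook is split above into a check-time modeset_calc_cdclk and a commit-time modeset_commit_cdclk, so cdclk requirements can be validated as part of the atomic state before anything touches hardware. A minimal sketch of how the pair is meant to be driven from the atomic path (the wrapper names are illustrative assumptions, not from this patch):

	/* Sketch only: expected check/commit pairing for the new cdclk hooks. */
	static int sketch_atomic_check_cdclk(struct drm_atomic_state *state)
	{
		struct drm_i915_private *dev_priv = state->dev->dev_private;

		/* check phase: compute and validate the cdclk the state needs */
		if (dev_priv->display.modeset_calc_cdclk)
			return dev_priv->display.modeset_calc_cdclk(state);

		return 0;
	}

	static void sketch_atomic_commit_cdclk(struct drm_atomic_state *state)
	{
		struct drm_i915_private *dev_priv = state->dev->dev_private;

		/* commit phase: reprogram cdclk before the new crtcs light up */
		if (dev_priv->display.modeset_commit_cdclk)
			dev_priv->display.modeset_commit_cdclk(state);
	}
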
@@ -14629,13 +14708,9 @@ static void i915_disable_vga(struct drm_device *dev)
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
+	intel_update_cdclk(dev);
 	intel_prepare_ddi(dev);
-
-	if (IS_VALLEYVIEW(dev))
-		vlv_update_cdclk(dev);
-
 	intel_init_clock_gating(dev);
-
 	intel_enable_gt_powersave(dev);
 }
 
@@ -14665,6 +14740,24 @@ void intel_modeset_init(struct drm_device *dev)
 	if (INTEL_INFO(dev)->num_pipes == 0)
 		return;
 
+	/*
+	 * There may be no VBT; and if the BIOS enabled SSC we can
+	 * just keep using it to avoid unnecessary flicker.  Whereas if the
+	 * BIOS isn't using it, don't assume it will work even if the VBT
+	 * indicates as much.
+	 */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+					    DREF_SSC1_ENABLE);
+
+		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
+				     bios_lvds_use_ssc ? "en" : "dis",
+				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
+			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+		}
+	}
+
 	intel_init_display(dev);
 	intel_init_audio(dev);
 
@@ -14715,13 +14808,15 @@ void intel_modeset_init(struct drm_device *dev)
 	intel_setup_outputs(dev);
 
 	/* Just in case the BIOS is doing something questionable. */
-	intel_fbc_disable(dev);
+	intel_fbc_disable(dev_priv);
 
 	drm_modeset_lock_all(dev);
-	intel_modeset_setup_hw_state(dev, false);
+	intel_modeset_setup_hw_state(dev);
 	drm_modeset_unlock_all(dev);
 
 	for_each_intel_crtc(dev, crtc) {
+		struct intel_initial_plane_config plane_config = {};
+
 		if (!crtc->active)
 			continue;
 
@@ -14732,15 +14827,14 @@ void intel_modeset_init(struct drm_device *dev)
 		 * can even allow for smooth boot transitions if the BIOS
 		 * fb is large enough for the active pipe configuration.
 		 */
-		if (dev_priv->display.get_initial_plane_config) {
-			dev_priv->display.get_initial_plane_config(crtc,
-							   &crtc->plane_config);
-			/*
-			 * If the fb is shared between multiple heads, we'll
-			 * just get the first one.
-			 */
-			intel_find_initial_plane_obj(crtc, &crtc->plane_config);
-		}
+		dev_priv->display.get_initial_plane_config(crtc,
+							   &plane_config);
+
+		/*
+		 * If the fb is shared between multiple heads, we'll
+		 * just get the first one.
+		 */
+		intel_find_initial_plane_obj(crtc, &plane_config);
 	}
 }
 
@@ -14792,7 +14886,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
 	u32 reg;
+	bool enable;
 
 	/* Clear any frame start delays used for debugging left by the BIOS */
 	reg = PIPECONF(crtc->config->cpu_transcoder);
@@ -14801,6 +14897,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 	/* restore vblank interrupts to correct state */
 	drm_crtc_vblank_reset(&crtc->base);
 	if (crtc->active) {
+		drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
 		update_scanline_offset(crtc);
 		drm_crtc_vblank_on(&crtc->base);
 	}
@@ -14809,7 +14906,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 	 * disable the crtc (and hence change the state) if it is wrong. Note
 	 * that gen4+ has a fixed plane -> pipe mapping.  */
 	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
-		struct intel_connector *connector;
 		bool plane;
 
 		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
@@ -14821,30 +14917,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 		plane = crtc->plane;
 		to_intel_plane_state(crtc->base.primary->state)->visible = true;
 		crtc->plane = !plane;
-		intel_crtc_disable_planes(&crtc->base);
-		dev_priv->display.crtc_disable(&crtc->base);
+		intel_crtc_disable_noatomic(&crtc->base);
 		crtc->plane = plane;
-
-		/* ... and break all links. */
-		for_each_intel_connector(dev, connector) {
-			if (connector->encoder->base.crtc != &crtc->base)
-				continue;
-
-			connector->base.dpms = DRM_MODE_DPMS_OFF;
-			connector->base.encoder = NULL;
-		}
-		/* multiple connectors may have the same encoder:
-		 *  handle them and break crtc link separately */
-		for_each_intel_connector(dev, connector)
-			if (connector->encoder->base.crtc == &crtc->base) {
-				connector->encoder->base.crtc = NULL;
-				connector->encoder->connectors_active = false;
-			}
-
-		WARN_ON(crtc->active);
-		crtc->base.state->enable = false;
-		crtc->base.state->active = false;
-		crtc->base.enabled = false;
 	}
 
 	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
@@ -14858,20 +14932,27 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 
 	/* Adjust the state of the output pipe according to whether we
 	 * have active connectors/encoders. */
-	intel_crtc_update_dpms(&crtc->base);
+	enable = false;
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+		enable = true;
+		break;
+	}
 
-	if (crtc->active != crtc->base.state->enable) {
-		struct intel_encoder *encoder;
+	if (!enable)
+		intel_crtc_disable_noatomic(&crtc->base);
+
+	if (crtc->active != crtc->base.state->active) {
 
 		/* This can happen either due to bugs in the get_hw_state
-		 * functions or because the pipe is force-enabled due to the
+		 * functions or because of calls to intel_crtc_disable_noatomic,
+		 * or because the pipe is force-enabled due to the
 		 * pipe A quirk. */
 		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
 			      crtc->base.base.id,
 			      crtc->base.state->enable ? "enabled" : "disabled",
 			      crtc->active ? "enabled" : "disabled");
 
-		crtc->base.state->enable = crtc->active;
+		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
 		crtc->base.state->active = crtc->active;
 		crtc->base.enabled = crtc->active;
 
@@ -14882,10 +14963,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 		 *  actually up, hence no need to break them. */
 		WARN_ON(crtc->active);
 
-		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
-			WARN_ON(encoder->connectors_active);
+		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
 			encoder->base.crtc = NULL;
-		}
 	}
 
 	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
@@ -14911,6 +14990,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
 	struct intel_connector *connector;
 	struct drm_device *dev = encoder->base.dev;
+	bool active = false;
 
 	/* We need to check both for a crtc link (meaning that the
 	 * encoder is active and trying to read from a pipe) and the
@@ -14918,7 +14998,15 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 	bool has_active_crtc = encoder->base.crtc &&
 		to_intel_crtc(encoder->base.crtc)->active;
 
-	if (encoder->connectors_active && !has_active_crtc) {
+	for_each_intel_connector(dev, connector) {
+		if (connector->base.encoder != &encoder->base)
+			continue;
+
+		active = true;
+		break;
+	}
+
+	if (active && !has_active_crtc) {
 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
 			      encoder->base.base.id,
 			      encoder->base.name);
@@ -14935,7 +15023,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 				encoder->post_disable(encoder);
 		}
 		encoder->base.crtc = NULL;
-		encoder->connectors_active = false;
 
 		/* Inconsistent output/port/pipe state happens presumably due to
 		 * a bug in one of the get_hw_state functions. Or someplace else
@@ -14984,10 +15071,31 @@ static bool primary_get_hw_state(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-	if (!crtc->active)
-		return false;
+	return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE);
+}
+
+static void readout_plane_state(struct intel_crtc *crtc,
+				struct intel_crtc_state *crtc_state)
+{
+	struct intel_plane *p;
+	struct intel_plane_state *plane_state;
+	bool active = crtc_state->base.active;
 
-	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
+	for_each_intel_plane(crtc->base.dev, p) {
+		if (crtc->pipe != p->pipe)
+			continue;
+
+		plane_state = to_intel_plane_state(p->base.state);
+
+		if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
+			plane_state->visible = primary_get_hw_state(crtc);
+		else {
+			if (active)
+				p->disable_plane(&p->base, &crtc->base);
+
+			plane_state->visible = false;
+		}
+	}
 }
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15000,23 +15108,44 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 	int i;
 
 	for_each_intel_crtc(dev, crtc) {
-		struct drm_plane *primary = crtc->base.primary;
-		struct intel_plane_state *plane_state;
-
+		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
 		memset(crtc->config, 0, sizeof(*crtc->config));
 		crtc->config->base.crtc = &crtc->base;
 
-		crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
-
 		crtc->active = dev_priv->display.get_pipe_config(crtc,
 								 crtc->config);
 
-		crtc->base.state->enable = crtc->active;
 		crtc->base.state->active = crtc->active;
 		crtc->base.enabled = crtc->active;
 
-		plane_state = to_intel_plane_state(primary->state);
-		plane_state->visible = primary_get_hw_state(crtc);
+		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+		if (crtc->base.state->active) {
+			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
+			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+			/*
+			 * The initial mode needs to be set in order to keep
+			 * the atomic core happy. It wants a valid mode if the
+			 * crtc's enabled, so we do the above call.
+			 *
+			 * At this point some state updated by the connectors
+			 * in their ->detect() callback has not run yet, so
+			 * no recalculation can be done yet.
+			 *
+			 * Even if we could do a recalculation and modeset
+			 * right now it would cause a double modeset if
+			 * fbdev or userspace chooses a different initial mode.
+			 *
+			 * If that happens, someone indicated they wanted a
+			 * mode change, which means it's safe to do a full
+			 * recalculation.
+			 */
+			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+		}
+
+		crtc->base.hwmode = crtc->config->base.adjusted_mode;
+		readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
 
 		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
 			      crtc->base.base.id,
@@ -15055,7 +15184,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			encoder->base.crtc = NULL;
 		}
 
-		encoder->connectors_active = false;
 		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
 			      encoder->base.base.id,
 			      encoder->base.name,
@@ -15066,7 +15194,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 	for_each_intel_connector(dev, connector) {
 		if (connector->get_hw_state(connector)) {
 			connector->base.dpms = DRM_MODE_DPMS_ON;
-			connector->encoder->connectors_active = true;
 			connector->base.encoder = &connector->encoder->base;
 		} else {
 			connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -15079,10 +15206,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 	}
 }
 
-/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
- * and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev,
-				  bool force_restore)
+/* Scan out the current hw modeset state and sanitize it, mapping it
+ * into the current software state.
+ */
+static void
+intel_modeset_setup_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
@@ -15092,21 +15220,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 
 	intel_modeset_readout_hw_state(dev);
 
-	/*
-	 * Now that we have the config, copy it to each CRTC struct
-	 * Note that this could go away if we move to using crtc_config
-	 * checking everywhere.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (crtc->active && i915.fastboot) {
-			intel_mode_from_pipe_config(&crtc->base.mode,
-						    crtc->config);
-			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
-				      crtc->base.base.id);
-			drm_mode_debug_printmodeline(&crtc->base.mode);
-		}
-	}
-
 	/* HW state is read out, now we need to sanitize this mess. */
 	for_each_intel_encoder(dev, encoder) {
 		intel_sanitize_encoder(encoder);
@@ -15133,34 +15246,77 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 		pll->on = false;
 	}
 
-	if (IS_GEN9(dev))
+	if (IS_VALLEYVIEW(dev))
+		vlv_wm_get_hw_state(dev);
+	else if (IS_GEN9(dev))
 		skl_wm_get_hw_state(dev);
 	else if (HAS_PCH_SPLIT(dev))
 		ilk_wm_get_hw_state(dev);
 
-	if (force_restore) {
-		i915_redisable_vga(dev);
+	for_each_intel_crtc(dev, crtc) {
+		unsigned long put_domains;
 
-		/*
-		 * We need to use raw interfaces for restoring state to avoid
-		 * checking (bogus) intermediate states.
-		 */
-		for_each_pipe(dev_priv, pipe) {
-			struct drm_crtc *crtc =
-				dev_priv->pipe_to_crtc_mapping[pipe];
+		put_domains = modeset_get_crtc_power_domains(&crtc->base);
+		if (WARN_ON(put_domains))
+			modeset_put_power_domains(dev_priv, put_domains);
+	}
+	intel_display_set_init_power(dev_priv, false);
+}
 
-			intel_crtc_restore_mode(crtc);
-		}
-	} else {
-		intel_modeset_update_staged_output_state(dev);
+void intel_display_resume(struct drm_device *dev)
+{
+	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
+	struct intel_connector *conn;
+	struct intel_plane *plane;
+	struct drm_crtc *crtc;
+	int ret;
+
+	if (!state)
+		return;
+
+	state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+	/* preserve complete old state, including dpll */
+	intel_atomic_get_shared_dpll_state(state);
+
+	for_each_crtc(dev, crtc) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_crtc_state(state, crtc);
+
+		ret = PTR_ERR_OR_ZERO(crtc_state);
+		if (ret)
+			goto err;
+
+		/* force a restore */
+		crtc_state->mode_changed = true;
+	}
+
+	for_each_intel_plane(dev, plane) {
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
+		if (ret)
+			goto err;
+	}
+
+	for_each_intel_connector(dev, conn) {
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
+		if (ret)
+			goto err;
 	}
 
-	intel_modeset_check_state(dev);
+	intel_modeset_setup_hw_state(dev);
+
+	i915_redisable_vga(dev);
+	ret = drm_atomic_commit(state);
+	if (!ret)
+		return;
+
+err:
+	DRM_ERROR("Restoring old state failed with %i\n", ret);
+	drm_atomic_state_free(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *c;
 	struct drm_i915_gem_object *obj;
 	int ret;
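
Note: intel_display_resume() duplicates every crtc, plane and connector state up front, forces mode_changed on each crtc so the commit actually restores the hardware, and frees the state only on failure (drm_atomic_commit consumes it on success). A hedged sketch of the expected pairing with intel_display_suspend() from intel_drv.h below; the caller names are assumptions:

	/* Sketch only: how the PM paths are expected to use the pair. */
	static int sketch_pm_suspend_display(struct drm_device *dev)
	{
		int ret;

		drm_modeset_lock_all(dev);	/* sets mode_config.acquire_ctx */
		ret = intel_display_suspend(dev); /* atomically disable all crtcs */
		drm_modeset_unlock_all(dev);

		return ret;
	}

	static void sketch_pm_resume_display(struct drm_device *dev)
	{
		drm_modeset_lock_all(dev);
		intel_display_resume(dev); /* re-read hw state, recommit old state */
		drm_modeset_unlock_all(dev);
	}
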
@@ -15169,16 +15325,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
 	intel_init_gt_powersave(dev);
 	mutex_unlock(&dev->struct_mutex);
 
-	/*
-	 * There may be no VBT; and if the BIOS enabled SSC we can
-	 * just keep using it to avoid unnecessary flicker.  Whereas if the
-	 * BIOS isn't using it, don't assume it will work even if the VBT
-	 * indicates as much.
-	 */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-		dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
-						DREF_SSC1_ENABLE);
-
 	intel_modeset_init_hw(dev);
 
 	intel_setup_overlay(dev);
@@ -15197,14 +15343,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
 		ret = intel_pin_and_fence_fb_obj(c->primary,
 						 c->primary->fb,
 						 c->primary->state,
-						 NULL);
+						 NULL, NULL);
 		mutex_unlock(&dev->struct_mutex);
 		if (ret) {
 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
 				  to_intel_crtc(c)->pipe);
 			drm_framebuffer_unreference(c->primary->fb);
 			c->primary->fb = NULL;
+			c->primary->crtc = c->primary->state->crtc = NULL;
 			update_state_fb(c->primary);
+			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
 		}
 	}
 
@@ -15241,13 +15389,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	 */
 	drm_kms_helper_poll_fini(dev);
 
-	mutex_lock(&dev->struct_mutex);
-
 	intel_unregister_dsm_handler();
 
-	intel_fbc_disable(dev);
-
-	mutex_unlock(&dev->struct_mutex);
+	intel_fbc_disable(dev_priv);
 
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1df0e1fe235f..0a2e33fbf20d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,28 +48,28 @@
 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
 
 struct dp_link_dpll {
-	int link_bw;
+	int clock;
 	struct dpll dpll;
 };
 
 static const struct dp_link_dpll gen4_dpll[] = {
-	{ DP_LINK_BW_1_62,
+	{ 162000,
 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
-	{ DP_LINK_BW_2_7,
+	{ 270000,
 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
 };
 
 static const struct dp_link_dpll pch_dpll[] = {
-	{ DP_LINK_BW_1_62,
+	{ 162000,
 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
-	{ DP_LINK_BW_2_7,
+	{ 270000,
 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
 };
 
 static const struct dp_link_dpll vlv_dpll[] = {
-	{ DP_LINK_BW_1_62,
+	{ 162000,
 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
-	{ DP_LINK_BW_2_7,
+	{ 270000,
 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
 };
 
@@ -83,14 +83,16 @@ static const struct dp_link_dpll chv_dpll[] = {
 	 * m2 is stored in fixed point format using formula below
 	 * (m2_int << 22) | m2_fraction
 	 */
-	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
+	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
-	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
+	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
-	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
+	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
 };
 
+static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
+				  324000, 432000, 540000 };
 static const int skl_rates[] = { 162000, 216000, 270000,
 				  324000, 432000, 540000 };
 static const int default_rates[] = { 162000, 270000, 540000 };
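
Note: with the tables above now holding plain link rates in kHz instead of DP_LINK_BW_* codes, combining source and sink capabilities reduces to intersecting two ascending arrays. A sketch of that intersection (illustrative; the driver's own helper may differ in detail):

	/* Sketch: intersect two ascending rate arrays into @common. */
	static int sketch_intersect_rates(const int *source, int source_len,
					  const int *sink, int sink_len,
					  int *common)
	{
		int i = 0, j = 0, k = 0;

		while (i < source_len && j < sink_len) {
			if (source[i] == sink[j]) {
				common[k++] = source[i];
				i++;
				j++;
			} else if (source[i] < sink[j]) {
				i++;
			} else {
				j++;
			}
		}

		return k;
	}
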
@@ -562,7 +564,9 @@ static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_BROXTON(dev))
+		return BXT_PP_CONTROL(0);
+	else if (HAS_PCH_SPLIT(dev))
 		return PCH_PP_CONTROL;
 	else
 		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
@@ -572,7 +576,9 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_BROXTON(dev))
+		return BXT_PP_STATUS(0);
+	else if (HAS_PCH_SPLIT(dev))
 		return PCH_PP_STATUS;
 	else
 		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
@@ -705,7 +711,8 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 		return 0;
 
 	if (intel_dig_port->port == PORT_A) {
-		return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
+		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
+
 	} else {
 		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
 	}
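
Note: switching to the cached dev_priv->cdclk_freq keeps the same arithmetic as before: on port A the AUX channel is clocked from cdclk and the divider targets a roughly 2 MHz AUX clock. A worked example, assuming cdclk_freq is in kHz as elsewhere in the driver:

	/* cdclk_freq = 450000 (450 MHz):
	 *   DIV_ROUND_UP(450000, 2000) = 225
	 *   450000 kHz / 225           = 2000 kHz, i.e. the ~2 MHz AUX clock
	 */
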
@@ -720,7 +727,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 	if (intel_dig_port->port == PORT_A) {
 		if (index)
 			return 0;
-		return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
+		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 		/* Workaround for non-ULT HSW */
 		switch (index) {
@@ -839,8 +846,15 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	}
 
 	if (try == 3) {
-		WARN(1, "dp_aux_ch not started status 0x%08x\n",
-		     I915_READ(ch_ctl));
+		static u32 last_status = -1;
+		const u32 status = I915_READ(ch_ctl);
+
+		if (status != last_status) {
+			WARN(1, "dp_aux_ch not started status 0x%08x\n",
+			     status);
+			last_status = status;
+		}
+
 		ret = -EBUSY;
 		goto out;
 	}
@@ -1016,11 +1030,34 @@ static void
 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
+	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
 	const char *name = NULL;
+	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
 	int ret;
 
+	/* On SKL we don't have AUX for port E, so we rely on VBT to set
+	 * a proper alternate AUX channel.
+	 */
+	if (IS_SKYLAKE(dev) && port == PORT_E) {
+		switch (info->alternate_aux_channel) {
+		case DP_AUX_B:
+			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
+			break;
+		case DP_AUX_C:
+			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
+			break;
+		case DP_AUX_D:
+			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
+			break;
+		case DP_AUX_A:
+		default:
+			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
+		}
+	}
+
 	switch (port) {
 	case PORT_A:
 		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
@@ -1038,6 +1075,10 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
 		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
 		name = "DPDDC-D";
 		break;
+	case PORT_E:
+		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
+		name = "DPDDC-E";
+		break;
 	default:
 		BUG();
 	}
@@ -1051,7 +1092,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
 	 *
 	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
 	 */
-	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
 		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
 
 	intel_dp->aux.name = name;
@@ -1089,7 +1130,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
 }
 
 static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
 {
 	u32 ctrl1;
 
@@ -1101,7 +1142,7 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
 	pipe_config->dpll_hw_state.cfgcr2 = 0;
 
 	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-	switch (link_clock / 2) {
+	switch (pipe_config->port_clock / 2) {
 	case 81000:
 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
 					      SKL_DPLL0);
@@ -1134,20 +1175,20 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
 	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
 }
 
-static void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
+void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
 {
 	memset(&pipe_config->dpll_hw_state, 0,
 	       sizeof(pipe_config->dpll_hw_state));
 
-	switch (link_bw) {
-	case DP_LINK_BW_1_62:
+	switch (pipe_config->port_clock / 2) {
+	case 81000:
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
 		break;
-	case DP_LINK_BW_2_7:
+	case 135000:
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
 		break;
-	case DP_LINK_BW_5_4:
+	case 270000:
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
 		break;
 	}
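
Note: the new case labels follow from the DPCD encoding of the removed DP_LINK_BW_* codes: the link-bandwidth code counts units of 0.27 Gbps, so the symbol-rate port clock in kHz is code * 27000, and halving port_clock distinguishes the three LCPLL settings. As a quick cross-check of the conversion:

	/* DP_LINK_BW_1_62 = 0x06: 0x06 * 27000 = 162000 kHz, /2 ->  81000 (LCPLL_810)
	 * DP_LINK_BW_2_7  = 0x0a: 0x0a * 27000 = 270000 kHz, /2 -> 135000 (LCPLL_1350)
	 * DP_LINK_BW_5_4  = 0x14: 0x14 * 27000 = 540000 kHz, /2 -> 270000 (LCPLL_2700)
	 */
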
@@ -1182,23 +1223,29 @@ static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
-	if (IS_SKYLAKE(dev)) {
+	int size;
+
+	if (IS_BROXTON(dev)) {
+		*source_rates = bxt_rates;
+		size = ARRAY_SIZE(bxt_rates);
+	} else if (IS_SKYLAKE(dev)) {
 		*source_rates = skl_rates;
-		return ARRAY_SIZE(skl_rates);
+		size = ARRAY_SIZE(skl_rates);
+	} else {
+		*source_rates = default_rates;
+		size = ARRAY_SIZE(default_rates);
 	}
 
-	*source_rates = default_rates;
-
 	/* This depends on the fact that 5.4 is last value in the array */
-	if (intel_dp_source_supports_hbr2(dev))
-		return (DP_LINK_BW_5_4 >> 3) + 1;
-	else
-		return (DP_LINK_BW_2_7 >> 3) + 1;
+	if (!intel_dp_source_supports_hbr2(dev))
+		size--;
+
+	return size;
 }
 
 static void
 intel_dp_set_clock(struct intel_encoder *encoder,
-		   struct intel_crtc_state *pipe_config, int link_bw)
+		   struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = encoder->base.dev;
 	const struct dp_link_dpll *divisor = NULL;
@@ -1220,7 +1267,7 @@ intel_dp_set_clock(struct intel_encoder *encoder,
 
 	if (divisor && count) {
 		for (i = 0; i < count; i++) {
-			if (link_bw == divisor[i].link_bw) {
+			if (pipe_config->port_clock == divisor[i].clock) {
 				pipe_config->dpll = divisor[i].dpll;
 				pipe_config->clock_set = true;
 				break;
@@ -1378,7 +1425,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
 		if (INTEL_INFO(dev)->gen >= 9) {
 			int ret;
-			ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
+			ret = skl_update_scaler_crtc(pipe_config);
 			if (ret)
 				return ret;
 		}
@@ -1403,7 +1450,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	 * bpc in between. */
 	bpp = pipe_config->pipe_bpp;
 	if (is_edp(intel_dp)) {
-		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+
+		/* Get bpp from VBT only for panels that don't have bpp in EDID */
+		if (intel_connector->base.display_info.bpc == 0 &&
+			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
 				      dev_priv->vbt.edp_bpp);
 			bpp = dev_priv->vbt.edp_bpp;
@@ -1494,13 +1544,13 @@ found:
 	}
 
 	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
-		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
+		skl_edp_set_pll_config(pipe_config);
 	else if (IS_BROXTON(dev))
 		/* handled in ddi */;
 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+		hsw_dp_set_ddi_pll_sel(pipe_config);
 	else
-		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+		intel_dp_set_clock(encoder, pipe_config);
 
 	return true;
 }
@@ -1703,8 +1753,10 @@ static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	control = I915_READ(_pp_ctrl_reg(intel_dp));
-	control &= ~PANEL_UNLOCK_MASK;
-	control |= PANEL_UNLOCK_REGS;
+	if (!IS_BROXTON(dev)) {
+		control &= ~PANEL_UNLOCK_MASK;
+		control |= PANEL_UNLOCK_REGS;
+	}
 	return control;
 }
 
@@ -2616,7 +2668,7 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
 			      pipe_name(pipe), port_name(port));
 
-		WARN(encoder->connectors_active,
+		WARN(encoder->base.crtc,
 		     "stealing pipe %c power sequencer from active eDP port %c\n",
 		     pipe_name(pipe), port_name(port));
 
@@ -3418,92 +3470,6 @@ gen7_edp_signal_levels(uint8_t train_set)
 	}
 }
 
-/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
-static uint32_t
-hsw_signal_levels(uint8_t train_set)
-{
-	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-					 DP_TRAIN_PRE_EMPHASIS_MASK);
-	switch (signal_levels) {
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(0);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		return DDI_BUF_TRANS_SELECT(1);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		return DDI_BUF_TRANS_SELECT(2);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
-		return DDI_BUF_TRANS_SELECT(3);
-
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(4);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		return DDI_BUF_TRANS_SELECT(5);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		return DDI_BUF_TRANS_SELECT(6);
-
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(7);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		return DDI_BUF_TRANS_SELECT(8);
-
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(9);
-	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
-			      "0x%x\n", signal_levels);
-		return DDI_BUF_TRANS_SELECT(0);
-	}
-}
-
-static void bxt_signal_levels(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	enum port port = dport->port;
-	struct drm_device *dev = dport->base.base.dev;
-	struct intel_encoder *encoder = &dport->base;
-	uint8_t train_set = intel_dp->train_set[0];
-	uint32_t level = 0;
-
-	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-					 DP_TRAIN_PRE_EMPHASIS_MASK);
-	switch (signal_levels) {
-	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 0;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		level = 1;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		level = 2;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
-		level = 3;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 4;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		level = 5;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		level = 6;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 7;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		level = 8;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 9;
-		break;
-	}
-
-	bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
-}
-
 /* Properly updates "DP" with the correct signal levels. */
 static void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -3511,22 +3477,20 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	uint32_t signal_levels, mask;
+	uint32_t signal_levels, mask = 0;
 	uint8_t train_set = intel_dp->train_set[0];
 
-	if (IS_BROXTON(dev)) {
-		signal_levels = 0;
-		bxt_signal_levels(intel_dp);
-		mask = 0;
-	} else if (HAS_DDI(dev)) {
-		signal_levels = hsw_signal_levels(train_set);
-		mask = DDI_BUF_EMP_MASK;
+	if (HAS_DDI(dev)) {
+		signal_levels = ddi_signal_levels(intel_dp);
+
+		if (IS_BROXTON(dev))
+			signal_levels = 0;
+		else
+			mask = DDI_BUF_EMP_MASK;
 	} else if (IS_CHERRYVIEW(dev)) {
 		signal_levels = chv_signal_levels(intel_dp);
-		mask = 0;
 	} else if (IS_VALLEYVIEW(dev)) {
 		signal_levels = vlv_signal_levels(intel_dp);
-		mask = 0;
 	} else if (IS_GEN7(dev) && port == PORT_A) {
 		signal_levels = gen7_edp_signal_levels(train_set);
 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
@@ -4043,43 +4007,67 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
 	return intel_dp->is_mst;
 }
 
-int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct intel_crtc *intel_crtc =
-		to_intel_crtc(intel_dig_port->base.base.crtc);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
 	u8 buf;
-	int test_crc_count;
-	int attempts = 6;
-	int ret = 0;
 
-	hsw_disable_ips(intel_crtc);
-
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
-		ret = -EIO;
-		goto out;
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
+		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+		return;
 	}
 
-	if (!(buf & DP_TEST_CRC_SUPPORTED)) {
-		ret = -ENOTTY;
-		goto out;
-	}
+	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+			       buf & ~DP_TEST_SINK_START) < 0)
+		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
 
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-		ret = -EIO;
-		goto out;
-	}
+	hsw_enable_ips(intel_crtc);
+}
+
+static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+	u8 buf;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
+		return -EIO;
+
+	if (!(buf & DP_TEST_CRC_SUPPORTED))
+		return -ENOTTY;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+		return -EIO;
+
+	hsw_disable_ips(intel_crtc);
 
 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-				buf | DP_TEST_SINK_START) < 0) {
-		ret = -EIO;
-		goto out;
+			       buf | DP_TEST_SINK_START) < 0) {
+		hsw_enable_ips(intel_crtc);
+		return -EIO;
 	}
 
+	return 0;
+}
+
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+	u8 buf;
+	int test_crc_count;
+	int attempts = 6;
+	int ret;
+
+	ret = intel_dp_sink_crc_start(intel_dp);
+	if (ret)
+		return ret;
+
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
 		ret = -EIO;
-		goto out;
+		goto stop;
 	}
 
 	test_crc_count = buf & DP_TEST_COUNT_MASK;
@@ -4088,7 +4076,7 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
 		if (drm_dp_dpcd_readb(&intel_dp->aux,
 				      DP_TEST_SINK_MISC, &buf) < 0) {
 			ret = -EIO;
-			goto out;
+			goto stop;
 		}
 		intel_wait_for_vblank(dev, intel_crtc->pipe);
 	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
@@ -4096,25 +4084,13 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
 	if (attempts == 0) {
 		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
 		ret = -ETIMEDOUT;
-		goto out;
-	}
-
-	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
-		ret = -EIO;
-		goto out;
+		goto stop;
 	}
 
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-		ret = -EIO;
-		goto out;
-	}
-	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-			       buf & ~DP_TEST_SINK_START) < 0) {
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
 		ret = -EIO;
-		goto out;
-	}
-out:
-	hsw_enable_ips(intel_crtc);
+stop:
+	intel_dp_sink_crc_stop(intel_dp);
 	return ret;
 }
 
@@ -4175,9 +4151,16 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
 				      intel_dp->aux.i2c_defer_count);
 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
 	} else {
+		struct edid *block = intel_connector->detect_edid;
+
+		/* We have to write the checksum
+		 * of the last block read
+		 */
+		block += intel_connector->detect_edid->extensions;
+
 		if (!drm_dp_dpcd_write(&intel_dp->aux,
 					DP_TEST_EDID_CHECKSUM,
-					&intel_connector->detect_edid->checksum,
+					&block->checksum,
 					1))
 			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
 
@@ -4325,10 +4308,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-	if (!intel_encoder->connectors_active)
-		return;
-
-	if (WARN_ON(!intel_encoder->base.crtc))
+	if (!intel_encoder->base.crtc)
 		return;
 
 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
@@ -4909,7 +4889,7 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder)
 }
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dp_detect,
 	.force = intel_dp_force,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -4931,12 +4911,6 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
 	.destroy = intel_dp_encoder_destroy,
 };
 
-void
-intel_dp_hot_plug(struct intel_encoder *intel_encoder)
-{
-	return;
-}
-
 enum irqreturn
 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 {
@@ -4987,9 +4961,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
 		intel_dp_probe_oui(intel_dp);
 
-		if (!intel_dp_probe_mst(intel_dp))
+		if (!intel_dp_probe_mst(intel_dp)) {
+			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+			intel_dp_check_link_status(intel_dp);
+			drm_modeset_unlock(&dev->mode_config.connection_mutex);
 			goto mst_fail;
-
+		}
 	} else {
 		if (intel_dp->is_mst) {
 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
@@ -4997,10 +4974,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 		}
 
 		if (!intel_dp->is_mst) {
-			/*
-			 * we'll check the link status via the normal hot plug path later -
-			 * but for short hpds we should check it now
-			 */
 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 			intel_dp_check_link_status(intel_dp);
 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -5042,16 +5015,17 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
 	return -1;
 }
 
-/* check the VBT to see whether the eDP is on DP-D port */
+/* check the VBT to see whether the eDP is on another port */
 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	union child_device_config *p_child;
 	int i;
 	static const short port_mapping[] = {
-		[PORT_B] = PORT_IDPB,
-		[PORT_C] = PORT_IDPC,
-		[PORT_D] = PORT_IDPD,
+		[PORT_B] = DVO_PORT_DPB,
+		[PORT_C] = DVO_PORT_DPC,
+		[PORT_D] = DVO_PORT_DPD,
+		[PORT_E] = DVO_PORT_DPE,
 	};
 
 	if (port == PORT_A)
@@ -5104,8 +5078,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct edp_power_seq cur, vbt, spec,
 		*final = &intel_dp->pps_delays;
-	u32 pp_on, pp_off, pp_div, pp;
-	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -5113,7 +5087,16 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 	if (final->t11_t12 != 0)
 		return;
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_BROXTON(dev)) {
+		/*
+		 * TODO: BXT has 2 sets of PPS registers.
+		 * The correct register for Broxton needs to be
+		 * identified using VBT; hardcoding for now.
+		 */
+		pp_ctrl_reg = BXT_PP_CONTROL(0);
+		pp_on_reg = BXT_PP_ON_DELAYS(0);
+		pp_off_reg = BXT_PP_OFF_DELAYS(0);
+	} else if (HAS_PCH_SPLIT(dev)) {
 		pp_ctrl_reg = PCH_PP_CONTROL;
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
@@ -5129,12 +5112,14 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
 	/* Workaround: Need to write PP_CONTROL with the unlock key as
 	 * the very first thing. */
-	pp = ironlake_get_pp_control(intel_dp);
-	I915_WRITE(pp_ctrl_reg, pp);
+	pp_ctl = ironlake_get_pp_control(intel_dp);
 
 	pp_on = I915_READ(pp_on_reg);
 	pp_off = I915_READ(pp_off_reg);
-	pp_div = I915_READ(pp_div_reg);
+	if (!IS_BROXTON(dev)) {
+		I915_WRITE(pp_ctrl_reg, pp_ctl);
+		pp_div = I915_READ(pp_div_reg);
+	}
 
 	/* Pull timing values out of registers */
 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
@@ -5149,8 +5134,17 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
 		PANEL_POWER_DOWN_DELAY_SHIFT;
 
-	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+	if (IS_BROXTON(dev)) {
+		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
+			BXT_POWER_CYCLE_DELAY_SHIFT;
+		if (tmp > 0)
+			cur.t11_t12 = (tmp - 1) * 1000;
+		else
+			cur.t11_t12 = 0;
+	} else {
+		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+	}
 
 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
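
Note: the Broxton power-cycle delay lives in PP_CONTROL rather than a divisor register, and it uses an off-by-one encoding: a raw field value of N means (N - 1) * 1000 ms, with 0 meaning no delay programmed. The write path in a later hunk of this patch uses the inverse, DIV_ROUND_UP(t11_t12 + 1, 1000). A worked round trip:

	/* Round trip for t11_t12 = 1000 ms on BXT:
	 *   write: DIV_ROUND_UP(1000 + 1, 1000) = 2    -> field value 2
	 *   read:  (2 - 1) * 1000               = 1000 ms, as programmed
	 */
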
@@ -5207,13 +5201,23 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp_on, pp_off, pp_div, port_sel = 0;
 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
-	int pp_on_reg, pp_off_reg, pp_div_reg;
+	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_BROXTON(dev)) {
+		/*
+		 * TODO: BXT has 2 sets of PPS registers.
+		 * The correct register for Broxton needs to be
+		 * identified using VBT; hardcoding for now.
+		 */
+		pp_ctrl_reg = BXT_PP_CONTROL(0);
+		pp_on_reg = BXT_PP_ON_DELAYS(0);
+		pp_off_reg = BXT_PP_OFF_DELAYS(0);
+
+	} else if (HAS_PCH_SPLIT(dev)) {
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_div_reg = PCH_PP_DIVISOR;
@@ -5239,9 +5243,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
 	/* Compute the divisor for the pp clock, simply match the Bspec
 	 * formula. */
-	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
-	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
-			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
+	if (IS_BROXTON(dev)) {
+		pp_div = I915_READ(pp_ctrl_reg);
+		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
+		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
+				<< BXT_POWER_CYCLE_DELAY_SHIFT);
+	} else {
+		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
+		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
+	}
 
 	/* Haswell doesn't have any port selection bits for the panel
 	 * power sequencer any more. */
@@ -5258,11 +5269,16 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
 	I915_WRITE(pp_on_reg, pp_on);
 	I915_WRITE(pp_off_reg, pp_off);
-	I915_WRITE(pp_div_reg, pp_div);
+	if (IS_BROXTON(dev))
+		I915_WRITE(pp_ctrl_reg, pp_div);
+	else
+		I915_WRITE(pp_div_reg, pp_div);
 
 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
 		      I915_READ(pp_on_reg),
 		      I915_READ(pp_off_reg),
+		      IS_BROXTON(dev) ?
+		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
 		      I915_READ(pp_div_reg));
 }
 
@@ -5467,13 +5483,12 @@ unlock:
 }
 
 /**
- * intel_edp_drrs_invalidate - Invalidate DRRS
+ * intel_edp_drrs_invalidate - Disable Idleness DRRS
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
- * When there is a disturbance on screen (due to cursor movement/time
- * update etc), DRRS needs to be invalidated, i.e. need to switch to
- * high RR.
+ * This function gets called every time rendering on the given planes starts.
+ * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
@@ -5498,26 +5513,27 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
 
-	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+
+	/* invalidate means busy screen hence upclock */
+	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
 		intel_dp_set_drrs_state(dev_priv->dev,
 				dev_priv->drrs.dp->attached_connector->panel.
 				fixed_mode->vrefresh);
-	}
-
-	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 
-	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
 	mutex_unlock(&dev_priv->drrs.mutex);
 }
 
 /**
- * intel_edp_drrs_flush - Flush DRRS
+ * intel_edp_drrs_flush - Restart Idleness DRRS
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
- * When there is no movement on screen, DRRS work can be scheduled.
- * This DRRS work is responsible for setting relevant registers after a
- * timeout of 1 second.
+ * This function gets called every time rendering on the given planes has
+ * completed or a flip on a crtc has completed. So DRRS should be upclocked
+ * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
+ * if no other planes are dirty.
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
@@ -5541,10 +5557,21 @@ void intel_edp_drrs_flush(struct drm_device *dev,
 
 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
-			!dev_priv->drrs.busy_frontbuffer_bits)
+	/* flush means busy screen hence upclock */
+	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+		intel_dp_set_drrs_state(dev_priv->dev,
+				dev_priv->drrs.dp->attached_connector->panel.
+				fixed_mode->vrefresh);
+
+	/*
+	 * flush also means no more activity, hence schedule the downclock
+	 * if all other fbs are quiescent too
+	 */
+	if (!dev_priv->drrs.busy_frontbuffer_bits)
 		schedule_delayed_work(&dev_priv->drrs.work,
 				msecs_to_jiffies(1000));
 	mutex_unlock(&dev_priv->drrs.mutex);
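
Note: both invalidate and flush now mask the incoming bits down to the DRRS pipe before touching busy_frontbuffer_bits, and both upclock on activity; flush additionally arms the one-second idleness work once no busy bits remain. A small worked example of the masking (bit values illustrative):

	/* Suppose pipe A owns frontbuffer bits 0x0000000f and bit 0x2 flushes:
	 *   frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);  -> 0x2 kept
	 *   busy_frontbuffer_bits &= ~frontbuffer_bits;            -> bit 0x2 cleared
	 * The downclock work is scheduled only once busy_frontbuffer_bits == 0.
	 */
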
@@ -5833,6 +5860,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	case PORT_D:
 		intel_encoder->hpd_pin = HPD_PORT_D;
 		break;
+	case PORT_E:
+		intel_encoder->hpd_pin = HPD_PORT_E;
+		break;
 	default:
 		BUG();
 	}
@@ -5948,10 +5978,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	}
 	intel_encoder->cloneable = 0;
-	intel_encoder->hot_plug = intel_dp_hot_plug;
 
 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-	dev_priv->hpd_irq_port[port] = intel_dig_port;
+	dev_priv->hotplug.irq_port[port] = intel_dig_port;
 
 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
 		drm_encoder_cleanup(encoder);
@@ -5967,7 +5996,7 @@ void intel_dp_mst_suspend(struct drm_device *dev)
 
 	/* disable MST */
 	for (i = 0; i < I915_MAX_PORTS; i++) {
-		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
 		if (!intel_dig_port)
 			continue;
 
@@ -5986,7 +6015,7 @@ void intel_dp_mst_resume(struct drm_device *dev)
 	int i;
 
 	for (i = 0; i < I915_MAX_PORTS; i++) {
-		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
 		if (!intel_dig_port)
 			continue;
 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 600afdbef8c9..983553cf8b74 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -33,6 +33,7 @@
 static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 					struct intel_crtc_state *pipe_config)
 {
+	struct drm_device *dev = encoder->base.dev;
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -97,6 +98,10 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 			       &pipe_config->dp_m_n);
 
 	pipe_config->dp_m_n.tu = slots;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_dp_set_ddi_pll_sel(pipe_config);
+
 	return true;
 
 }
@@ -328,7 +333,7 @@ intel_dp_mst_connector_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dp_mst_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_dp_mst_set_property,
@@ -406,7 +411,7 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
 
 static void intel_connector_add_to_fbdev(struct intel_connector *connector)
 {
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
 #endif
@@ -414,7 +419,7 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector)
 
 static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
 {
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
 #endif
@@ -452,10 +457,9 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
 	drm_mode_connector_set_path_property(connector, pathprop);
-	drm_reinit_primary_mode_group(dev);
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	intel_connector_add_to_fbdev(intel_connector);
-	mutex_unlock(&dev->mode_config.mutex);
+	drm_modeset_unlock_all(dev);
 	drm_connector_register(&intel_connector->base);
 	return connector;
 }
@@ -465,19 +469,28 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_device *dev = connector->dev;
+
 	/* need to nuke the connector */
-	mutex_lock(&dev->mode_config.mutex);
-	intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-	mutex_unlock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
+	if (connector->state->crtc) {
+		struct drm_mode_set set;
+		int ret;
+
+		memset(&set, 0, sizeof(set));
+		set.crtc = connector->state->crtc;
+
+		ret = drm_atomic_helper_set_config(&set);
+
+		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
+	}
+	drm_modeset_unlock_all(dev);
 
 	intel_connector->unregister(intel_connector);
 
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	intel_connector_remove_from_fbdev(intel_connector);
 	drm_connector_cleanup(connector);
-	mutex_unlock(&dev->mode_config.mutex);
-
-	drm_reinit_primary_mode_group(dev);
+	drm_modeset_unlock_all(dev);
 
 	kfree(intel_connector);
 	DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 105928382e21..2b9e6f9775c5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -130,15 +130,9 @@ struct intel_fbdev {
 
 struct intel_encoder {
 	struct drm_encoder base;
-	/*
-	 * The new crtc this encoder will be driven from. Only differs from
-	 * base->crtc while a modeset is in progress.
-	 */
-	struct intel_crtc *new_crtc;
 
 	enum intel_output_type type;
 	unsigned int cloneable;
-	bool connectors_active;
 	void (*hot_plug)(struct intel_encoder *);
 	bool (*compute_config)(struct intel_encoder *,
 			       struct intel_crtc_state *);
@@ -182,6 +176,10 @@ struct intel_panel {
 		bool enabled;
 		bool combination_mode;	/* gen 2/4 only */
 		bool active_low_pwm;
+
+		/* PWM chip */
+		struct pwm_device *pwm;
+
 		struct backlight_device *device;
 	} backlight;
 
@@ -195,12 +193,6 @@ struct intel_connector {
 	 */
 	struct intel_encoder *encoder;
 
-	/*
-	 * The new encoder this connector will be driven. Only differs from
-	 * encoder while a modeset is in progress.
-	 */
-	struct intel_encoder *new_encoder;
-
 	/* Reads out the current hw, returning true if the connector is enabled
 	 * and active (i.e. dpms ON state). */
 	bool (*get_hw_state)(struct intel_connector *);
@@ -241,6 +233,14 @@ typedef struct dpll {
 	int	p;
 } intel_clock_t;
 
+struct intel_atomic_state {
+	struct drm_atomic_state base;
+
+	unsigned int cdclk;
+	bool dpll_set;
+	struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+};
+
 struct intel_plane_state {
 	struct drm_plane_state base;
 	struct drm_rect src;
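
Note: the new intel_atomic_state subclass is recovered from a core drm_atomic_state with the to_intel_atomic_state() container_of macro added further down in this header. A hedged sketch of typical use in a check-phase cdclk hook (the function name and value are illustrative):

	static int sketch_calc_cdclk(struct drm_atomic_state *state)
	{
		struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

		/* stash the computed cdclk for the commit-side hook to apply */
		intel_state->cdclk = 450000;	/* illustrative value, in kHz */

		return 0;
	}
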
@@ -256,7 +256,7 @@ struct intel_plane_state {
 	 * plane requiring a scaler:
 	 *   - During check_plane, its bit is set in
 	 *     crtc_state->scaler_state.scaler_users by calling helper function
-	 *     update_scaler_users.
+	 *     update_scaler_plane.
 	 *   - scaler_id indicates the scaler it got assigned.
 	 *
 	 * plane doesn't require a scaler:
@@ -264,9 +264,11 @@ struct intel_plane_state {
 	 *     got disabled.
 	 *   - During check_plane, corresponding bit is reset in
 	 *     crtc_state->scaler_state.scaler_users by calling helper function
-	 *     update_scaler_users.
+	 *     update_scaler_plane.
 	 */
 	int scaler_id;
+
+	struct drm_intel_sprite_colorkey ckey;
 };
 
 struct intel_initial_plane_config {
@@ -286,7 +288,6 @@ struct intel_initial_plane_config {
 #define SKL_MAX_DST_H 4096
 
 struct intel_scaler {
-	int id;
 	int in_use;
 	uint32_t mode;
 };
@@ -319,6 +320,9 @@ struct intel_crtc_scaler_state {
 	int scaler_id;
 };
 
+/* drm_mode->private_flags */
+#define I915_MODE_FLAG_INHERITED 1
+
 struct intel_crtc_state {
 	struct drm_crtc_state base;
 
@@ -331,7 +335,6 @@ struct intel_crtc_state {
 	 * accordingly.
 	 */
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS	(1<<0) /* unreliable sync mode.flags */
-#define PIPE_CONFIG_QUIRK_INHERITED_MODE	(1<<1) /* mode inherited from firmware */
 	unsigned long quirks;
 
 	/* Pipe source size (ie. panel fitter input size)
@@ -447,6 +450,18 @@ struct intel_crtc_state {
 	int pbn;
 
 	struct intel_crtc_scaler_state scaler_state;
+
+	/* w/a for waiting 2 vblanks during crtc enable */
+	enum pipe hsw_workaround_pipe;
+};
+
+struct vlv_wm_state {
+	struct vlv_pipe_wm wm[3];
+	struct vlv_sr_wm sr[3];
+	uint8_t num_active_planes;
+	uint8_t num_levels;
+	uint8_t level;
+	bool cxsr;
 };
 
 struct intel_pipe_wm {
@@ -478,16 +493,13 @@ struct skl_pipe_wm {
  * and thus can't be run with interrupts disabled.
  */
 struct intel_crtc_atomic_commit {
-	/* vblank evasion */
-	bool evade;
-	unsigned start_vbl_count;
-
 	/* Sleepable operations to perform before commit */
 	bool wait_for_flips;
 	bool disable_fbc;
 	bool disable_ips;
+	bool disable_cxsr;
 	bool pre_disable_primary;
-	bool update_wm;
+	bool update_wm_pre, update_wm_post;
 	unsigned disabled_planes;
 
 	/* Sleepable operations to perform after commit */
@@ -527,9 +539,7 @@ struct intel_crtc {
 	uint32_t cursor_size;
 	uint32_t cursor_base;
 
-	struct intel_initial_plane_config plane_config;
 	struct intel_crtc_state *config;
-	bool new_enabled;
 
 	/* reset counter value when the last flip was submitted */
 	unsigned int reset_counter;
@@ -544,14 +554,19 @@ struct intel_crtc {
 		struct intel_pipe_wm active;
 		/* SKL wm values currently in use */
 		struct skl_pipe_wm skl_active;
+		/* allow CxSR on this pipe */
+		bool cxsr_allowed;
 	} wm;
 
 	int scanline_offset;
 
+	unsigned start_vbl_count;
 	struct intel_crtc_atomic_commit atomic;
 
 	/* scalers available on this crtc */
 	int num_scalers;
+
+	struct vlv_wm_state wm_state;
 };
 
 struct intel_plane_wm_parameters {
@@ -570,6 +585,7 @@ struct intel_plane_wm_parameters {
 	bool scaled;
 	u64 tiling;
 	unsigned int rotation;
+	uint16_t fifo_size;
 };
 
 struct intel_plane {
@@ -578,9 +594,7 @@ struct intel_plane {
 	enum pipe pipe;
 	bool can_scale;
 	int max_downscale;
-
-	/* FIXME convert to properties */
-	struct drm_intel_sprite_colorkey ckey;
+	uint32_t frontbuffer_bit;
 
 	/* Since we need to change the watermarks before/after
 	 * enabling/disabling the planes, we need to store the parameters here
@@ -603,8 +617,9 @@ struct intel_plane {
 			     uint32_t x, uint32_t y,
 			     uint32_t src_w, uint32_t src_h);
 	void (*disable_plane)(struct drm_plane *plane,
-			      struct drm_crtc *crtc, bool force);
+			      struct drm_crtc *crtc);
 	int (*check_plane)(struct drm_plane *plane,
+			   struct intel_crtc_state *crtc_state,
 			   struct intel_plane_state *state);
 	void (*commit_plane)(struct drm_plane *plane,
 			     struct intel_plane_state *state);
@@ -629,6 +644,7 @@ struct cxsr_latency {
 	unsigned long cursor_hpll_disable;
 };
 
+#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
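
to_intel_atomic_state() follows the same container_of() upcast pattern as its neighbours. A self-contained illustration; the cdclk field is invented for the demo, not a driver field:

#include <stdio.h>
#include <stddef.h>

/* Minimal container_of(), as defined by the kernel. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_atomic_state { int dummy; };

struct intel_atomic_state {
	struct drm_atomic_state base;
	int cdclk;			/* illustrative private field */
};

#define to_intel_atomic_state(x) \
	container_of(x, struct intel_atomic_state, base)

int main(void)
{
	struct intel_atomic_state s = { .cdclk = 540000 };
	struct drm_atomic_state *base = &s.base;

	/* Recover the wrapper from the embedded base pointer. */
	printf("cdclk = %d\n", to_intel_atomic_state(base)->cdclk);
	return 0;
}
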
@@ -940,43 +956,23 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
 void intel_ddi_clock_get(struct intel_encoder *encoder,
 			 struct intel_crtc_state *pipe_config);
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-				enum port port, int type);
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 
 /* intel_frontbuffer.c */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring,
 			     enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
 				     unsigned frontbuffer_bits);
-void intel_frontbuffer_flush(struct drm_device *dev,
-			     unsigned frontbuffer_bits);
-/**
- * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. This is for
- * synchronous plane updates which will happen on the next vblank and which will
- * not get delayed by pending gpu rendering.
- *
- * Can be called without any locks held.
- */
-static inline
 void intel_frontbuffer_flip(struct drm_device *dev,
-			    unsigned frontbuffer_bits)
-{
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
+			    unsigned frontbuffer_bits);
 unsigned int intel_fb_align_height(struct drm_device *dev,
 				   unsigned int height,
 				   uint32_t pixel_format,
 				   uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
-
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+			enum fb_op_origin origin);
 u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
 			      uint32_t pixel_format);
 
@@ -994,15 +990,11 @@ int intel_pch_rawclk(struct drm_device *dev);
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
-void intel_crtc_control(struct drm_crtc *crtc, bool enable);
-void intel_crtc_reset(struct intel_crtc *crtc);
-void intel_crtc_update_dpms(struct drm_crtc *crtc);
+int intel_display_suspend(struct drm_device *dev);
 void intel_encoder_destroy(struct drm_encoder *encoder);
 int intel_connector_init(struct intel_connector *);
 struct intel_connector *intel_connector_alloc(void);
-void intel_connector_dpms(struct drm_connector *, int mode);
 bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_modeset_check_state(struct drm_device *dev);
 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
 				struct intel_digital_port *port);
 void intel_connector_attach_encoder(struct intel_connector *connector,
@@ -1035,7 +1027,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			       struct drm_framebuffer *fb,
 			       const struct drm_plane_state *plane_state,
-			       struct intel_engine_cs *pipelined);
+			       struct intel_engine_cs *pipelined,
+			       struct drm_i915_gem_request **pipelined_request);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1058,6 +1051,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
 				    struct drm_plane_state *state,
 				    struct drm_property *property,
 				    uint64_t val);
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+				    struct drm_plane_state *plane_state);
 
 unsigned int
 intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
@@ -1072,9 +1067,6 @@ intel_rotation_90_or_270(unsigned int rotation)
 void intel_create_rotation_property(struct drm_device *dev,
 					struct intel_plane *plane);
 
-bool intel_wm_need_update(struct drm_plane *plane,
-			  struct drm_plane_state *state);
-
 /* shared dpll functions */
 struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -1084,7 +1076,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 						struct intel_crtc_state *state);
-void intel_put_shared_dpll(struct intel_crtc *crtc);
 
 void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
 		      const struct dpll *dpll);
@@ -1104,7 +1095,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+					     int *x, int *y,
 					     unsigned int tiling_mode,
 					     unsigned int bpp,
 					     unsigned int pitch);
@@ -1114,7 +1106,6 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void broxton_init_cdclk(struct drm_device *dev);
 void broxton_uninit_cdclk(struct drm_device *dev);
-void broxton_set_cdclk(struct drm_device *dev, int frequency);
 void broxton_ddi_phy_init(struct drm_device *dev);
 void broxton_ddi_phy_uninit(struct drm_device *dev);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
@@ -1130,6 +1121,8 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
 				int dotclock);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
 			intel_clock_t *best_clock);
+int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+
 bool intel_crtc_active(struct drm_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
@@ -1139,10 +1132,8 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config);
 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
-void skl_detach_scalers(struct intel_crtc *intel_crtc);
-int skl_update_scaler_users(struct intel_crtc *intel_crtc,
-	struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
-	struct intel_plane_state *plane_state, int force_detach);
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
 unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
@@ -1194,6 +1185,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
 void intel_edp_drrs_invalidate(struct drm_device *dev,
 		unsigned frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
 
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@@ -1207,7 +1199,7 @@ void intel_dvo_init(struct drm_device *dev);
 
 
 /* legacy fbdev emulation in intel_fbdev.c */
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
 extern void intel_fbdev_fini(struct drm_device *dev);
@@ -1238,15 +1230,18 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 #endif
 
 /* intel_fbc.c */
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_fbc_update(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
+void intel_fbc_update(struct drm_i915_private *dev_priv);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_disable(struct drm_device *dev);
+void intel_fbc_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_disable_crtc(struct intel_crtc *crtc);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits,
 			  enum fb_op_origin origin);
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
-		     unsigned int frontbuffer_bits);
+		     unsigned int frontbuffer_bits, enum fb_op_origin origin);
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
 
 /* intel_hdmi.c */
 void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1314,11 +1309,13 @@ void intel_backlight_unregister(struct drm_device *dev);
 void intel_psr_enable(struct intel_dp *intel_dp);
 void intel_psr_disable(struct intel_dp *intel_dp);
 void intel_psr_invalidate(struct drm_device *dev,
-			      unsigned frontbuffer_bits);
+			  unsigned frontbuffer_bits);
 void intel_psr_flush(struct drm_device *dev,
-			 unsigned frontbuffer_bits);
+		     unsigned frontbuffer_bits,
+		     enum fb_op_origin origin);
 void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev);
+void intel_psr_single_frame_update(struct drm_device *dev,
+				   unsigned frontbuffer_bits);
 
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
@@ -1372,11 +1369,12 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
 		    unsigned long submitted);
 void intel_queue_rps_boost_for_request(struct drm_device *dev,
 				       struct drm_i915_gem_request *req);
+void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
-
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
@@ -1384,10 +1382,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
 
 /* intel_sprite.c */
 int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-int intel_plane_restore(struct drm_plane *plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
-bool intel_pipe_update_start(struct intel_crtc *crtc,
+void intel_pipe_update_start(struct intel_crtc *crtc,
 			     uint32_t *start_vbl_count);
 void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
 
@@ -1395,11 +1392,6 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
 void intel_tv_init(struct drm_device *dev);
 
 /* intel_atomic.c */
-int intel_atomic_check(struct drm_device *dev,
-		       struct drm_atomic_state *state);
-int intel_atomic_commit(struct drm_device *dev,
-			struct drm_atomic_state *state,
-			bool async);
 int intel_connector_atomic_get_property(struct drm_connector *connector,
 					const struct drm_connector_state *state,
 					struct drm_property *property,
@@ -1407,6 +1399,11 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
 struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
 void intel_crtc_destroy_state(struct drm_crtc *crtc,
 			       struct drm_crtc_state *state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_clear(struct drm_atomic_state *);
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
+
 static inline struct intel_crtc_state *
 intel_atomic_get_crtc_state(struct drm_atomic_state *state,
 			    struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b5a5558ecd63..4a601cf90f16 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -31,6 +31,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_mipi_dsi.h>
 #include <linux/slab.h>
+#include <linux/gpio/consumer.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_dsi.h"
@@ -261,11 +262,6 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
 	return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
 }
 
-static void intel_dsi_hot_plug(struct intel_encoder *encoder)
-{
-	DRM_DEBUG_KMS("\n");
-}
-
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *config)
 {
@@ -401,6 +397,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
 
 		intel_dsi_port_enable(encoder);
 	}
+
+	intel_panel_enable_backlight(intel_dsi->attached_connector);
 }
 
 static void intel_dsi_pre_enable(struct intel_encoder *encoder)
@@ -415,15 +413,21 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 
 	DRM_DEBUG_KMS("\n");
 
+	/* Panel Enable over CRC PMIC */
+	if (intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+
+	msleep(intel_dsi->panel_on_delay);
+
 	/* Disable DPOunit clock gating, can stall pipe
 	 * and we need DPLL REFA always enabled */
 	tmp = I915_READ(DPLL(pipe));
-	tmp |= DPLL_REFA_CLK_ENABLE_VLV;
+	tmp |= DPLL_REF_CLK_ENABLE_VLV;
 	I915_WRITE(DPLL(pipe), tmp);
 
 	/* update the hw state for DPLL */
-	intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
-		DPLL_REFA_CLK_ENABLE_VLV;
+	intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 
 	tmp = I915_READ(DSPCLK_GATE_D);
 	tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
@@ -432,8 +436,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 	/* put device in ready state */
 	intel_dsi_device_ready(encoder);
 
-	msleep(intel_dsi->panel_on_delay);
-
 	drm_panel_prepare(intel_dsi->panel);
 
 	for_each_dsi_port(port, intel_dsi->ports)
@@ -461,6 +463,8 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
 
 	DRM_DEBUG_KMS("\n");
 
+	intel_panel_disable_backlight(intel_dsi->attached_connector);
+
 	if (is_vid_mode(intel_dsi)) {
 		/* Send Shutdown command to the panel in LP mode */
 		for_each_dsi_port(port, intel_dsi->ports)
@@ -576,6 +580,10 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
 
 	msleep(intel_dsi->panel_off_delay);
 	msleep(intel_dsi->panel_pwr_cycle_delay);
+
+	/* Panel Disable over CRC PMIC */
+	if (intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
 }
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -955,6 +963,11 @@ static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
 		/* XXX: Logically this call belongs in the panel driver. */
 		drm_panel_remove(intel_dsi->panel);
 	}
+
+	/* dispose of the gpios */
+	if (intel_dsi->gpio_panel)
+		gpiod_put(intel_dsi->gpio_panel);
+
 	intel_encoder_destroy(encoder);
 }
 
@@ -969,7 +982,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
 };
 
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dsi_detect,
 	.destroy = intel_dsi_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1022,7 +1035,6 @@ void intel_dsi_init(struct drm_device *dev)
 	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
 
 	/* XXX: very likely not all of these are needed */
-	intel_encoder->hot_plug = intel_dsi_hot_plug;
 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1071,6 +1083,20 @@ void intel_dsi_init(struct drm_device *dev)
 		goto err;
 	}
 
+	/*
+	 * In case of BYT with CRC PMIC, we need to use GPIO for
+	 * panel control.
+	 */
+	if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+		intel_dsi->gpio_panel =
+			gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
+
+		if (IS_ERR(intel_dsi->gpio_panel)) {
+			DRM_ERROR("Failed to get gpio for panel control\n");
+			intel_dsi->gpio_panel = NULL;
+		}
+	}
+
 	intel_encoder->type = INTEL_OUTPUT_DSI;
 	intel_encoder->cloneable = 0;
 	drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
@@ -1104,6 +1130,7 @@ void intel_dsi_init(struct drm_device *dev)
 	}
 
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+	intel_panel_setup_backlight(connector, INVALID_PIPE);
 
 	return;
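
For reference, a hedged sketch tying together the CRC PMIC panel-GPIO lifecycle the hunks above add: acquire at init, assert before the panel-on delay, deassert after power-off, release at destroy. panel_ctl and the 100 ms delay are illustrative stand-ins, not driver structures or VBT-derived values:

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/gpio/consumer.h>

struct panel_ctl {
	struct gpio_desc *gpio;	/* NULL when no CRC PMIC is present */
};

static void panel_ctl_init(struct panel_ctl *p, struct device *dev)
{
	p->gpio = gpiod_get(dev, "panel", GPIOD_OUT_HIGH);
	if (IS_ERR(p->gpio))
		p->gpio = NULL;	/* keep going without panel control */
}

static void panel_ctl_power(struct panel_ctl *p, bool on)
{
	if (p->gpio)
		gpiod_set_value_cansleep(p->gpio, on);
	if (on)
		msleep(100);	/* placeholder for panel_on_delay */
}

static void panel_ctl_fini(struct panel_ctl *p)
{
	if (p->gpio)
		gpiod_put(p->gpio);
}
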
 
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 2784ac442368..42a68593e32a 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -42,6 +42,9 @@ struct intel_dsi {
 	struct drm_panel *panel;
 	struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
 
+	/* GPIO Desc for CRC based Panel control */
+	struct gpio_desc *gpio_panel;
+
 	struct intel_connector *attached_connector;
 
 	/* bit mask of ports being driven */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index d20cf37b6901..c6a8975b128f 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -38,6 +38,27 @@
 #define DSI_HFP_PACKET_EXTRA_SIZE	6
 #define DSI_EOTP_PACKET_SIZE		4
 
+static int dsi_pixel_format_bpp(int pixel_format)
+{
+	int bpp;
+
+	switch (pixel_format) {
+	default:
+	case VID_MODE_FORMAT_RGB888:
+	case VID_MODE_FORMAT_RGB666_LOOSE:
+		bpp = 24;
+		break;
+	case VID_MODE_FORMAT_RGB666:
+		bpp = 18;
+		break;
+	case VID_MODE_FORMAT_RGB565:
+		bpp = 16;
+		break;
+	}
+
+	return bpp;
+}
+
 struct dsi_mnp {
 	u32 dsi_pll_ctrl;
 	u32 dsi_pll_div;
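
The new helper folds three identical switch statements (see the hunks below) into one place. A runnable illustration of its fall-through behaviour; the enum values are stand-ins for the real MIPI register encodings:

#include <assert.h>

enum {
	VID_MODE_FORMAT_RGB565 = 1,
	VID_MODE_FORMAT_RGB666,
	VID_MODE_FORMAT_RGB666_LOOSE,
	VID_MODE_FORMAT_RGB888,
};

/* Mirrors the helper: unknown formats deliberately get 24 bpp. */
static int dsi_pixel_format_bpp(int pixel_format)
{
	switch (pixel_format) {
	default:
	case VID_MODE_FORMAT_RGB888:
	case VID_MODE_FORMAT_RGB666_LOOSE:
		return 24;
	case VID_MODE_FORMAT_RGB666:
		return 18;
	case VID_MODE_FORMAT_RGB565:
		return 16;
	}
}

int main(void)
{
	assert(dsi_pixel_format_bpp(VID_MODE_FORMAT_RGB565) == 16);
	assert(dsi_pixel_format_bpp(0x7f) == 24);	/* default path */
	return 0;
}
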
@@ -46,8 +67,8 @@ struct dsi_mnp {
 static const u32 lfsr_converts[] = {
 	426, 469, 234, 373, 442, 221, 110, 311, 411,		/* 62 - 70 */
 	461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
-	106, 53, 282, 397, 354, 227, 113, 56, 284, 142,		/* 81 - 90 */
-	71, 35							/* 91 - 92 */
+	106, 53, 282, 397, 454, 227, 113, 56, 284, 142,		/* 81 - 90 */
+	71, 35, 273, 136, 324, 418, 465, 488, 500, 506		/* 91 - 100 */
 };
 
 #ifdef DSI_CLK_FROM_RR
@@ -65,19 +86,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
 	u32 dsi_bit_clock_hz;
 	u32 dsi_clk;
 
-	switch (pixel_format) {
-	default:
-	case VID_MODE_FORMAT_RGB888:
-	case VID_MODE_FORMAT_RGB666_LOOSE:
-		bpp = 24;
-		break;
-	case VID_MODE_FORMAT_RGB666:
-		bpp = 18;
-		break;
-	case VID_MODE_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	}
+	bpp = dsi_pixel_format_bpp(pixel_format);
 
 	hactive = mode->hdisplay;
 	vactive = mode->vdisplay;
@@ -137,21 +146,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
 static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
 {
 	u32 dsi_clk_khz;
-	u32 bpp;
-
-	switch (pixel_format) {
-	default:
-	case VID_MODE_FORMAT_RGB888:
-	case VID_MODE_FORMAT_RGB666_LOOSE:
-		bpp = 24;
-		break;
-	case VID_MODE_FORMAT_RGB666:
-		bpp = 18;
-		break;
-	case VID_MODE_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	}
+	u32 bpp = dsi_pixel_format_bpp(pixel_format);
 
 	/* DSI data rate = pixel clock * bits per pixel / lane count
 	   pixel clock is converted from KHz to Hz */
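
A worked example of the quoted formula, keeping everything in kHz: a 148.5 MHz pixel clock at 24 bpp over 4 lanes needs an 891 MHz DSI clock. The round-to-nearest division mirrors the kernel's DIV_ROUND_CLOSEST():

#include <stdio.h>

static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned int pclk = 148500;	/* pixel clock in kHz (1080p) */
	unsigned int bpp = 24;		/* VID_MODE_FORMAT_RGB888 */
	unsigned int lanes = 4;

	/* DSI data rate = pixel clock * bits per pixel / lane count */
	printf("dsi_clk = %u kHz\n",
	       div_round_closest(pclk * bpp, lanes));	/* 891000 */
	return 0;
}
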
@@ -162,11 +157,13 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
 
 #endif
 
-static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
+static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+			struct dsi_mnp *dsi_mnp, int target_dsi_clk)
 {
 	unsigned int calc_m = 0, calc_p = 0;
-	unsigned int m, n = 1, p;
-	int ref_clk = 25000;
+	unsigned int m_min, m_max, p_min = 2, p_max = 6;
+	unsigned int m, n, p;
+	int ref_clk;
 	int delta = target_dsi_clk;
 	u32 m_seed;
 
@@ -176,8 +173,20 @@ static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
 		return -ECHRNG;
 	}
 
-	for (m = 62; m <= 92 && delta; m++) {
-		for (p = 2; p <= 6 && delta; p++) {
+	if (IS_CHERRYVIEW(dev_priv)) {
+		ref_clk = 100000;
+		n = 4;
+		m_min = 70;
+		m_max = 96;
+	} else {
+		ref_clk = 25000;
+		n = 1;
+		m_min = 62;
+		m_max = 92;
+	}
+
+	for (m = m_min; m <= m_max && delta; m++) {
+		for (p = p_min; p <= p_max && delta; p++) {
 			/*
 			 * Find the optimal m and p divisors with minimal delta
 			 * +/- the required clock
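
The search itself is a small brute force over m and p, keeping whichever pair minimises the clock delta. A standalone sketch under the assumption that the PLL output is roughly ref_clk * m / (n * p), using the VLV ranges from this hunk (25 MHz reference, n = 1, m in 62..92, p in 2..6):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int ref_clk = 25000, n = 1;	/* kHz */
	const int target = 891000;		/* e.g. RGB888 x 4 lanes */
	int best_m = 0, best_p = 0, delta = target;

	for (int m = 62; m <= 92; m++) {
		for (int p = 2; p <= 6; p++) {
			int d = abs(target - ref_clk * m / (n * p));

			if (d < delta) {
				delta = d;
				best_m = m;
				best_p = p;
			}
		}
	}

	printf("m=%d p=%d (off by %d kHz)\n", best_m, best_p, delta);
	return 0;
}
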
@@ -217,7 +226,7 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
 	dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
 				    intel_dsi->lane_count);
 
-	ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+	ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
 	if (ret) {
 		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
 		return;
@@ -286,21 +295,7 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
 
 static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
 {
-	int bpp;
-
-	switch (pixel_format) {
-	default:
-	case VID_MODE_FORMAT_RGB888:
-	case VID_MODE_FORMAT_RGB666_LOOSE:
-		bpp = 24;
-		break;
-	case VID_MODE_FORMAT_RGB666:
-		bpp = 18;
-		break;
-	case VID_MODE_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	}
+	int bpp = dsi_pixel_format_bpp(pixel_format);
 
 	WARN(bpp != pipe_bpp,
 	     "bpp match assertion failure (expected %d, current %d)\n",
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ece5bd754f85..dc532bb61d22 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -196,50 +196,6 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
 	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_dvo_dpms(struct drm_connector *connector, int mode)
-{
-	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
-	struct drm_crtc *crtc;
-	struct intel_crtc_state *config;
-
-	/* dvo supports only 2 dpms states. */
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	crtc = intel_dvo->base.base.crtc;
-	if (!crtc) {
-		intel_dvo->base.connectors_active = false;
-		return;
-	}
-
-	/* We call connector dpms manually below in case pipe dpms doesn't
-	 * change due to cloning. */
-	if (mode == DRM_MODE_DPMS_ON) {
-		config = to_intel_crtc(crtc)->config;
-
-		intel_dvo->base.connectors_active = true;
-
-		intel_crtc_update_dpms(crtc);
-
-		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
-	} else {
-		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
-
-		intel_dvo->base.connectors_active = false;
-
-		intel_crtc_update_dpms(crtc);
-	}
-
-	intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_dvo_mode_valid(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
@@ -387,7 +343,7 @@ static void intel_dvo_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
-	.dpms = intel_dvo_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dvo_detect,
 	.destroy = intel_dvo_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
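
With the hand-rolled DVO DPMS path removed, the connector relies on the core atomic helper. A hedged sketch of the resulting funcs table; a real connector would also supply a .detect hook and its own .destroy, the generic cleanup is only a stand-in here:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};
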
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 6abb83432d4d..1f97fb548c2a 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -41,9 +41,8 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-static void i8xx_fbc_disable(struct drm_device *dev)
+static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 fbc_ctl;
 
 	dev_priv->fbc.enabled = false;
@@ -65,13 +64,11 @@ static void i8xx_fbc_disable(struct drm_device *dev)
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static void i8xx_fbc_enable(struct drm_crtc *crtc)
+static void i8xx_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int cfb_pitch;
 	int i;
 	u32 fbc_ctl;
@@ -84,7 +81,7 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
 		cfb_pitch = fb->pitches[0];
 
 	/* FBC_CTL wants 32B or 64B units */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		cfb_pitch = (cfb_pitch / 32) - 1;
 	else
 		cfb_pitch = (cfb_pitch / 64) - 1;
@@ -93,66 +90,61 @@ static void i8xx_fbc_enable(struct drm_crtc *crtc)
 	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
 		I915_WRITE(FBC_TAG + (i * 4), 0);
 
-	if (IS_GEN4(dev)) {
+	if (IS_GEN4(dev_priv)) {
 		u32 fbc_ctl2;
 
 		/* Set it up... */
 		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
+		fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
 		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-		I915_WRITE(FBC_FENCE_OFF, crtc->y);
+		I915_WRITE(FBC_FENCE_OFF, crtc->base.y);
 	}
 
 	/* enable it... */
 	fbc_ctl = I915_READ(FBC_CONTROL);
 	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
 	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
-	if (IS_I945GM(dev))
+	if (IS_I945GM(dev_priv))
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= obj->fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
 	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
-		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+		      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
 }
 
-static bool i8xx_fbc_enabled(struct drm_device *dev)
+static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_fbc_enable(struct drm_crtc *crtc)
+static void g4x_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dev_priv->fbc.enabled = true;
 
-	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
 		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
 	else
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 
-	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+	I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y);
 
 	/* enable it... */
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void g4x_fbc_disable(struct drm_device *dev)
+static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpfc_ctl;
 
 	dev_priv->fbc.enabled = false;
@@ -167,10 +159,8 @@ static void g4x_fbc_disable(struct drm_device *dev)
 	}
 }
 
-static bool g4x_fbc_enabled(struct drm_device *dev)
+static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
@@ -180,22 +170,21 @@ static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
 	POSTING_READ(MSG_FBC_REND_STATE);
 }
 
-static void ilk_fbc_enable(struct drm_crtc *crtc)
+static void ilk_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
+	int threshold = dev_priv->fbc.threshold;
 
 	dev_priv->fbc.enabled = true;
 
-	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-		dev_priv->fbc.threshold++;
+		threshold++;
 
-	switch (dev_priv->fbc.threshold) {
+	switch (threshold) {
 	case 4:
 	case 3:
 		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -208,28 +197,27 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
 		break;
 	}
 	dpfc_ctl |= DPFC_CTL_FENCE_EN;
-	if (IS_GEN5(dev))
+	if (IS_GEN5(dev_priv))
 		dpfc_ctl |= obj->fence_reg;
 
-	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y);
 	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev_priv)) {
 		I915_WRITE(SNB_DPFC_CTL_SA,
 			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
 	}
 
 	intel_fbc_nuke(dev_priv);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void ilk_fbc_disable(struct drm_device *dev)
+static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpfc_ctl;
 
 	dev_priv->fbc.enabled = false;
@@ -244,29 +232,29 @@ static void ilk_fbc_disable(struct drm_device *dev)
 	}
 }
 
-static bool ilk_fbc_enabled(struct drm_device *dev)
+static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_fbc_enable(struct drm_crtc *crtc)
+static void gen7_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
+	int threshold = dev_priv->fbc.threshold;
 
 	dev_priv->fbc.enabled = true;
 
-	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
+	dpfc_ctl = 0;
+	if (IS_IVYBRIDGE(dev_priv))
+		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
+
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-		dev_priv->fbc.threshold++;
+		threshold++;
 
-	switch (dev_priv->fbc.threshold) {
+	switch (threshold) {
 	case 4:
 	case 3:
 		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -286,39 +274,37 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)
 
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	if (IS_IVYBRIDGE(dev)) {
+	if (IS_IVYBRIDGE(dev_priv)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
 			   ILK_FBCQ_DIS);
 	} else {
 		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
-			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
+			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
 			   HSW_FBCQ_DIS);
 	}
 
 	I915_WRITE(SNB_DPFC_CTL_SA,
 		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
 
 	intel_fbc_nuke(dev_priv);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
 /**
  * intel_fbc_enabled - Is FBC enabled?
- * @dev: the drm_device
+ * @dev_priv: i915 device instance
  *
  * This function is used to verify the current state of FBC.
  * FIXME: This should be tracked in the plane config eventually
  *        instead of queried at runtime for most callers.
  */
-bool intel_fbc_enabled(struct drm_device *dev)
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return dev_priv->fbc.enabled;
 }
 
@@ -327,31 +313,33 @@ static void intel_fbc_work_fn(struct work_struct *__work)
 	struct intel_fbc_work *work =
 		container_of(to_delayed_work(__work),
 			     struct intel_fbc_work, work);
-	struct drm_device *dev = work->crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
+	struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev_priv->fbc.lock);
 	if (work == dev_priv->fbc.fbc_work) {
 		/* Double check that we haven't switched fb without cancelling
 		 * the prior work.
 		 */
-		if (work->crtc->primary->fb == work->fb) {
-			dev_priv->display.enable_fbc(work->crtc);
+		if (crtc_fb == work->fb) {
+			dev_priv->fbc.enable_fbc(work->crtc);
 
-			dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
-			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
-			dev_priv->fbc.y = work->crtc->y;
+			dev_priv->fbc.crtc = work->crtc;
+			dev_priv->fbc.fb_id = crtc_fb->base.id;
+			dev_priv->fbc.y = work->crtc->base.y;
 		}
 
 		dev_priv->fbc.fbc_work = NULL;
 	}
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->fbc.lock);
 
 	kfree(work);
 }
 
 static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
 {
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
 	if (dev_priv->fbc.fbc_work == NULL)
 		return;
 
@@ -373,26 +361,24 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
 	dev_priv->fbc.fbc_work = NULL;
 }
 
-static void intel_fbc_enable(struct drm_crtc *crtc)
+static void intel_fbc_enable(struct intel_crtc *crtc)
 {
 	struct intel_fbc_work *work;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-	if (!dev_priv->display.enable_fbc)
-		return;
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	intel_fbc_cancel_work(dev_priv);
 
 	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL) {
 		DRM_ERROR("Failed to allocate FBC work structure\n");
-		dev_priv->display.enable_fbc(crtc);
+		dev_priv->fbc.enable_fbc(crtc);
 		return;
 	}
 
 	work->crtc = crtc;
-	work->fb = crtc->primary->fb;
+	work->fb = crtc->base.primary->fb;
 	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
 	dev_priv->fbc.fbc_work = work;
@@ -413,75 +399,274 @@ static void intel_fbc_enable(struct drm_crtc *crtc)
 	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
 }
 
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
+	intel_fbc_cancel_work(dev_priv);
+
+	dev_priv->fbc.disable_fbc(dev_priv);
+	dev_priv->fbc.crtc = NULL;
+}
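
The new fbc.lock replaces struct_mutex for FBC state: public entry points take the lock, and the double-underscore helpers assert it is already held so they can call each other freely. A minimal userspace sketch of the same split, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fbc_lock = PTHREAD_MUTEX_INITIALIZER;

static void __fbc_update(void)
{
	/* caller must hold fbc_lock; the kernel WARNs if not */
	puts("re-evaluating FBC state");
}

static void fbc_update(void)
{
	pthread_mutex_lock(&fbc_lock);
	__fbc_update();
	pthread_mutex_unlock(&fbc_lock);
}

int main(void)
{
	fbc_update();
	return 0;
}
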
+
 /**
  * intel_fbc_disable - disable FBC
- * @dev: the drm_device
+ * @dev_priv: i915 device instance
  *
  * This function disables FBC.
  */
-void intel_fbc_disable(struct drm_device *dev)
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (!dev_priv->fbc.enable_fbc)
+		return;
 
-	intel_fbc_cancel_work(dev_priv);
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_disable(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/**
+ * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-	if (!dev_priv->display.disable_fbc)
+	if (!dev_priv->fbc.enable_fbc)
 		return;
 
-	dev_priv->display.disable_fbc(dev);
-	dev_priv->fbc.crtc = NULL;
+	mutex_lock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.crtc == crtc)
+		__intel_fbc_disable(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
 }
 
-static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
+{
+	switch (reason) {
+	case FBC_OK:
+		return "FBC enabled but currently disabled in hardware";
+	case FBC_UNSUPPORTED:
+		return "unsupported by this chipset";
+	case FBC_NO_OUTPUT:
+		return "no output";
+	case FBC_STOLEN_TOO_SMALL:
+		return "not enough stolen memory";
+	case FBC_UNSUPPORTED_MODE:
+		return "mode incompatible with compression";
+	case FBC_MODE_TOO_LARGE:
+		return "mode too large for compression";
+	case FBC_BAD_PLANE:
+		return "FBC unsupported on plane";
+	case FBC_NOT_TILED:
+		return "framebuffer not tiled or fenced";
+	case FBC_MULTIPLE_PIPES:
+		return "more than one pipe active";
+	case FBC_MODULE_PARAM:
+		return "disabled per module param";
+	case FBC_CHIP_DEFAULT:
+		return "disabled per chip default";
+	case FBC_ROTATION:
+		return "rotation unsupported";
+	case FBC_IN_DBG_MASTER:
+		return "Kernel debugger is active";
+	default:
+		MISSING_CASE(reason);
+		return "unknown reason";
+	}
+}
+
+static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
 			      enum no_fbc_reason reason)
 {
 	if (dev_priv->fbc.no_fbc_reason == reason)
-		return false;
+		return;
 
 	dev_priv->fbc.no_fbc_reason = reason;
-	return true;
+	DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
 }
 
 static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
 {
 	struct drm_crtc *crtc = NULL, *tmp_crtc;
 	enum pipe pipe;
-	bool pipe_a_only = false, one_pipe_only = false;
+	bool pipe_a_only = false;
 
 	if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
 		pipe_a_only = true;
-	else if (INTEL_INFO(dev_priv)->gen <= 4)
-		one_pipe_only = true;
 
 	for_each_pipe(dev_priv, pipe) {
 		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
 		if (intel_crtc_active(tmp_crtc) &&
-		    to_intel_plane_state(tmp_crtc->primary->state)->visible) {
-			if (one_pipe_only && crtc) {
-				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
-					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-				return NULL;
-			}
+		    to_intel_plane_state(tmp_crtc->primary->state)->visible)
 			crtc = tmp_crtc;
-		}
 
 		if (pipe_a_only)
 			break;
 	}
 
-	if (!crtc || crtc->primary->fb == NULL) {
-		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
-			DRM_DEBUG_KMS("no output, disabling\n");
+	if (!crtc || crtc->primary->fb == NULL)
 		return NULL;
-	}
 
 	return crtc;
 }
 
+static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
+{
+	enum pipe pipe;
+	int n_pipes = 0;
+	struct drm_crtc *crtc;
+
+	if (INTEL_INFO(dev_priv)->gen > 4)
+		return true;
+
+	for_each_pipe(dev_priv, pipe) {
+		crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+		if (intel_crtc_active(crtc) &&
+		    to_intel_plane_state(crtc->primary->state)->visible)
+			n_pipes++;
+	}
+
+	return (n_pipes < 2);
+}
+
+static int find_compression_threshold(struct drm_i915_private *dev_priv,
+				      struct drm_mm_node *node,
+				      int size,
+				      int fb_cpp)
+{
+	int compression_threshold = 1;
+	int ret;
+
+	/* HACK: This code depends on what we will do in *_enable_fbc. If that
+	 * code changes, this code needs to change as well.
+	 *
+	 * The enable_fbc code will attempt to use one of our 2 compression
+	 * thresholds, so in that case we have only one fallback left.
+	 */
+
+	/* Try to over-allocate to reduce reallocations and fragmentation. */
+	ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
+	if (ret == 0)
+		return compression_threshold;
+
+again:
+	/* HW's ability to limit the CFB is 1:4 */
+	if (compression_threshold > 4 ||
+	    (fb_cpp == 2 && compression_threshold == 2))
+		return 0;
+
+	ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
+	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+		return 0;
+	} else if (ret) {
+		compression_threshold <<= 1;
+		goto again;
+	} else {
+		return compression_threshold;
+	}
+}
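
A self-contained toy model of the search above: allocation "succeeds" whenever the requested size fits in the available stolen memory. The gen <= 4 early-out of the real function is omitted to keep the sketch short:

#include <stdio.h>

static int find_threshold(int avail, int size, int fb_cpp)
{
	int threshold = 1;

	size *= 2;			/* over-allocate to reduce reallocs */
	if (size <= avail)
		return threshold;
again:
	if (threshold > 4 || (fb_cpp == 2 && threshold == 2))
		return 0;		/* HW can only limit the CFB 1:4 */
	size /= 2;
	if (size <= avail)
		return threshold;
	threshold *= 2;
	goto again;
}

int main(void)
{
	/* An 8 MiB framebuffer against 3 MiB of free stolen memory
	 * ends up compressed at the 1:4 threshold. */
	printf("threshold = %d\n", find_threshold(3 << 20, 8 << 20, 4));
	return 0;
}
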
+
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
+			       int fb_cpp)
+{
+	struct drm_mm_node *uninitialized_var(compressed_llb);
+	int ret;
+
+	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
+					 size, fb_cpp);
+	if (!ret)
+		goto err_llb;
+	else if (ret > 1) {
+		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a full-size buffer. Try to increase stolen memory size if available in BIOS.\n");
+	}
+
+	dev_priv->fbc.threshold = ret;
+
+	if (INTEL_INFO(dev_priv)->gen >= 5)
+		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+	else if (IS_GM45(dev_priv)) {
+		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+	} else {
+		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
+		if (!compressed_llb)
+			goto err_fb;
+
+		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
+						  4096, 4096);
+		if (ret)
+			goto err_fb;
+
+		dev_priv->fbc.compressed_llb = compressed_llb;
+
+		I915_WRITE(FBC_CFB_BASE,
+			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
+		I915_WRITE(FBC_LL_BASE,
+			   dev_priv->mm.stolen_base + compressed_llb->start);
+	}
+
+	dev_priv->fbc.uncompressed_size = size;
+
+	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+		      size);
+
+	return 0;
+
+err_fb:
+	kfree(compressed_llb);
+	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+err_llb:
+	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+	return -ENOSPC;
+}
+
+static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->fbc.uncompressed_size == 0)
+		return;
+
+	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+
+	if (dev_priv->fbc.compressed_llb) {
+		i915_gem_stolen_remove_node(dev_priv,
+					    dev_priv->fbc.compressed_llb);
+		kfree(dev_priv->fbc.compressed_llb);
+	}
+
+	dev_priv->fbc.uncompressed_size = 0;
+}
+
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_cleanup_cfb(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
+			       int fb_cpp)
+{
+	if (size <= dev_priv->fbc.uncompressed_size)
+		return 0;
+
+	/* Release any current block */
+	__intel_fbc_cleanup_cfb(dev_priv);
+
+	return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
+}
+
 /**
- * intel_fbc_update - enable/disable FBC as needed
- * @dev: the drm_device
+ * __intel_fbc_update - enable/disable FBC as needed, unlocked
+ * @dev_priv: i915 device instance
  *
  * Set up the framebuffer compression hardware at mode set time.  We
  * enable it if possible:
@@ -498,9 +683,8 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
  *
  * We need to enable/disable FBC on a global basis.
  */
-void intel_fbc_update(struct drm_device *dev)
+static void __intel_fbc_update(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = NULL;
 	struct intel_crtc *intel_crtc;
 	struct drm_framebuffer *fb;
@@ -508,22 +692,19 @@ void intel_fbc_update(struct drm_device *dev)
 	const struct drm_display_mode *adjusted_mode;
 	unsigned int max_width, max_height;
 
-	if (!HAS_FBC(dev))
-		return;
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	/* disable framebuffer compression in vGPU */
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv->dev))
 		i915.enable_fbc = 0;
 
 	if (i915.enable_fbc < 0) {
-		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
-			DRM_DEBUG_KMS("disabled per chip default\n");
+		set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
 		goto out_disable;
 	}
 
 	if (!i915.enable_fbc) {
-		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
-			DRM_DEBUG_KMS("fbc disabled per module param\n");
+		set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
 		goto out_disable;
 	}
 
@@ -537,8 +718,15 @@ void intel_fbc_update(struct drm_device *dev)
 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
 	 */
 	crtc = intel_fbc_find_crtc(dev_priv);
-	if (!crtc)
+	if (!crtc) {
+		set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
 		goto out_disable;
+	}
+
+	if (!multiple_pipes_ok(dev_priv)) {
+		set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
+		goto out_disable;
+	}
 
 	intel_crtc = to_intel_crtc(crtc);
 	fb = crtc->primary->fb;
@@ -547,16 +735,14 @@ void intel_fbc_update(struct drm_device *dev)
 
 	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
 	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-			DRM_DEBUG_KMS("mode incompatible with compression, "
-				      "disabling\n");
+		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
 		goto out_disable;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
 		max_width = 4096;
 		max_height = 4096;
-	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
 		max_width = 4096;
 		max_height = 2048;
 	} else {
@@ -565,14 +751,12 @@ void intel_fbc_update(struct drm_device *dev)
 	}
 	if (intel_crtc->config->pipe_src_w > max_width ||
 	    intel_crtc->config->pipe_src_h > max_height) {
-		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
-			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+		set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
 		goto out_disable;
 	}
-	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
+	if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
 	    intel_crtc->plane != PLANE_A) {
-		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
-			DRM_DEBUG_KMS("plane not A, disabling compression\n");
+		set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
 		goto out_disable;
 	}
 
@@ -581,25 +765,24 @@ void intel_fbc_update(struct drm_device *dev)
 	 */
 	if (obj->tiling_mode != I915_TILING_X ||
 	    obj->fence_reg == I915_FENCE_REG_NONE) {
-		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
-			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+		set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
 		goto out_disable;
 	}
-	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
 	    crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
-		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
+		set_no_fbc_reason(dev_priv, FBC_ROTATION);
 		goto out_disable;
 	}
 
 	/* If the kernel debugger is active, always disable compression */
-	if (in_dbg_master())
+	if (in_dbg_master()) {
+		set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
 		goto out_disable;
+	}
 
-	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
-					      drm_format_plane_cpp(fb->pixel_format, 0))) {
-		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
-			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+	if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
+				drm_format_plane_cpp(fb->pixel_format, 0))) {
+		set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
 		goto out_disable;
 	}
 
@@ -613,7 +796,7 @@ void intel_fbc_update(struct drm_device *dev)
 	    dev_priv->fbc.y == crtc->y)
 		return;
 
-	if (intel_fbc_enabled(dev)) {
+	if (intel_fbc_enabled(dev_priv)) {
 		/* We update FBC along two paths, after changing fb/crtc
 		 * configuration (modeswitching) and after page-flipping
 		 * finishes. For the latter, we know that not only did
@@ -638,58 +821,87 @@ void intel_fbc_update(struct drm_device *dev)
 		 * some point. And we wait before enabling FBC anyway.
 		 */
 		DRM_DEBUG_KMS("disabling active FBC for update\n");
-		intel_fbc_disable(dev);
+		__intel_fbc_disable(dev_priv);
 	}
 
-	intel_fbc_enable(crtc);
+	intel_fbc_enable(intel_crtc);
 	dev_priv->fbc.no_fbc_reason = FBC_OK;
 	return;
 
 out_disable:
 	/* Multiple disables should be harmless */
-	if (intel_fbc_enabled(dev)) {
+	if (intel_fbc_enabled(dev_priv)) {
 		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
-		intel_fbc_disable(dev);
+		__intel_fbc_disable(dev_priv);
 	}
-	i915_gem_stolen_cleanup_compression(dev);
+	__intel_fbc_cleanup_cfb(dev_priv);
+}
+
+/**
+ * intel_fbc_update - enable/disable FBC as needed
+ * @dev_priv: i915 device instance
+ *
+ * This function reevaluates the overall state and enables or disables FBC.
+ */
+void intel_fbc_update(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_update(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
 }
 
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits,
 			  enum fb_op_origin origin)
 {
-	struct drm_device *dev = dev_priv->dev;
 	unsigned int fbc_bits;
 
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
 	if (origin == ORIGIN_GTT)
 		return;
 
+	mutex_lock(&dev_priv->fbc.lock);
+
 	if (dev_priv->fbc.enabled)
 		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
 	else if (dev_priv->fbc.fbc_work)
 		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
-			to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
+					dev_priv->fbc.fbc_work->crtc->pipe);
 	else
 		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
 
 	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
 
 	if (dev_priv->fbc.busy_bits)
-		intel_fbc_disable(dev);
+		__intel_fbc_disable(dev_priv);
+
+	mutex_unlock(&dev_priv->fbc.lock);
 }
 
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
-		     unsigned int frontbuffer_bits)
+		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
 {
-	struct drm_device *dev = dev_priv->dev;
+	if (!dev_priv->fbc.enable_fbc)
+		return;
 
-	if (!dev_priv->fbc.busy_bits)
+	if (origin == ORIGIN_GTT)
 		return;
 
+	mutex_lock(&dev_priv->fbc.lock);
+
 	dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
 
-	if (!dev_priv->fbc.busy_bits)
-		intel_fbc_update(dev);
+	if (!dev_priv->fbc.busy_bits) {
+		__intel_fbc_disable(dev_priv);
+		__intel_fbc_update(dev_priv);
+	}
+
+	mutex_unlock(&dev_priv->fbc.lock);
 }
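
A toy model of the busy_bits bookkeeping in the two functions above: an invalidate that overlaps FBC's plane disables FBC, and the matching flush may re-enable it once nothing is busy:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fbc_bits = 0x1;	/* primary plane of the FBC pipe */
	uint32_t busy = 0;

	busy |= fbc_bits & 0x1;		/* invalidate: CPU dirtied pipe A */
	if (busy)
		printf("busy=%#x -> __intel_fbc_disable()\n", busy);

	busy &= ~0x1;			/* flush: rendering has landed */
	if (!busy)
		printf("idle -> __intel_fbc_update()\n");
	return 0;
}
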
 
 /**
@@ -702,6 +914,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 {
 	enum pipe pipe;
 
+	mutex_init(&dev_priv->fbc.lock);
+
 	if (!HAS_FBC(dev_priv)) {
 		dev_priv->fbc.enabled = false;
 		dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
@@ -717,25 +931,25 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 7) {
-		dev_priv->display.fbc_enabled = ilk_fbc_enabled;
-		dev_priv->display.enable_fbc = gen7_fbc_enable;
-		dev_priv->display.disable_fbc = ilk_fbc_disable;
+		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+		dev_priv->fbc.enable_fbc = gen7_fbc_enable;
+		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
 	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
-		dev_priv->display.fbc_enabled = ilk_fbc_enabled;
-		dev_priv->display.enable_fbc = ilk_fbc_enable;
-		dev_priv->display.disable_fbc = ilk_fbc_disable;
+		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+		dev_priv->fbc.enable_fbc = ilk_fbc_enable;
+		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
 	} else if (IS_GM45(dev_priv)) {
-		dev_priv->display.fbc_enabled = g4x_fbc_enabled;
-		dev_priv->display.enable_fbc = g4x_fbc_enable;
-		dev_priv->display.disable_fbc = g4x_fbc_disable;
+		dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
+		dev_priv->fbc.enable_fbc = g4x_fbc_enable;
+		dev_priv->fbc.disable_fbc = g4x_fbc_disable;
 	} else {
-		dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
-		dev_priv->display.enable_fbc = i8xx_fbc_enable;
-		dev_priv->display.disable_fbc = i8xx_fbc_disable;
+		dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
+		dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
+		dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
 
 		/* This value was pulled out of someone's hat */
 		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
 	}
 
-	dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
+	dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6372cfc7d053..8c6a6fa46005 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -55,16 +55,8 @@ static int intel_fbdev_set_par(struct fb_info *info)
 	ret = drm_fb_helper_set_par(info);
 
 	if (ret == 0) {
-		/*
-		 * FIXME: fbdev presumes that all callbacks also work from
-		 * atomic contexts and relies on that for emergency oops
-		 * printing. KMS totally doesn't do that and the locking here is
-		 * by far not the only place this goes wrong.  Ignore this for
-		 * now until we solve this for real.
-		 */
 		mutex_lock(&fb_helper->dev->struct_mutex);
-		ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
-							true);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
 		mutex_unlock(&fb_helper->dev->struct_mutex);
 	}
 
@@ -81,15 +73,8 @@ static int intel_fbdev_blank(int blank, struct fb_info *info)
 	ret = drm_fb_helper_blank(blank, info);
 
 	if (ret == 0) {
-		/*
-		 * FIXME: fbdev presumes that all callbacks also work from
-		 * atomic contexts and relies on that for emergency oops
-		 * printing. KMS totally doesn't do that and the locking here is
-		 * by far not the only place this goes wrong.  Ignore this for
-		 * now until we solve this for real.
-		 */
 		mutex_lock(&fb_helper->dev->struct_mutex);
-		intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
 		mutex_unlock(&fb_helper->dev->struct_mutex);
 	}
 
@@ -107,15 +92,8 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
 	ret = drm_fb_helper_pan_display(var, info);
 
 	if (ret == 0) {
-		/*
-		 * FIXME: fbdev presumes that all callbacks also work from
-		 * atomic contexts and relies on that for emergency oops
-		 * printing. KMS totally doesn't do that and the locking here is
-		 * by far not the only place this goes wrong.  Ignore this for
-		 * now until we solve this for real.
-		 */
 		mutex_lock(&fb_helper->dev->struct_mutex);
-		intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
 		mutex_unlock(&fb_helper->dev->struct_mutex);
 	}
 
@@ -126,9 +104,9 @@ static struct fb_ops intelfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = intel_fbdev_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = intel_fbdev_pan_display,
 	.fb_blank = intel_fbdev_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
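
The conversion drops the raw cfb_* callbacks in favour of the shared drm_fb_helper wrappers. A hedged sketch of a complete fb_ops table built purely from those helpers; a driver with its own set_par/pan/blank hooks, like this one, substitutes them as in the hunk above:

#include <linux/module.h>
#include <linux/fb.h>
#include <drm/drm_fb_helper.h>

static struct fb_ops example_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_blank	= drm_fb_helper_blank,
	.fb_setcmap	= drm_fb_helper_setcmap,
};
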
@@ -177,7 +155,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	}
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
+	ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
 	if (ret) {
 		DRM_ERROR("failed to pin obj: %d\n", ret);
 		goto out_fb;
@@ -237,9 +215,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	obj = intel_fb->obj;
 	size = obj->base.size;
 
-	info = framebuffer_alloc(0, &dev->pdev->dev);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unpin;
 	}
 
@@ -248,24 +226,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	fb = &ifbdev->fb->base;
 
 	ifbdev->helper.fb = fb;
-	ifbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "inteldrmfb");
 
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &intelfb_ops;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
@@ -277,7 +244,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 			   size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
-		goto out_unpin;
+		goto out_destroy_fbi;
 	}
 	info->screen_size = size;
 
@@ -304,6 +271,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_unpin:
 	i915_gem_object_ggtt_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
@@ -484,18 +453,13 @@ retry:
 			 * IMPORTANT: We want to use the adjusted mode (i.e.
 			 * after the panel fitter upscaling) as the initial
 			 * config, not the input mode, which is what crtc->mode
-			 * usually contains. But since our current fastboot
+			 * usually contains. But since our current
 			 * code puts a mode derived from the post-pfit timings
-			 * into crtc->mode this works out correctly. We don't
-			 * use hwmode anywhere right now, so use it for this
-			 * since the fb helper layer wants a pointer to
-			 * something we own.
+			 * into crtc->mode, this works out correctly.
 			 */
 			DRM_DEBUG_KMS("looking for current mode on connector %s\n",
 				      connector->name);
-			intel_mode_from_pipe_config(&encoder->crtc->hwmode,
-						    to_intel_crtc(encoder->crtc)->config);
-			modes[i] = &encoder->crtc->hwmode;
+			modes[i] = &encoder->crtc->mode;
 		}
 		crtcs[i] = new_crtc;
 
@@ -550,16 +514,9 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
 static void intel_fbdev_destroy(struct drm_device *dev,
 				struct intel_fbdev *ifbdev)
 {
-	if (ifbdev->helper.fbdev) {
-		struct fb_info *info = ifbdev->helper.fbdev;
 
-		unregister_framebuffer(info);
-		iounmap(info->screen_base);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&ifbdev->helper);
+	drm_fb_helper_release_fbi(&ifbdev->helper);
 
 	drm_fb_helper_fini(&ifbdev->helper);
 
@@ -582,7 +539,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 	struct intel_framebuffer *fb = NULL;
 	struct drm_crtc *crtc;
 	struct intel_crtc *intel_crtc;
-	struct intel_initial_plane_config *plane_config = NULL;
 	unsigned int max_size = 0;
 
 	if (!i915.fastboot)
@@ -590,20 +546,21 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 
 	/* Find the largest fb */
 	for_each_crtc(dev, crtc) {
+		struct drm_i915_gem_object *obj =
+			intel_fb_obj(crtc->primary->state->fb);
 		intel_crtc = to_intel_crtc(crtc);
 
-		if (!intel_crtc->active || !crtc->primary->fb) {
+		if (!intel_crtc->active || !obj) {
 			DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
 				      pipe_name(intel_crtc->pipe));
 			continue;
 		}
 
-		if (intel_crtc->plane_config.size > max_size) {
+		if (obj->base.size > max_size) {
 			DRM_DEBUG_KMS("found possible fb from plane %c\n",
 				      pipe_name(intel_crtc->pipe));
-			plane_config = &intel_crtc->plane_config;
-			fb = to_intel_framebuffer(crtc->primary->fb);
-			max_size = plane_config->size;
+			fb = to_intel_framebuffer(crtc->primary->state->fb);
+			max_size = obj->base.size;
 		}
 	}
 
@@ -638,7 +595,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 			DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
 				      pipe_name(intel_crtc->pipe),
 				      cur_size, fb->base.pitches[0]);
-			plane_config = NULL;
 			fb = NULL;
 			break;
 		}
@@ -659,7 +615,6 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 			DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
 				      pipe_name(intel_crtc->pipe),
 				      cur_size, max_size);
-			plane_config = NULL;
 			fb = NULL;
 			break;
 		}
@@ -810,7 +765,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 	if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
 		memset_io(info->screen_base, 0, info->screen_size);
 
-	fb_set_suspend(info, state);
+	drm_fb_helper_set_suspend(&ifbdev->helper, state);
 	console_unlock();
 }
 
@@ -825,11 +780,20 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
 {
 	int ret;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+	struct drm_fb_helper *fb_helper;
 
-	if (!dev_priv->fbdev)
+	if (!ifbdev)
 		return;
 
-	ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
-	if (ret)
+	fb_helper = &ifbdev->helper;
+
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+	if (ret) {
 		DRM_DEBUG("failed to restore crtc mode\n");
+	} else {
+		mutex_lock(&fb_helper->dev->struct_mutex);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
+		mutex_unlock(&fb_helper->dev->struct_mutex);
+	}
 }
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 57095f54c1f2..ac85357010b4 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -65,84 +65,29 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-static void intel_increase_pllclock(struct drm_device *dev,
-				    enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int dpll_reg = DPLL(pipe);
-	int dpll;
-
-	if (!HAS_GMCH_DISPLAY(dev))
-		return;
-
-	if (!dev_priv->lvds_downclock_avail)
-		return;
-
-	dpll = I915_READ(dpll_reg);
-	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-		DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
-		assert_panel_unlocked(dev_priv, pipe);
-
-		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
-		I915_WRITE(dpll_reg, dpll);
-		intel_wait_for_vblank(dev, pipe);
-
-		dpll = I915_READ(dpll_reg);
-		if (dpll & DISPLAY_RATE_SELECT_FPA1)
-			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-	}
-}
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
-			       unsigned frontbuffer_bits,
-			       struct intel_engine_cs *ring)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum pipe pipe;
-
-	for_each_pipe(dev_priv, pipe) {
-		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
-			continue;
-
-		intel_increase_pllclock(dev, pipe);
-	}
-}
-
 /**
  * intel_fb_obj_invalidate - invalidate frontbuffer object
  * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
  * @origin: which operation caused the invalidation
  *
  * This function gets called every time rendering on the given object starts and
  * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * be invalidated. For ORIGIN_CS, any subsequent invalidation will be delayed
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring,
 			     enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	if (!obj->frontbuffer_bits)
 		return;
 
-	if (ring) {
+	if (origin == ORIGIN_CS) {
 		mutex_lock(&dev_priv->fb_tracking.lock);
 		dev_priv->fb_tracking.busy_bits
 			|= obj->frontbuffer_bits;
@@ -151,8 +96,6 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 		mutex_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
 	intel_psr_invalidate(dev, obj->frontbuffer_bits);
 	intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
 	intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
@@ -162,6 +105,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  * intel_frontbuffer_flush - flush frontbuffer
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given planes has
  * completed and frontbuffer caching can be started again. Flushes will get
@@ -169,37 +113,40 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flush(struct drm_device *dev,
-			     unsigned frontbuffer_bits)
+static void intel_frontbuffer_flush(struct drm_device *dev,
+				    unsigned frontbuffer_bits,
+				    enum fb_op_origin origin)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Delay flushing when rings are still busy. */
 	mutex_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
 	mutex_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+	if (!frontbuffer_bits)
+		return;
 
 	intel_edp_drrs_flush(dev, frontbuffer_bits);
-	intel_psr_flush(dev, frontbuffer_bits);
-	intel_fbc_flush(dev_priv, frontbuffer_bits);
+	intel_psr_flush(dev, frontbuffer_bits, origin);
+	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
  * intel_fb_obj_flush - flush frontbuffer object
  * @obj: GEM object to flush
  * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given object has
  * completed and frontbuffer caching can be started again. If @retire is true
  * then any delayed flushes will be unblocked.
  */
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire)
+			bool retire, enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	unsigned frontbuffer_bits;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -218,7 +165,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 		mutex_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
+	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
@@ -236,7 +183,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	mutex_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
@@ -244,7 +191,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
 	mutex_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_psr_single_frame_update(dev);
+	intel_psr_single_frame_update(dev, frontbuffer_bits);
 }
 
 /**
@@ -260,7 +207,7 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
 				     unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	mutex_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
@@ -268,5 +215,29 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
 	mutex_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
+	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on @frontbuffer_bits. This is for
+ * synchronous plane updates which will happen on the next vblank and which will
+ * not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip(struct drm_device *dev,
+			    unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	/* Remove stale busy bits due to the old buffer. */
+	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
new file mode 100644
index 000000000000..18d7f20936c8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef _INTEL_GUC_FWIF_H
+#define _INTEL_GUC_FWIF_H
+
+/*
+ * This file is partially autogenerated, although currently with some manual
+ * fixups afterwards. In future, it should be entirely autogenerated, in order
+ * to ensure that the definitions herein remain in sync with those used by the
+ * GuC's own firmware.
+ *
+ * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
+ */
+
+#define GFXCORE_FAMILY_GEN8		11
+#define GFXCORE_FAMILY_GEN9		12
+#define GFXCORE_FAMILY_FORCE_ULONG	0x7fffffff
+
+#define GUC_CTX_PRIORITY_CRITICAL	0
+#define GUC_CTX_PRIORITY_HIGH		1
+#define GUC_CTX_PRIORITY_NORMAL		2
+#define GUC_CTX_PRIORITY_LOW		3
+
+#define GUC_MAX_GPU_CONTEXTS		1024
+#define	GUC_INVALID_CTX_ID		(GUC_MAX_GPU_CONTEXTS + 1)
+
+/* Work queue item header definitions */
+#define WQ_STATUS_ACTIVE		1
+#define WQ_STATUS_SUSPENDED		2
+#define WQ_STATUS_CMD_ERROR		3
+#define WQ_STATUS_ENGINE_ID_NOT_USED	4
+#define WQ_STATUS_SUSPENDED_FROM_RESET	5
+#define WQ_TYPE_SHIFT			0
+#define   WQ_TYPE_BATCH_BUF		(0x1 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_PSEUDO		(0x2 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_INORDER		(0x3 << WQ_TYPE_SHIFT)
+#define WQ_TARGET_SHIFT			10
+#define WQ_LEN_SHIFT			16
+#define WQ_NO_WCFLUSH_WAIT		(1 << 27)
+#define WQ_PRESENT_WORKLOAD		(1 << 28)
+#define WQ_WORKLOAD_SHIFT		29
+#define   WQ_WORKLOAD_GENERAL		(0 << WQ_WORKLOAD_SHIFT)
+#define   WQ_WORKLOAD_GPGPU		(1 << WQ_WORKLOAD_SHIFT)
+#define   WQ_WORKLOAD_TOUCH		(2 << WQ_WORKLOAD_SHIFT)
+
+#define WQ_RING_TAIL_SHIFT		20
+#define WQ_RING_TAIL_MASK		(0x7FF << WQ_RING_TAIL_SHIFT)
+
+#define GUC_DOORBELL_ENABLED		1
+#define GUC_DOORBELL_DISABLED		0
+
+#define GUC_CTX_DESC_ATTR_ACTIVE	(1 << 0)
+#define GUC_CTX_DESC_ATTR_PENDING_DB	(1 << 1)
+#define GUC_CTX_DESC_ATTR_KERNEL	(1 << 2)
+#define GUC_CTX_DESC_ATTR_PREEMPT	(1 << 3)
+#define GUC_CTX_DESC_ATTR_RESET		(1 << 4)
+#define GUC_CTX_DESC_ATTR_WQLOCKED	(1 << 5)
+#define GUC_CTX_DESC_ATTR_PCH		(1 << 6)
+
+/* The GuC control data is GUC_CTL_MAX_DWORDS dwords long */
+#define GUC_CTL_CTXINFO			0
+#define   GUC_CTL_CTXNUM_IN16_SHIFT	0
+#define   GUC_CTL_BASE_ADDR_SHIFT	12
+#define GUC_CTL_ARAT_HIGH		1
+#define GUC_CTL_ARAT_LOW		2
+#define GUC_CTL_DEVICE_INFO		3
+#define   GUC_CTL_GTTYPE_SHIFT		0
+#define   GUC_CTL_COREFAMILY_SHIFT	7
+#define GUC_CTL_LOG_PARAMS		4
+#define   GUC_LOG_VALID			(1 << 0)
+#define   GUC_LOG_NOTIFY_ON_HALF_FULL	(1 << 1)
+#define   GUC_LOG_ALLOC_IN_MEGABYTE	(1 << 3)
+#define   GUC_LOG_CRASH_PAGES		1
+#define   GUC_LOG_CRASH_SHIFT		4
+#define   GUC_LOG_DPC_PAGES		3
+#define   GUC_LOG_DPC_SHIFT		6
+#define   GUC_LOG_ISR_PAGES		3
+#define   GUC_LOG_ISR_SHIFT		9
+#define   GUC_LOG_BUF_ADDR_SHIFT	12
+#define GUC_CTL_PAGE_FAULT_CONTROL	5
+#define GUC_CTL_WA			6
+#define   GUC_CTL_WA_UK_BY_DRIVER	(1 << 3)
+#define GUC_CTL_FEATURE			7
+#define   GUC_CTL_VCS2_ENABLED		(1 << 0)
+#define   GUC_CTL_KERNEL_SUBMISSIONS	(1 << 1)
+#define   GUC_CTL_FEATURE2		(1 << 2)
+#define   GUC_CTL_POWER_GATING		(1 << 3)
+#define   GUC_CTL_DISABLE_SCHEDULER	(1 << 4)
+#define   GUC_CTL_PREEMPTION_LOG	(1 << 5)
+#define   GUC_CTL_ENABLE_SLPC		(1 << 7)
+#define GUC_CTL_DEBUG			8
+#define   GUC_LOG_VERBOSITY_SHIFT	0
+#define   GUC_LOG_VERBOSITY_LOW		(0 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_MED		(1 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_HIGH	(2 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_ULTRA	(3 << GUC_LOG_VERBOSITY_SHIFT)
+/* Verbosity range-check limits, without the shift */
+#define	  GUC_LOG_VERBOSITY_MIN		0
+#define	  GUC_LOG_VERBOSITY_MAX		3
+
+#define GUC_CTL_MAX_DWORDS		(GUC_CTL_DEBUG + 1)
+
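+/*
+ * Illustrative sketch, not part of this patch: the offsets above index a
+ * GUC_CTL_MAX_DWORDS boot-parameter block handed to the firmware. The
+ * helper name and field values below are assumptions for illustration.
+ */
+static void sample_init_guc_params(u32 params[GUC_CTL_MAX_DWORDS])
+{
+	memset(params, 0, sizeof(u32) * GUC_CTL_MAX_DWORDS);
+
+	/* Core family and a hypothetical GT type in the DEVICE_INFO dword */
+	params[GUC_CTL_DEVICE_INFO] =
+		(GFXCORE_FAMILY_GEN9 << GUC_CTL_COREFAMILY_SHIFT) |
+		(1 << GUC_CTL_GTTYPE_SHIFT);
+
+	/* Quietest logging; no log buffer is configured in this sketch */
+	params[GUC_CTL_DEBUG] = GUC_LOG_VERBOSITY_LOW;
+}
+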
+struct guc_doorbell_info {
+	u32 db_status;
+	u32 cookie;
+	u32 reserved[14];
+} __packed;
+
+union guc_doorbell_qw {
+	struct {
+		u32 db_status;
+		u32 cookie;
+	};
+	u64 value_qw;
+} __packed;
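+
+/*
+ * Illustrative sketch, not part of this patch: the union above lets the
+ * doorbell status and cookie be published in a single aligned 64-bit
+ * store, so the GuC never sees a half-updated pair. The helper below is
+ * an assumption for illustration.
+ */
+static void sample_ring_doorbell(struct guc_doorbell_info *db, u32 cookie)
+{
+	union guc_doorbell_qw qw;
+
+	qw.db_status = GUC_DOORBELL_ENABLED;
+	qw.cookie = cookie;
+
+	/* db_status and cookie are the first two dwords of the page */
+	xchg((u64 *)&db->db_status, qw.value_qw);
+}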
+
+#define GUC_MAX_DOORBELLS		256
+#define GUC_INVALID_DOORBELL_ID		(GUC_MAX_DOORBELLS)
+
+#define GUC_DB_SIZE			(PAGE_SIZE)
+#define GUC_WQ_SIZE			(PAGE_SIZE * 2)
+
+/* Work item for submitting workloads into work queue of GuC. */
+struct guc_wq_item {
+	u32 header;
+	u32 context_desc;
+	u32 ring_tail;
+	u32 fence_id;
+} __packed;
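+
+/*
+ * Illustrative sketch, not part of this patch: a work item header would
+ * be assembled from the WQ_* fields defined earlier. The length encoding
+ * and flag choice are assumptions for illustration.
+ */
+static u32 sample_wq_item_header(u32 len_dwords)
+{
+	return WQ_TYPE_INORDER |
+	       (len_dwords << WQ_LEN_SHIFT) |
+	       WQ_NO_WCFLUSH_WAIT;
+}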
+
+struct guc_process_desc {
+	u32 context_id;
+	u64 db_base_addr;
+	u32 head;
+	u32 tail;
+	u32 error_offset;
+	u64 wq_base_addr;
+	u32 wq_size_bytes;
+	u32 wq_status;
+	u32 engine_presence;
+	u32 priority;
+	u32 reserved[30];
+} __packed;
+
+/* The engine id and context id are packed into guc_execlist_context.context_id */
+#define GUC_ELC_CTXID_OFFSET		0
+#define GUC_ELC_ENGINE_OFFSET		29
+
+/* The execlist context including software and HW information */
+struct guc_execlist_context {
+	u32 context_desc;
+	u32 context_id;
+	u32 ring_status;
+	u32 ring_lcra;
+	u32 ring_begin;
+	u32 ring_end;
+	u32 ring_next_free_location;
+	u32 ring_current_tail_pointer_value;
+	u8 engine_state_submit_value;
+	u8 engine_state_wait_value;
+	u16 pagefault_count;
+	u16 engine_submit_queue_count;
+} __packed;
+
+/* Context descriptor for communicating between uKernel and driver */
+struct guc_context_desc {
+	u32 sched_common_area;
+	u32 context_id;
+	u32 pas_id;
+	u8 engines_used;
+	u64 db_trigger_cpu;
+	u32 db_trigger_uk;
+	u64 db_trigger_phy;
+	u16 db_id;
+
+	struct guc_execlist_context lrc[I915_NUM_RINGS];
+
+	u8 attribute;
+
+	u32 priority;
+
+	u32 wq_sampled_tail_offset;
+	u32 wq_total_submit_enqueues;
+
+	u32 process_desc;
+	u32 wq_addr;
+	u32 wq_size;
+
+	u32 engine_presence;
+
+	u32 reserved0[1];
+	u64 reserved1[1];
+
+	u64 desc_private;
+} __packed;
+
+/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
+enum host2guc_action {
+	HOST2GUC_ACTION_DEFAULT = 0x0,
+	HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
+	HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+	HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+	HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+	HOST2GUC_ACTION_LIMIT
+};
+
+/*
+ * The GuC sends its response to a command by overwriting the
+ * command in SS0. The response is distinguishable from a command
+ * by the fact that all the MASK bits are set. The remaining bits
+ * give more detail.
+ */
+#define	GUC2HOST_RESPONSE_MASK		((u32)0xF0000000)
+#define	GUC2HOST_IS_RESPONSE(x) 	((u32)(x) >= GUC2HOST_RESPONSE_MASK)
+#define	GUC2HOST_STATUS(x)		(GUC2HOST_RESPONSE_MASK | (x))
+
+/* The GuC will return status in SOFT_SCRATCH_O_REG */
+enum guc2host_status {
+	GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
+	GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
+	GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
+	GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
+};
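+
+/*
+ * Illustrative usage, not part of this patch: telling a GuC response
+ * apart from the echoed command with the macros above. Reading the
+ * scratch register is left to a hypothetical caller.
+ */
+static int sample_check_guc_response(u32 scratch0)
+{
+	if (!GUC2HOST_IS_RESPONSE(scratch0))
+		return -EAGAIN;	/* no reply yet */
+
+	return scratch0 == GUC2HOST_STATUS_SUCCESS ? 0 : -EIO;
+}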
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e97731aab6dc..dcd336bcdfe7 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -174,10 +174,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	u32 val = I915_READ(VIDEO_DIP_CTL);
 
-	if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-		return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
 
-	return false;
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 }
 
 static void ibx_write_infoframe(struct drm_encoder *encoder,
@@ -227,10 +231,15 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
 	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
-	if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-		return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
 
-	return false;
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void cpt_write_infoframe(struct drm_encoder *encoder,
@@ -282,7 +291,12 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
 	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
-	return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -332,10 +346,15 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
 	int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
-	if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-		return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
 
-	return false;
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void hsw_write_infoframe(struct drm_encoder *encoder,
@@ -383,8 +402,9 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
 	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
 	u32 val = I915_READ(ctl_reg);
 
-	return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
-		      VIDEO_DIP_ENABLE_VS_HSW);
+	return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+		      VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+		      VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
 }
 
 /*
@@ -514,7 +534,13 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~VIDEO_DIP_ENABLE;
+		if (port != (val & VIDEO_DIP_PORT_MASK)) {
+			DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
+				      (val & VIDEO_DIP_PORT_MASK) >> 29);
+			return;
+		}
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
@@ -522,16 +548,17 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
 		if (val & VIDEO_DIP_ENABLE) {
-			val &= ~VIDEO_DIP_ENABLE;
-			I915_WRITE(reg, val);
-			POSTING_READ(reg);
+			DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
+				      (val & VIDEO_DIP_PORT_MASK) >> 29);
+			return;
 		}
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
 
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~VIDEO_DIP_ENABLE_VENDOR;
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -541,6 +568,97 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
 	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }
 
+static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	/*
+	 * HDMI cloning is only supported on g4x which doesn't
+	 * support deep color or GCP infoframes anyway so no
+	 * need to worry about multiple HDMI sinks here.
+	 */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			return connector->display_info.bpc > 8;
+
+	return false;
+}
+
+/*
+ * Determine if default_phase=1 can be indicated in the GCP infoframe.
+ *
+ * From HDMI specification 1.4a:
+ * - The first pixel of each Video Data Period shall always have a pixel packing phase of 0
+ * - The first pixel following each Video Data Period shall have a pixel packing phase of 0
+ * - The PP bits shall be constant for all GCPs and will be equal to the last packing phase
+ * - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing
+ *   phase of 0
+ */
+static bool gcp_default_phase_possible(int pipe_bpp,
+				       const struct drm_display_mode *mode)
+{
+	unsigned int pixels_per_group;
+
+	switch (pipe_bpp) {
+	case 30:
+		/* 4 pixels in 5 clocks */
+		pixels_per_group = 4;
+		break;
+	case 36:
+		/* 2 pixels in 3 clocks */
+		pixels_per_group = 2;
+		break;
+	case 48:
+		/* 1 pixel in 2 clocks */
+		pixels_per_group = 1;
+		break;
+	default:
+		/* phase information not relevant for 8bpc */
+		return false;
+	}
+
+	return mode->crtc_hdisplay % pixels_per_group == 0 &&
+		mode->crtc_htotal % pixels_per_group == 0 &&
+		mode->crtc_hblank_start % pixels_per_group == 0 &&
+		mode->crtc_hblank_end % pixels_per_group == 0 &&
+		mode->crtc_hsync_start % pixels_per_group == 0 &&
+		mode->crtc_hsync_end % pixels_per_group == 0 &&
+		((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 ||
+		 mode->crtc_htotal/2 % pixels_per_group == 0);
+}
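+
+/*
+ * Worked example, for illustration only: a CEA 1080p timing has
+ * hdisplay 1920, hsync_start 2008, hsync_end 2052 and htotal 2200, all
+ * multiples of 4, so at 30bpp (4 pixels per group) the packing phase
+ * returns to 0 at every transition and default_phase=1 may be used.
+ * A 1366-wide timing (1366 % 4 != 0) would not qualify.
+ */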
+
+static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	u32 reg, val = 0;
+
+	if (HAS_DDI(dev_priv))
+		reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
+	else if (IS_VALLEYVIEW(dev_priv))
+		reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+	else if (HAS_PCH_SPLIT(dev_priv->dev))
+		reg = TVIDEO_DIP_GCP(crtc->pipe);
+	else
+		return false;
+
+	/* Indicate color depth whenever the sink supports deep color */
+	if (hdmi_sink_is_deep_color(encoder))
+		val |= GCP_COLOR_INDICATION;
+
+	/* Enable default_phase whenever the display mode is suitably aligned */
+	if (gcp_default_phase_possible(crtc->config->pipe_bpp,
+				       &crtc->config->base.adjusted_mode))
+		val |= GCP_DEFAULT_PHASE_ENABLE;
+
+	I915_WRITE(reg, val);
+
+	return val != 0;
+}
+
 static void ibx_set_infoframes(struct drm_encoder *encoder,
 			       bool enable,
 			       struct drm_display_mode *adjusted_mode)
@@ -561,25 +679,29 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~VIDEO_DIP_ENABLE;
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
 	}
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
-		if (val & VIDEO_DIP_ENABLE) {
-			val &= ~VIDEO_DIP_ENABLE;
-			I915_WRITE(reg, val);
-			POSTING_READ(reg);
-		}
+		WARN(val & VIDEO_DIP_ENABLE,
+		     "DIP already enabled on port %c\n",
+		     (val & VIDEO_DIP_PORT_MASK) >> 29);
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
 
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
-		 VIDEO_DIP_ENABLE_GCP);
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -607,7 +729,9 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
@@ -616,7 +740,10 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
 	/* Set both together, unset both together: see the spec. */
 	val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
 	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
-		 VIDEO_DIP_ENABLE_GCP);
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -646,25 +773,29 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~VIDEO_DIP_ENABLE;
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
 	}
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
-		if (val & VIDEO_DIP_ENABLE) {
-			val &= ~VIDEO_DIP_ENABLE;
-			I915_WRITE(reg, val);
-			POSTING_READ(reg);
-		}
+		WARN(val & VIDEO_DIP_ENABLE,
+		     "DIP already enabled on port %c\n",
+		     (val & VIDEO_DIP_PORT_MASK) >> 29);
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
 
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
-		 VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -686,14 +817,18 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
 
 	assert_hdmi_port_disabled(intel_hdmi);
 
+	val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+		 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+		 VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+
 	if (!enable) {
-		I915_WRITE(reg, 0);
+		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
 	}
 
-	val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
-		 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP_HSW;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -808,58 +943,146 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	else
 		dotclock = pipe_config->port_clock;
 
+	if (pipe_config->pixel_multiplier)
+		dotclock /= pipe_config->pixel_multiplier;
+
 	if (HAS_PCH_SPLIT(dev_priv->dev))
 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 }
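+
+/*
+ * Illustrative arithmetic, not part of this patch: a pixel-repeated
+ * (DBLCLK) mode drives the port at twice the pixel rate, so a port
+ * clock of 54000 kHz with pixel_multiplier == 2 reads back as a
+ * 27000 kHz dotclock after the division added above.
+ */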
 
-static void intel_enable_hdmi(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+{
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+	WARN_ON(!crtc->config->has_hdmi_sink);
+	DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+			 pipe_name(crtc->pipe));
+	intel_audio_codec_enable(encoder);
+}
+
+static void g4x_enable_hdmi(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	u32 temp;
-	u32 enable_bits = SDVO_ENABLE;
 
-	if (intel_crtc->config->has_audio)
-		enable_bits |= SDVO_AUDIO_ENABLE;
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
+
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
+}
+
+static void ibx_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 temp;
 
 	temp = I915_READ(intel_hdmi->hdmi_reg);
 
-	/* HW workaround for IBX, we need to move the port to transcoder A
-	 * before disabling it, so restore the transcoder select bit here. */
-	if (HAS_PCH_IBX(dev))
-		enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
 
-	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
-	 * we do this anyway which shows more stable in testing.
+	/*
+	 * HW workaround: we need to write this twice for an issue
+	 * that may result in the first write getting masked.
 	 */
-	if (HAS_PCH_SPLIT(dev)) {
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	/*
+	 * HW workaround: we need to toggle the enable bit off and on
+	 * for 12bpc with pixel repeat.
+	 *
+	 * FIXME: BSpec says this should be done at the end of
+	 * the modeset sequence, so this may be too soon.
+	 */
+	if (crtc->config->pipe_bpp > 24 &&
+	    crtc->config->pixel_multiplier > 1) {
 		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
 		POSTING_READ(intel_hdmi->hdmi_reg);
+
+		/*
+		 * HW workaround: we need to write this twice for an issue
+		 * that may result in the first write getting masked.
+		 */
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
 	}
 
-	temp |= enable_bits;
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
+}
+
+static void cpt_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	enum pipe pipe = crtc->pipe;
+	u32 temp;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
+
+	/*
+	 * WaEnableHDMI8bpcBefore12bpc:snb,ivb
+	 *
+	 * The procedure for 12bpc is as follows:
+	 * 1. disable HDMI clock gating
+	 * 2. enable HDMI with 8bpc
+	 * 3. enable HDMI with 12bpc
+	 * 4. enable HDMI clock gating
+	 */
+
+	if (crtc->config->pipe_bpp > 24) {
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   I915_READ(TRANS_CHICKEN1(pipe)) |
+			   TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+
+		temp &= ~SDVO_COLOR_FORMAT_MASK;
+		temp |= SDVO_COLOR_FORMAT_8bpc;
+	}
 
 	I915_WRITE(intel_hdmi->hdmi_reg, temp);
 	POSTING_READ(intel_hdmi->hdmi_reg);
 
-	/* HW workaround, need to write this twice for issue that may result
-	 * in first write getting masked.
-	 */
-	if (HAS_PCH_SPLIT(dev)) {
+	if (crtc->config->pipe_bpp > 24) {
+		temp &= ~SDVO_COLOR_FORMAT_MASK;
+		temp |= HDMI_COLOR_FORMAT_12bpc;
+
 		I915_WRITE(intel_hdmi->hdmi_reg, temp);
 		POSTING_READ(intel_hdmi->hdmi_reg);
-	}
 
-	if (intel_crtc->config->has_audio) {
-		WARN_ON(!intel_crtc->config->has_hdmi_sink);
-		DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
-				 pipe_name(intel_crtc->pipe));
-		intel_audio_codec_enable(encoder);
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   I915_READ(TRANS_CHICKEN1(pipe)) &
+			   ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
 	}
+
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
 }
 
 static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -901,6 +1124,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
 		I915_WRITE(intel_hdmi->hdmi_reg, temp);
 		POSTING_READ(intel_hdmi->hdmi_reg);
 	}
+
+	intel_hdmi->set_infoframes(&encoder->base, false, NULL);
 }
 
 static void g4x_disable_hdmi(struct intel_encoder *encoder)
@@ -926,7 +1151,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder)
 	intel_disable_hdmi(encoder);
 }
 
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 {
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
@@ -939,24 +1164,51 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 }
 
 static enum drm_mode_status
+hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+		      int clock, bool respect_dvi_limit)
+{
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+	if (clock < 25000)
+		return MODE_CLOCK_LOW;
+	if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
+		return MODE_CLOCK_HIGH;
+
+	/* BXT DPLL can't generate 223-240 MHz */
+	if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+		return MODE_CLOCK_RANGE;
+
+	/* CHV DPLL can't generate 216-240 MHz */
+	if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
 {
-	int clock = mode->clock;
+	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+	enum drm_mode_status status;
+	int clock;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	clock = mode->clock;
 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		clock *= 2;
 
-	if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
-					 true))
-		return MODE_CLOCK_HIGH;
-	if (clock < 20000)
-		return MODE_CLOCK_LOW;
+	/* check if we can do 8bpc */
+	status = hdmi_port_clock_valid(hdmi, clock, true);
 
-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-		return MODE_NO_DBLESCAN;
+	/* if we can't do 8bpc we may still be able to do 12bpc */
+	if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+		status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
 
-	return MODE_OK;
+	return status;
 }
 
 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
@@ -997,8 +1249,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2;
-	int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
+	int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+	int clock_12bpc = clock_8bpc * 3 / 2;
 	int desired_bpp;
 
 	pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
@@ -1017,6 +1269,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
 		pipe_config->pixel_multiplier = 2;
+		clock_8bpc *= 2;
+		clock_12bpc *= 2;
 	}
 
 	if (intel_hdmi->color_range)
@@ -1035,9 +1289,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	 * within limits.
 	 */
 	if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
-	    clock_12bpc <= portclock_limit &&
-	    hdmi_12bpc_possible(pipe_config) &&
-	    0 /* FIXME 12bpc support totally broken */) {
+	    hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
+	    hdmi_12bpc_possible(pipe_config)) {
 		DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
 		desired_bpp = 12*3;
 
@@ -1046,6 +1299,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	} else {
 		DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
 		desired_bpp = 8*3;
+
+		pipe_config->port_clock = clock_8bpc;
 	}
 
 	if (!pipe_config->bw_constrained) {
@@ -1053,8 +1308,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 		pipe_config->pipe_bpp = desired_bpp;
 	}
 
-	if (adjusted_mode->crtc_clock > portclock_limit) {
-		DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
+	if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
+				  false) != MODE_OK) {
+		DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
 		return false;
 	}
 
@@ -1323,7 +1579,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 				   intel_crtc->config->has_hdmi_sink,
 				   adjusted_mode);
 
-	intel_enable_hdmi(encoder);
+	g4x_enable_hdmi(encoder);
 
 	vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
@@ -1640,7 +1896,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 				   intel_crtc->config->has_hdmi_sink,
 				   adjusted_mode);
 
-	intel_enable_hdmi(encoder);
+	g4x_enable_hdmi(encoder);
 
 	vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
@@ -1653,7 +1909,7 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_hdmi_detect,
 	.force = intel_hdmi_force,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1702,6 +1958,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_dig_port->port;
+	uint8_t alternate_ddc_pin;
 
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_HDMIA);
@@ -1735,6 +1992,26 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
 		intel_encoder->hpd_pin = HPD_PORT_D;
 		break;
+	case PORT_E:
+		/* On SKL, PORT E doesn't have a separate GMBUS pin.
+		 * We rely on VBT to set a proper alternate GMBUS pin. */
+		alternate_ddc_pin =
+			dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
+		switch (alternate_ddc_pin) {
+		case DDC_PIN_B:
+			intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
+			break;
+		case DDC_PIN_C:
+			intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
+			break;
+		case DDC_PIN_D:
+			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
+			break;
+		default:
+			MISSING_CASE(alternate_ddc_pin);
+		}
+		intel_encoder->hpd_pin = HPD_PORT_E;
+		break;
 	case PORT_A:
 		intel_encoder->hpd_pin = HPD_PORT_A;
 		/* Internal port only for eDP. */
@@ -1827,7 +2104,12 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
 		intel_encoder->post_disable = vlv_hdmi_post_disable;
 	} else {
 		intel_encoder->pre_enable = intel_hdmi_pre_enable;
-		intel_encoder->enable = intel_enable_hdmi;
+		if (HAS_PCH_CPT(dev))
+			intel_encoder->enable = cpt_enable_hdmi;
+		else if (HAS_PCH_IBX(dev))
+			intel_encoder->enable = ibx_enable_hdmi;
+		else
+			intel_encoder->enable = g4x_enable_hdmi;
 	}
 
 	intel_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
new file mode 100644
index 000000000000..53c0173a39fe
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: Hotplug
+ *
+ * Simply put, hotplug occurs when a display is connected to or disconnected
+ * from the system. However, there may be adapters and docking stations and
+ * Display Port short pulses and MST devices involved, complicating matters.
+ *
+ * Hotplug in i915 is handled in many different levels of abstraction.
+ *
+ * The platform dependent interrupt handling code in i915_irq.c enables,
+ * disables, and does preliminary handling of the interrupts. The interrupt
+ * handlers gather the hotplug detect (HPD) information from relevant registers
+ * into a platform independent mask of hotplug pins that have fired.
+ *
+ * The platform independent interrupt handler intel_hpd_irq_handler() in
+ * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
+ * further processing to appropriate bottom halves (Display Port specific and
+ * regular hotplug).
+ *
+ * The Display Port work function i915_digport_work_func() calls into
+ * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
+ * pulses, with failures and non-MST long pulses triggering regular hotplug
+ * processing on the connector.
+ *
+ * The regular hotplug work function i915_hotplug_work_func() calls connector
+ * detect hooks, and, if connector status changes, triggers sending of hotplug
+ * uevent to userspace via drm_kms_helper_hotplug_event().
+ *
+ * Finally, userspace is responsible for triggering a modeset upon receiving
+ * the hotplug uevent, disabling or enabling the crtc as needed.
+ *
+ * The hotplug interrupt storm detection and mitigation code keeps track of the
+ * number of interrupts per hotplug pin over a period of time, and if the number
+ * of interrupts exceeds a certain threshold, the interrupt is disabled for a
+ * while before being re-enabled. The intention is to mitigate issues arising
+ * from broken hardware triggering massive amounts of interrupts and grinding
+ * the system to a halt.
+ *
+ * The current implementation expects that a hotplug interrupt storm will not
+ * be seen when a Display Port sink is connected, hence on platforms whose DP
+ * callback is handled by i915_digport_work_func, re-enabling of hpd is not
+ * performed (it was never expected to be disabled in the first place ;) ).
+ * This is specific to DP sinks handled by that routine; any other display,
+ * such as HDMI or DVI, enabled on the same port will take the
+ * i915_hotplug_work_func path, where this logic is handled.
+ */
+
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
+{
+	switch (pin) {
+	case HPD_PORT_A:
+		*port = PORT_A;
+		return true;
+	case HPD_PORT_B:
+		*port = PORT_B;
+		return true;
+	case HPD_PORT_C:
+		*port = PORT_C;
+		return true;
+	case HPD_PORT_D:
+		*port = PORT_D;
+		return true;
+	case HPD_PORT_E:
+		*port = PORT_E;
+		return true;
+	default:
+		return false;	/* no hpd */
+	}
+}
+
+#define HPD_STORM_DETECT_PERIOD		1000
+#define HPD_STORM_THRESHOLD		5
+#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
+
+/**
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * @dev_priv: private driver data pointer
+ * @pin: the pin to gather stats on
+ *
+ * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * storms. Only the pin specific stats and state are changed, the caller is
+ * responsible for further action.
+ *
+ * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms,
+ * otherwise it's considered an irq storm, and the irq state is set to
+ * @HPD_MARK_DISABLED.
+ *
+ * Return true if an irq storm was detected on @pin.
+ */
+static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+				       enum hpd_pin pin)
+{
+	unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
+	bool storm = false;
+
+	if (!time_in_range(jiffies, start, end)) {
+		dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
+		dev_priv->hotplug.stats[pin].count = 0;
+		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
+	} else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
+		dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+		DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+		storm = true;
+	} else {
+		dev_priv->hotplug.stats[pin].count++;
+		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+			      dev_priv->hotplug.stats[pin].count);
+	}
+
+	return storm;
+}
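+
+/*
+ * Illustrative timeline, not part of this patch: the first interrupt
+ * on a pin opens a HPD_STORM_DETECT_PERIOD (1000 ms) window with the
+ * count reset to 0; interrupts inside the window increment the count,
+ * and once it exceeds HPD_STORM_THRESHOLD (5) the next interrupt marks
+ * the pin HPD_MARK_DISABLED and reports a storm. An interrupt arriving
+ * after the window simply restarts it.
+ */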
+
+static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_connector *intel_connector;
+	struct intel_encoder *intel_encoder;
+	struct drm_connector *connector;
+	enum hpd_pin pin;
+	bool hpd_disabled = false;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (connector->polled != DRM_CONNECTOR_POLL_HPD)
+			continue;
+
+		intel_connector = to_intel_connector(connector);
+		intel_encoder = intel_connector->encoder;
+		if (!intel_encoder)
+			continue;
+
+		pin = intel_encoder->hpd_pin;
+		if (pin == HPD_NONE ||
+		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+			continue;
+
+		DRM_INFO("HPD interrupt storm detected on connector %s: "
+			 "switching from hotplug detection to polling\n",
+			 connector->name);
+
+		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT
+			| DRM_CONNECTOR_POLL_DISCONNECT;
+		hpd_disabled = true;
+	}
+
+	/* Enable polling and queue hotplug re-enabling. */
+	if (hpd_disabled) {
+		drm_kms_helper_poll_enable(dev);
+		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+	}
+}
+
+static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv),
+			     hotplug.reenable_work.work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	int i;
+
+	intel_runtime_pm_get(dev_priv);
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	for_each_hpd_pin(i) {
+		struct drm_connector *connector;
+
+		if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+			continue;
+
+		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+
+		list_for_each_entry(connector, &mode_config->connector_list, head) {
+			struct intel_connector *intel_connector = to_intel_connector(connector);
+
+			if (intel_connector->encoder->hpd_pin == i) {
+				if (connector->polled != intel_connector->polled)
+					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+							 connector->name);
+				connector->polled = intel_connector->polled;
+				if (!connector->polled)
+					connector->polled = DRM_CONNECTOR_POLL_HPD;
+			}
+		}
+	}
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	intel_runtime_pm_put(dev_priv);
+}
+
+static bool intel_hpd_irq_event(struct drm_device *dev,
+				struct drm_connector *connector)
+{
+	enum drm_connector_status old_status;
+
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	old_status = connector->status;
+
+	connector->status = connector->funcs->detect(connector, false);
+	if (old_status == connector->status)
+		return false;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+		      connector->base.id,
+		      connector->name,
+		      drm_get_connector_status_name(old_status),
+		      drm_get_connector_status_name(connector->status));
+
+	return true;
+}
+
+static void i915_digport_work_func(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+	u32 long_port_mask, short_port_mask;
+	struct intel_digital_port *intel_dig_port;
+	int i;
+	u32 old_bits = 0;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	long_port_mask = dev_priv->hotplug.long_port_mask;
+	dev_priv->hotplug.long_port_mask = 0;
+	short_port_mask = dev_priv->hotplug.short_port_mask;
+	dev_priv->hotplug.short_port_mask = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	for (i = 0; i < I915_MAX_PORTS; i++) {
+		bool valid = false;
+		bool long_hpd = false;
+		intel_dig_port = dev_priv->hotplug.irq_port[i];
+		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+			continue;
+
+		if (long_port_mask & (1 << i))  {
+			valid = true;
+			long_hpd = true;
+		} else if (short_port_mask & (1 << i))
+			valid = true;
+
+		if (valid) {
+			enum irqreturn ret;
+
+			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+			if (ret == IRQ_NONE) {
+				/* fall back to old school hpd */
+				old_bits |= (1 << intel_dig_port->base.hpd_pin);
+			}
+		}
+	}
+
+	if (old_bits) {
+		spin_lock_irq(&dev_priv->irq_lock);
+		dev_priv->hotplug.event_bits |= old_bits;
+		spin_unlock_irq(&dev_priv->irq_lock);
+		schedule_work(&dev_priv->hotplug.hotplug_work);
+	}
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_connector *intel_connector;
+	struct intel_encoder *intel_encoder;
+	struct drm_connector *connector;
+	bool changed = false;
+	u32 hpd_event_bits;
+
+	mutex_lock(&mode_config->mutex);
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	hpd_event_bits = dev_priv->hotplug.event_bits;
+	dev_priv->hotplug.event_bits = 0;
+
+	/* Disable hotplug on connectors that hit an irq storm. */
+	intel_hpd_irq_storm_disable(dev_priv);
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		intel_connector = to_intel_connector(connector);
+		if (!intel_connector->encoder)
+			continue;
+		intel_encoder = intel_connector->encoder;
+		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+				      connector->name, intel_encoder->hpd_pin);
+			if (intel_encoder->hot_plug)
+				intel_encoder->hot_plug(intel_encoder);
+			if (intel_hpd_irq_event(dev, connector))
+				changed = true;
+		}
+	}
+	mutex_unlock(&mode_config->mutex);
+
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
+}
+
+
+/**
+ * intel_hpd_irq_handler - main hotplug irq handler
+ * @dev: drm device
+ * @pin_mask: a mask of hpd pins that have triggered the irq
+ * @long_mask: a mask of hpd pins that may be long hpd pulses
+ *
+ * This is the main hotplug irq handler for all platforms. The platform specific
+ * irq handlers call the platform specific hotplug irq handlers, which read and
+ * decode the appropriate registers into bitmasks about hpd pins that have
+ * triggered (@pin_mask), and which of those pins may be long pulses
+ * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
+ * is not a digital port.
+ *
+ * Here, we do hotplug irq storm detection and mitigation, and pass further
+ * processing to appropriate bottom halves.
+ */
+void intel_hpd_irq_handler(struct drm_device *dev,
+			   u32 pin_mask, u32 long_mask)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	enum port port;
+	bool storm_detected = false;
+	bool queue_dig = false, queue_hp = false;
+	bool is_dig_port;
+
+	if (!pin_mask)
+		return;
+
+	spin_lock(&dev_priv->irq_lock);
+	for_each_hpd_pin(i) {
+		if (!(BIT(i) & pin_mask))
+			continue;
+
+		is_dig_port = intel_hpd_pin_to_port(i, &port) &&
+			      dev_priv->hotplug.irq_port[port];
+
+		if (is_dig_port) {
+			bool long_hpd = long_mask & BIT(i);
+
+			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+					 long_hpd ? "long" : "short");
+			/*
+			 * For long HPD pulses we want to have the digital queue happen,
+			 * but we still want HPD storm detection to function.
+			 */
+			queue_dig = true;
+			if (long_hpd) {
+				dev_priv->hotplug.long_port_mask |= (1 << port);
+			} else {
+				/* for short HPD just trigger the digital queue */
+				dev_priv->hotplug.short_port_mask |= (1 << port);
+				continue;
+			}
+		}
+
+		if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+			/*
+			 * On GMCH platforms the interrupt mask bits only
+			 * prevent irq generation, not the setting of the
+			 * hotplug bits themselves. So only WARN about unexpected
+			 * interrupts on saner platforms.
+			 */
+			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+				  "Received HPD interrupt on pin %d although disabled\n", i);
+			continue;
+		}
+
+		if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+			continue;
+
+		if (!is_dig_port) {
+			dev_priv->hotplug.event_bits |= BIT(i);
+			queue_hp = true;
+		}
+
+		if (intel_hpd_irq_storm_detect(dev_priv, i)) {
+			dev_priv->hotplug.event_bits &= ~BIT(i);
+			storm_detected = true;
+		}
+	}
+
+	if (storm_detected)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock(&dev_priv->irq_lock);
+
+	/*
+	 * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
+	 * queue for otherwise the flush_work in the pageflip code will
+	 * deadlock.
+	 */
+	if (queue_dig)
+		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+	if (queue_hp)
+		schedule_work(&dev_priv->hotplug.hotplug_work);
+}
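+
+/*
+ * Illustrative caller sketch (hypothetical platform code; the EXAMPLE_*
+ * status bits are made up for illustration): a platform irq handler
+ * decodes its hotplug status register into the pin and long-pulse
+ * bitmasks described above.
+ *
+ *	u32 pin_mask = 0, long_mask = 0;
+ *
+ *	if (hotplug_status & EXAMPLE_PORTB_HOTPLUG) {
+ *		pin_mask |= BIT(HPD_PORT_B);
+ *		if (hotplug_status & EXAMPLE_PORTB_LONG_PULSE)
+ *			long_mask |= BIT(HPD_PORT_B);
+ *	}
+ *
+ *	intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ */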
+
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug and
+ * poll request can run concurrently to other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+	int i;
+
+	for_each_hpd_pin(i) {
+		dev_priv->hotplug.stats[i].count = 0;
+		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+	}
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		struct intel_connector *intel_connector = to_intel_connector(connector);
+		connector->polled = intel_connector->polled;
+		if (connector->encoder && !connector->polled &&
+		    I915_HAS_HOTPLUG(dev) &&
+		    intel_connector->encoder->hpd_pin > HPD_NONE)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+		if (intel_connector->mst_port)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+	}
+
+	/*
+	 * Interrupt setup is already guaranteed to be single-threaded, this is
+	 * just to make the assert_spin_locked checks happy.
+	 */
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+{
+	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+			  intel_hpd_irq_storm_reenable_work);
+}
+
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	dev_priv->hotplug.long_port_mask = 0;
+	dev_priv->hotplug.short_port_mask = 0;
+	dev_priv->hotplug.event_bits = 0;
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
+	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+}
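+
+/*
+ * Illustrative load/teardown ordering (a sketch only; the exact call sites
+ * live in the driver load and irq setup code):
+ *
+ *	intel_hpd_init_work(dev_priv);     - before interrupts are enabled
+ *	... enable interrupts ...
+ *	intel_hpd_init(dev_priv);          - hotplug may fire from here on
+ *	...
+ *	intel_hpd_cancel_work(dev_priv);   - on teardown, once irqs are off
+ */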
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7f2161a1ff5d..72e0edd7bbde 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -135,6 +135,7 @@
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "intel_mocs.h"
 
 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
@@ -190,9 +191,7 @@
 #define GEN8_CTX_PRIVILEGE (1<<8)
 
 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
-	const u64 _addr = test_bit(n, ppgtt->pdp.used_pdpes) ? \
-		ppgtt->pdp.page_directory[n]->daddr : \
-		ppgtt->scratch_pd->daddr; \
+	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n));	\
 	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
 	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
 }
@@ -211,9 +210,9 @@ enum {
 	FAULT_AND_CONTINUE /* Unsupported */
 };
 #define GEN8_CTX_ID_SHIFT 32
+#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-		struct intel_context *ctx);
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -263,10 +262,11 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
 	return lrca >> 12;
 }
 
-static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
-					 struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
 {
+	struct intel_engine_cs *ring = rq->ring;
 	struct drm_device *dev = ring->dev;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
 	uint64_t desc;
 	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
 
@@ -294,55 +294,59 @@ static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
 	return desc;
 }
 
-static void execlists_elsp_write(struct intel_engine_cs *ring,
-				 struct drm_i915_gem_object *ctx_obj0,
-				 struct drm_i915_gem_object *ctx_obj1)
+static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
+				 struct drm_i915_gem_request *rq1)
 {
+	struct intel_engine_cs *ring = rq0->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint64_t temp = 0;
-	uint32_t desc[4];
+	uint64_t desc[2];
 
-	/* XXX: You must always write both descriptors in the order below. */
-	if (ctx_obj1)
-		temp = execlists_ctx_descriptor(ring, ctx_obj1);
-	else
-		temp = 0;
-	desc[1] = (u32)(temp >> 32);
-	desc[0] = (u32)temp;
+	if (rq1) {
+		desc[1] = execlists_ctx_descriptor(rq1);
+		rq1->elsp_submitted++;
+	} else {
+		desc[1] = 0;
+	}
 
-	temp = execlists_ctx_descriptor(ring, ctx_obj0);
-	desc[3] = (u32)(temp >> 32);
-	desc[2] = (u32)temp;
+	desc[0] = execlists_ctx_descriptor(rq0);
+	rq0->elsp_submitted++;
 
+	/* You must always write both descriptors in the order below. */
 	spin_lock(&dev_priv->uncore.lock);
 	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
-	I915_WRITE_FW(RING_ELSP(ring), desc[1]);
-	I915_WRITE_FW(RING_ELSP(ring), desc[0]);
-	I915_WRITE_FW(RING_ELSP(ring), desc[3]);
+	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
+	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
 
+	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
 	/* The context is automatically loaded after the following */
-	I915_WRITE_FW(RING_ELSP(ring), desc[2]);
+	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
 
-	/* ELSP is a wo register, so use another nearby reg for posting instead */
+	/* ELSP is a write-only register, so use another nearby reg for posting */
 	POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
 	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 	spin_unlock(&dev_priv->uncore.lock);
 }
 
-static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
-				    struct drm_i915_gem_object *ring_obj,
-				    struct i915_hw_ppgtt *ppgtt,
-				    u32 tail)
+static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
+	struct intel_engine_cs *ring = rq->ring;
+	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
 	struct page *page;
 	uint32_t *reg_state;
 
+	BUG_ON(!ctx_obj);
+	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
+	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
+
 	page = i915_gem_object_get_page(ctx_obj, 1);
 	reg_state = kmap_atomic(page);
 
-	reg_state[CTX_RING_TAIL+1] = tail;
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+	reg_state[CTX_RING_TAIL+1] = rq->tail;
+	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
 	/* True PPGTT with dynamic page allocation: update PDP registers and
 	 * point the unallocated PDPs to the scratch page
@@ -359,32 +363,15 @@ static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
 	return 0;
 }
 
-static void execlists_submit_contexts(struct intel_engine_cs *ring,
-				      struct intel_context *to0, u32 tail0,
-				      struct intel_context *to1, u32 tail1)
+static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
+				      struct drm_i915_gem_request *rq1)
 {
-	struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
-	struct drm_i915_gem_object *ctx_obj1 = NULL;
-	struct intel_ringbuffer *ringbuf1 = NULL;
-
-	BUG_ON(!ctx_obj0);
-	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
-	WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
-
-	execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0);
+	execlists_update_context(rq0);
 
-	if (to1) {
-		ringbuf1 = to1->engine[ring->id].ringbuf;
-		ctx_obj1 = to1->engine[ring->id].state;
-		BUG_ON(!ctx_obj1);
-		WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
-		WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
+	if (rq1)
+		execlists_update_context(rq1);
 
-		execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1);
-	}
-
-	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
+	execlists_elsp_write(rq0, rq1);
 }
 
 static void execlists_context_unqueue(struct intel_engine_cs *ring)
@@ -444,13 +431,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 
 	WARN_ON(req1 && req1->elsp_submitted);
 
-	execlists_submit_contexts(ring, req0->ctx, req0->tail,
-				  req1 ? req1->ctx : NULL,
-				  req1 ? req1->tail : 0);
-
-	req0->elsp_submitted++;
-	if (req1)
-		req1->elsp_submitted++;
+	execlists_submit_requests(req0, req1);
 }
 
 static bool execlists_check_remove_request(struct intel_engine_cs *ring,
@@ -516,6 +497,9 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
 				(read_pointer % 6) * 8 + 4);
 
+		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+			continue;
+
 		if (status & GEN8_CTX_STATUS_PREEMPTED) {
 			if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
 				if (execlists_check_remove_request(ring, status_id))
@@ -540,37 +524,21 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 	ring->next_context_status_buffer = write_pointer % 6;
 
 	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
+		   _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
 }
 
-static int execlists_context_queue(struct intel_engine_cs *ring,
-				   struct intel_context *to,
-				   u32 tail,
-				   struct drm_i915_gem_request *request)
+static int execlists_context_queue(struct drm_i915_gem_request *request)
 {
+	struct intel_engine_cs *ring = request->ring;
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (to != ring->default_context)
-		intel_lr_context_pin(ring, to);
+	if (request->ctx != ring->default_context)
+		intel_lr_context_pin(request);
 
-	if (!request) {
-		/*
-		 * If there isn't a request associated with this submission,
-		 * create one as a temporary holder.
-		 */
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-		request->ring = ring;
-		request->ctx = to;
-		kref_init(&request->ref);
-		i915_gem_context_reference(request->ctx);
-	} else {
-		i915_gem_request_reference(request);
-		WARN_ON(to != request->ctx);
-	}
-	request->tail = tail;
+	i915_gem_request_reference(request);
+
+	request->tail = request->ringbuf->tail;
 
 	spin_lock_irq(&ring->execlist_lock);
 
@@ -585,7 +553,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 					   struct drm_i915_gem_request,
 					   execlist_link);
 
-		if (to == tail_req->ctx) {
+		if (request->ctx == tail_req->ctx) {
 			WARN(tail_req->elsp_submitted != 0,
 				"More than 2 already-submitted reqs queued\n");
 			list_del(&tail_req->execlist_link);
@@ -603,10 +571,9 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 	return 0;
 }
 
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
-					      struct intel_context *ctx)
+static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = req->ring;
 	uint32_t flush_domains;
 	int ret;
 
@@ -614,8 +581,7 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
 	if (ring->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->emit_flush(ringbuf, ctx,
-			       I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
@@ -623,12 +589,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
-				 struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -638,7 +602,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring, &req);
 			if (ret)
 				return ret;
 		}
@@ -655,59 +619,59 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(ringbuf, ctx);
+	return logical_ring_invalidate_all_caches(req);
 }
 
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
-					    struct intel_context *ctx)
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
 	int ret;
 
-	if (ctx != request->ring->default_context) {
-		ret = intel_lr_context_pin(request->ring, ctx);
+	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+
+	if (request->ctx != request->ring->default_context) {
+		ret = intel_lr_context_pin(request);
 		if (ret)
 			return ret;
 	}
 
-	request->ringbuf = ctx->engine[request->ring->id].ringbuf;
-	request->ctx     = ctx;
-	i915_gem_context_reference(request->ctx);
-
 	return 0;
 }
 
-static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
-				       struct intel_context *ctx,
+static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 				       int bytes)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_i915_gem_request *request;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_gem_request *target;
 	unsigned space;
 	int ret;
 
 	if (intel_ring_space(ringbuf) >= bytes)
 		return 0;
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
+	list_for_each_entry(target, &ring->request_list, list) {
 		/*
 		 * The request queue is per-engine, so can contain requests
 		 * from multiple ringbuffers. Here, we must ignore any that
 		 * aren't from the ringbuffer we're considering.
 		 */
-		if (request->ringbuf != ringbuf)
+		if (target->ringbuf != ringbuf)
 			continue;
 
 		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(request->postfix, ringbuf->tail,
+		space = __intel_ring_space(target->postfix, ringbuf->tail,
 					   ringbuf->size);
 		if (space >= bytes)
 			break;
 	}
 
-	if (WARN_ON(&request->list == &ring->request_list))
+	if (WARN_ON(&target->list == &ring->request_list))
 		return -ENOSPC;
 
-	ret = i915_wait_request(request);
+	ret = i915_wait_request(target);
 	if (ret)
 		return ret;
 
@@ -717,7 +681,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 
 /*
  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
- * @ringbuf: Logical Ringbuffer to advance.
+ * @request: Request whose logical ringbuffer is to be advanced.
  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
  * really happens during submission is that the context and current tail will be placed
@@ -725,33 +689,23 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 static void
-intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
-				      struct intel_context *ctx,
-				      struct drm_i915_gem_request *request)
+intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = request->ring;
 
-	intel_logical_ring_advance(ringbuf);
+	intel_logical_ring_advance(request->ringbuf);
 
 	if (intel_ring_stopped(ring))
 		return;
 
-	execlists_context_queue(ring, ctx, ringbuf->tail, request);
+	execlists_context_queue(request);
 }
 
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
-				    struct intel_context *ctx)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
 	uint32_t __iomem *virt;
 	int rem = ringbuf->size - ringbuf->tail;
 
-	if (ringbuf->space < rem) {
-		int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
-
-		if (ret)
-			return ret;
-	}
-
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
@@ -759,25 +713,50 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
-
-	return 0;
 }
 
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
-				struct intel_context *ctx, int bytes)
+static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 {
-	int ret;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	int remain_actual = ringbuf->size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
+
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
 
-	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = logical_ring_wrap_buffer(ringbuf, ctx);
-		if (unlikely(ret))
-			return ret;
+	if (unlikely(bytes > remain_usable)) {
+		/*
+		 * Not enough space for the basic request, so we need to flush
+		 * out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain_actual + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain_usable)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So we only need to wait for the
+			 * reserved size after flushing out the remainder.
+			 */
+			wait_bytes = remain_actual + ringbuf->reserved_size;
+			need_wrap = true;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
+		}
 	}
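+	/*
+	 * Worked example (illustrative numbers only): with 1024 usable
+	 * bytes left before the wrap point and a 2048 byte request, the
+	 * request itself cannot fit, so we wait for the physical remainder
+	 * plus base + reserved; __wrap_ring_buffer() then pads the
+	 * remainder with NOOPs and the whole request fits at the start of
+	 * the buffer.
+	 */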
 
-	if (unlikely(ringbuf->space < bytes)) {
-		ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
+	if (wait_bytes) {
+		ret = logical_ring_wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
 	}
 
 	return 0;
@@ -786,7 +765,8 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
 /**
  * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
  *
- * @ringbuf: Logical ringbuffer.
+ * @request: The request to start some new work for.
  * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
  *
  * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
@@ -796,32 +776,42 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
  *
  * Return: non-zero if the ringbuffer is not ready to be written to.
  */
-static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
-				    struct intel_context *ctx, int num_dwords)
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv;
 	int ret;
 
+	WARN_ON(req == NULL);
+	dev_priv = req->ring->dev->dev_private;
+
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
 	if (ret)
 		return ret;
 
-	ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
-	if (ret)
-		return ret;
-
-	/* Preallocate the olr before touching the ring */
-	ret = i915_gem_request_alloc(ring, ctx);
+	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
 	if (ret)
 		return ret;
 
-	ringbuf->space -= num_dwords * sizeof(uint32_t);
+	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+	/*
+	 * The first call merely notes the reserve request and is common for
+	 * all back ends. The subsequent localised _begin() call actually
+	 * ensures that the reservation is available. Without the begin, if
+	 * the request creator immediately submitted the request without
+	 * adding any commands to it then there might not actually be
+	 * sufficient room for the submission commands.
+	 */
+	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+	return intel_logical_ring_begin(request, 0);
+}
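+
+/*
+ * Illustrative call order for the reservation scheme (a sketch only; all
+ * of the names below already exist in this file or intel_lrc.h):
+ *
+ *	ret = intel_logical_ring_reserve_space(req);
+ *	...
+ *	ret = intel_logical_ring_begin(req, 2);
+ *	intel_logical_ring_emit(req->ringbuf, MI_NOOP);
+ *	intel_logical_ring_emit(req->ringbuf, MI_NOOP);
+ *	intel_logical_ring_advance(req->ringbuf);
+ */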
+
 /**
  * execlists_submission() - submit a batchbuffer for execution, Execlists style
  * @dev: DRM device.
@@ -839,16 +829,15 @@ static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
  *
  * Return: non-zero if the submission fails.
  */
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
-			       struct intel_context *ctx,
+int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas,
-			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 dispatch_flags)
+			       struct list_head *vmas)
 {
+	struct drm_device       *dev = params->dev;
+	struct intel_engine_cs  *ring = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
 	int ret;
@@ -899,13 +888,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 		return -EINVAL;
 	}
 
-	ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
+	ret = execlists_move_to_gpu(params->request, vmas);
 	if (ret)
 		return ret;
 
 	if (ring == &dev_priv->ring[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+		ret = intel_logical_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
@@ -918,14 +907,17 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
-	ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
+	exec_start = params->batch_obj_vm_offset +
+		     args->batch_start_offset;
+
+	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
-	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+	i915_gem_execbuffer_move_to_active(vmas, params->request);
+	i915_gem_execbuffer_retire_commands(params);
 
 	return 0;
 }
@@ -950,7 +942,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 				ctx->engine[ring->id].state;
 
 		if (ctx_obj && (ctx != ring->default_context))
-			intel_lr_context_unpin(ring, ctx);
+			intel_lr_context_unpin(req);
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
 	}
@@ -978,16 +970,15 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
 	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
-				  struct intel_context *ctx)
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (!ring->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
+	ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -995,15 +986,15 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-		struct intel_context *ctx)
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 	int ret = 0;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-	if (ctx->engine[ring->id].pin_count++ == 0) {
+	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
 		ret = i915_gem_obj_ggtt_pin(ctx_obj,
 				GEN8_LR_CONTEXT_ALIGN, 0);
 		if (ret)
@@ -1021,31 +1012,31 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
 reset_pin_count:
-	ctx->engine[ring->id].pin_count = 0;
+	rq->ctx->engine[ring->id].pin_count = 0;
 
 	return ret;
 }
 
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-		struct intel_context *ctx)
+void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
 	if (ctx_obj) {
 		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--ctx->engine[ring->id].pin_count == 0) {
+		if (--rq->ctx->engine[ring->id].pin_count == 0) {
 			intel_unpin_ringbuffer_obj(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
 		}
 	}
 }
 
-static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
-					       struct intel_context *ctx)
+static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
@@ -1054,11 +1045,11 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
 		return 0;
 
 	ring->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(ringbuf, ctx);
+	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
+	ret = intel_logical_ring_begin(req, w->count * 2 + 2);
 	if (ret)
 		return ret;
 
@@ -1072,13 +1063,361 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
 	intel_logical_ring_advance(ringbuf);
 
 	ring->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(ringbuf, ctx);
+	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
+#define wa_ctx_emit(batch, index, cmd)					\
+	do {								\
+		int __index = (index)++;				\
+		if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
+			return -ENOSPC;					\
+		}							\
+		batch[__index] = (cmd);					\
+	} while (0)
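+
+/*
+ * Illustrative use of wa_ctx_emit() (sketch only; example_emit_noops is a
+ * made-up name): the macro bounds-checks and post-increments the caller's
+ * local index, and makes the enclosing function return -ENOSPC if the
+ * single backing page would overflow.
+ *
+ *	static int example_emit_noops(uint32_t *batch, uint32_t index, int n)
+ *	{
+ *		while (n--)
+ *			wa_ctx_emit(batch, index, MI_NOOP);
+ *		return index;
+ *	}
+ */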
+
+/*
+ * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
+ * PIPE_CONTROL instruction. This is required for the flush to happen
+ * correctly, but there is a slight complication: the WA is applied in a
+ * batch whose contents are only initialized once, so we cannot read the
+ * register value at the start and reuse it later. Hence we save its value
+ * to memory, upload a constant value with bit21 set, and then restore the
+ * saved value. To simplify the WA, the constant is formed from the default
+ * value of this register. This shouldn't be a problem because we only
+ * modify it for a short period and this batch is non-preemptible. We could
+ * of course use additional instructions that read the actual register
+ * value at that time and set our bit of interest, but that makes the WA
+ * more complicated.
+ *
+ * This WA is also required for Gen9, so it is extracted into a function to
+ * avoid code duplication.
+ */
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+						uint32_t *const batch,
+						uint32_t index)
+{
+	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+	/*
+	 * WaDisableLSQCROPERFforOCL:skl
+	 * This WA is implemented in skl_init_clock_gating() but since
+	 * this batch updates GEN8_L3SQCREG4 with the default value, we need
+	 * to set this bit here to retain the WA during the flush.
+	 */
+	if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
+
+	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) |
+				   MI_SRM_LRM_GLOBAL_GTT));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, 0);
+
+	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, l3sqc4_flush);
+
+	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+	wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
+				   PIPE_CONTROL_DC_FLUSH_ENABLE));
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+
+	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) |
+				   MI_SRM_LRM_GLOBAL_GTT));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, 0);
+
+	return index;
+}
+
+static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t offset,
+				    uint32_t start_alignment)
+{
+	return wa_ctx->offset = ALIGN(offset, start_alignment);
+}
+
+static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
+			     uint32_t offset,
+			     uint32_t size_alignment)
+{
+	wa_ctx->size = offset - wa_ctx->offset;
+
+	WARN(wa_ctx->size % size_alignment,
+	     "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
+	     wa_ctx->size, size_alignment);
+	return 0;
+}
+
+/**
+ * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ *  offset: specifies the start of the batch, should be cache-aligned. This
+ *    is updated with the offset value received as input.
+ *  size: size of the batch in DWORDs, but HW expects it in terms of
+ *    cachelines.
+ * @batch: page in which WAs are loaded
+ * @offset: This field specifies the start of the batch; it should be
+ *  cache-aligned, otherwise it is adjusted accordingly.
+ *  Typically we only have one indirect_ctx and one per_ctx batch buffer
+ *  which are initialized at the beginning and shared across all contexts,
+ *  but this field helps us to have multiple batches at different offsets
+ *  and select them based on some criteria. At the moment this batch always
+ *  starts at the beginning of the page and we don't have multiple wa_ctx
+ *  batch buffers.
+ *
+ *  The number of WAs applied is not known at the beginning; we use this
+ *  field to return the number of DWORDs written.
+ *
+ *  Note that this batch does not contain MI_BATCH_BUFFER_END, so NOOPs are
+ *  added as padding to make it cacheline-aligned. MI_BATCH_BUFFER_END will
+ *  be added to the per-ctx batch and together they make a complete batch
+ *  buffer.
+ *
+ * Return: non-zero if we exceed the PAGE_SIZE limit.
+ */
+
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+				    struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t *const batch,
+				    uint32_t *offset)
+{
+	uint32_t scratch_addr;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:bdw,chv */
+	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
+	if (IS_BROADWELL(ring->dev)) {
+		index = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+		if (index < 0)
+			return index;
+	}
+
+	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
+	/* Actual scratch location is at 128 bytes offset */
+	scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+				   PIPE_CONTROL_GLOBAL_GTT_IVB |
+				   PIPE_CONTROL_CS_STALL |
+				   PIPE_CONTROL_QW_WRITE));
+	wa_ctx_emit(batch, index, scratch_addr);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+
+	/* Pad to end of cacheline */
+	while (index % CACHELINE_DWORDS)
+		wa_ctx_emit(batch, index, MI_NOOP);
+
+	/*
+	 * MI_BATCH_BUFFER_END is not required in the indirect ctx BB because
+	 * execution depends on the length specified in terms of cache lines
+	 * in the CTX_RCS_INDIRECT_CTX register.
+	 */
+
+	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+/**
+ * gen8_init_perctx_bb() - initialize per ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ *  offset: specifies start of the batch, should be cache-aligned.
+ *  size: size of the batch in DWORDS but HW expects in terms of cachelines
+ * @batch: page in which WA are loaded
+ * @offset: This field specifies the start of this batch.
+ *   This batch is started immediately after indirect_ctx batch. Since we ensure
+ *   that indirect_ctx ends on a cacheline this batch is aligned automatically.
+ *
+ *   The number of DWORDS written are returned using this field.
+ *
+ *  This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
+ *  to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
+ */
+static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+			       struct i915_wa_ctx_bb *wa_ctx,
+			       uint32_t *const batch,
+			       uint32_t *offset)
+{
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:bdw,chv */
+	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+	return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+				    struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t *const batch,
+				    uint32_t *offset)
+{
+	int ret;
+	struct drm_device *dev = ring->dev;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
+	ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+	if (ret < 0)
+		return ret;
+	index = ret;
+
+	/* Pad to end of cacheline */
+	while (index % CACHELINE_DWORDS)
+		wa_ctx_emit(batch, index, MI_NOOP);
+
+	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+			       struct i915_wa_ctx_bb *wa_ctx,
+			       uint32_t *const batch,
+			       uint32_t *offset)
+{
+	struct drm_device *dev = ring->dev;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+		wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
+		wa_ctx_emit(batch, index,
+			    _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
+		wa_ctx_emit(batch, index, MI_NOOP);
+	}
+
+	/* WaDisableCtxRestoreArbitration:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+	return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+{
+	int ret;
+
+	ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
+	if (!ring->wa_ctx.obj) {
+		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
+		return -ENOMEM;
+	}
+
+	ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+	if (ret) {
+		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
+				 ret);
+		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+{
+	if (ring->wa_ctx.obj) {
+		i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
+		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+		ring->wa_ctx.obj = NULL;
+	}
+}
+
+static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+{
+	int ret;
+	uint32_t *batch;
+	uint32_t offset;
+	struct page *page;
+	struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+
+	WARN_ON(ring->id != RCS);
+
+	/* update this when WAs for higher gens are added */
+	if (INTEL_INFO(ring->dev)->gen > 9) {
+		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
+			  INTEL_INFO(ring->dev)->gen);
+		return 0;
+	}
+
+	/* some WAs perform writes to the scratch page; ensure it is valid */
+	if (ring->scratch.obj == NULL) {
+		DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+		return -EINVAL;
+	}
+
+	ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+		return ret;
+	}
+
+	page = i915_gem_object_get_page(wa_ctx->obj, 0);
+	batch = kmap_atomic(page);
+	offset = 0;
+
+	if (INTEL_INFO(ring->dev)->gen == 8) {
+		ret = gen8_init_indirectctx_bb(ring,
+					       &wa_ctx->indirect_ctx,
+					       batch,
+					       &offset);
+		if (ret)
+			goto out;
+
+		ret = gen8_init_perctx_bb(ring,
+					  &wa_ctx->per_ctx,
+					  batch,
+					  &offset);
+		if (ret)
+			goto out;
+	} else if (INTEL_INFO(ring->dev)->gen == 9) {
+		ret = gen9_init_indirectctx_bb(ring,
+					       &wa_ctx->indirect_ctx,
+					       batch,
+					       &offset);
+		if (ret)
+			goto out;
+
+		ret = gen9_init_perctx_bb(ring,
+					  &wa_ctx->per_ctx,
+					  batch,
+					  &offset);
+		if (ret)
+			goto out;
+	}
+
+out:
+	kunmap_atomic(batch);
+	if (ret)
+		lrc_destroy_wa_ctx_obj(ring);
+
+	return ret;
+}
+
 static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -1139,19 +1478,64 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
 	return init_workarounds_ring(ring);
 }
 
-static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
-			      struct intel_context *ctx,
+static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+{
+	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
+	int i, ret;
+
+	ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (ret)
+		return ret;
+
+	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
+		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
+
+static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, unsigned dispatch_flags)
 {
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+	/* Don't rely on the hw updating the PDPs, especially in lite-restore.
+	 * Ideally, we should set Force PD Restore in ctx descriptor,
+	 * but we can't. Force Restore would be a second option, but
+	 * it is unsafe in case of lite-restore (because the ctx is
+	 * not idle). */
+	if (req->ctx->ppgtt &&
+	    (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+		ret = intel_logical_ring_emit_pdps(req);
+		if (ret)
+			return ret;
+
+		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+	}
+
+	ret = intel_logical_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+				(ppgtt<<8) |
+				(dispatch_flags & I915_DISPATCH_RS ?
+				 MI_BATCH_RESOURCE_STREAMER : 0));
 	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
 	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -1193,18 +1577,18 @@ static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
-			   struct intel_context *ctx,
+static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *ring = ringbuf->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+	ret = intel_logical_ring_begin(request, 4);
 	if (ret)
 		return ret;
 
@@ -1234,11 +1618,11 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
-				  struct intel_context *ctx,
+static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *ring = ringbuf->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa;
@@ -1270,7 +1654,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
 	vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
 		      flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 12 : 6);
+	ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
 	if (ret)
 		return ret;
 
@@ -1304,9 +1688,9 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
 	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
-static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
-			     struct drm_i915_gem_request *request)
+static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *ring = ringbuf->ring;
 	u32 cmd;
 	int ret;
@@ -1316,7 +1700,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
 	 * used as a workaround for not being allowed to do lite
 	 * restore with HEAD==TAIL (WaIdleLiteRestore).
 	 */
-	ret = intel_logical_ring_begin(ringbuf, request->ctx, 8);
+	ret = intel_logical_ring_begin(request, 8);
 	if (ret)
 		return ret;
 
@@ -1328,11 +1712,10 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
 				(ring->status_page.gfx_addr +
 				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
 	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf,
-		i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+	intel_logical_ring_advance_and_submit(request);
 
 	/*
 	 * Here we add two extra NOOPs as padding to avoid
@@ -1345,49 +1728,53 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
-					      struct intel_context *ctx)
+static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 	struct render_state so;
-	struct drm_i915_file_private *file_priv = ctx->file_priv;
-	struct drm_file *file = file_priv ? file_priv->file : NULL;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(ring, &so);
+	ret = i915_gem_render_state_prepare(req->ring, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = ring->emit_bb_start(ringbuf,
-			ctx,
-			so.ggtt_offset,
-			I915_DISPATCH_SECURE);
+	ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+				       I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+	ret = req->ring->emit_bb_start(req,
+				       (so.ggtt_offset + so.aux_batch_offset),
+				       I915_DISPATCH_SECURE);
+	if (ret)
+		goto out;
+
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
-	ret = __i915_add_request(ring, file, so.obj);
-	/* intel_logical_ring_add_request moves object to inactive if it
-	 * fails */
 out:
 	i915_gem_render_state_fini(&so);
 	return ret;
 }
 
-static int gen8_init_rcs_context(struct intel_engine_cs *ring,
-		       struct intel_context *ctx)
+static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 {
 	int ret;
 
-	ret = intel_logical_ring_workarounds_emit(ring, ctx);
+	ret = intel_logical_ring_workarounds_emit(req);
 	if (ret)
 		return ret;
 
-	return intel_lr_context_render_state_init(ring, ctx);
+	ret = intel_rcs_context_init_mocs(req);
+	/*
+	 * Failing to program the MOCS is non-fatal. The system will not
+	 * run at peak performance, so generate an error and carry on.
+	 */
+	if (ret)
+		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
+
+	return intel_lr_context_render_state_init(req);
 }
 
 /**
@@ -1407,7 +1794,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 
 	intel_logical_ring_stop(ring);
 	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -1419,6 +1805,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 		kunmap(sg_page(ring->status_page.obj->pages->sgl));
 		ring->status_page.obj = NULL;
 	}
+
+	lrc_destroy_wa_ctx_obj(ring);
 }
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
@@ -1478,11 +1866,28 @@ static int logical_render_ring_init(struct drm_device *dev)
 	ring->emit_bb_start = gen8_emit_bb_start;
 
 	ring->dev = dev;
-	ret = logical_ring_init(dev, ring);
+
+	ret = intel_init_pipe_control(ring);
 	if (ret)
 		return ret;
 
-	return intel_init_pipe_control(ring);
+	ret = intel_init_workaround_bb(ring);
+	if (ret) {
+		/*
+		 * We continue even if we fail to initialize the WA batch
+		 * because we only expect rare glitches, nothing critical
+		 * enough to prevent us from using the GPU.
+		 */
+		DRM_ERROR("WA batch buffer initialization failed: %d\n",
+			  ret);
+	}
+
+	ret = logical_ring_init(dev, ring);
+	if (ret)
+		lrc_destroy_wa_ctx_obj(ring);
+
+	return ret;
 }
 
 static int logical_bsd_ring_init(struct drm_device *dev)
@@ -1737,7 +2142,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
 	reg_state[CTX_CONTEXT_CONTROL+1] =
 		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
-				CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+				   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+				   CTX_CTRL_RS_CTX_ENABLE);
 	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
 	reg_state[CTX_RING_HEAD+1] = 0;
 	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
@@ -1762,15 +2168,27 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
 	reg_state[CTX_SECOND_BB_STATE+1] = 0;
 	if (ring->id == RCS) {
-		/* TODO: according to BSpec, the register state context
-		 * for CHV does not have these. OTOH, these registers do
-		 * exist in CHV. I'm waiting for a clarification */
 		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
 		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
 		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
 		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
 		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
 		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+		if (ring->wa_ctx.obj) {
+			struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+
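+			/*
+			 * The low bits of the indirect ctx pointer hold the
+			 * batch size in cachelines; the offset is cacheline
+			 * aligned, so OR-ing it in below is safe.
+			 */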
+			reg_state[CTX_RCS_INDIRECT_CTX+1] =
+				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
+				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
+
+			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
+				CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+
+			reg_state[CTX_BB_PER_CTX_PTR+1] =
+				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
+				0x01;
+		}
 	}
 	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
 	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
@@ -1975,13 +2393,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 		lrc_setup_hardware_status_page(ring, ctx_obj);
 	else if (ring->id == RCS && !ctx->rcs_initialized) {
 		if (ring->init_context) {
-			ret = ring->init_context(ring, ctx);
+			struct drm_i915_gem_request *req;
+
+			ret = i915_gem_request_alloc(ring, ctx, &req);
+			if (ret)
+				return ret;
+
+			ret = ring->init_context(req);
 			if (ret) {
 				DRM_ERROR("ring init context: %d\n", ret);
+				i915_gem_request_cancel(req);
 				ctx->engine[ring->id].ringbuf = NULL;
 				ctx->engine[ring->id].state = NULL;
 				goto error;
 			}
+
+			i915_add_request_no_flush(req);
 		}
 
 		ctx->rcs_initialized = true;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 04d3a6d8b207..64f89f9982a2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -32,18 +32,19 @@
 #define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
+#define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
 #define RING_CONTEXT_STATUS_BUF(ring)	((ring)->mmio_base+0x370)
 #define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)
 
 /* Logical Rings */
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
-					    struct intel_context *ctx);
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
 void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
-				  struct intel_context *ctx);
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
 /**
  * intel_logical_ring_advance() - advance the ringbuffer tail
  * @ringbuf: Ringbuffer to advance.
@@ -70,20 +71,16 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 void intel_lr_context_free(struct intel_context *ctx);
 int intel_lr_context_deferred_create(struct intel_context *ctx,
 				     struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-		struct intel_context *ctx);
+void intel_lr_context_unpin(struct drm_i915_gem_request *req);
 void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
-			       struct intel_context *ctx,
+struct i915_execbuffer_params;
+int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas,
-			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 dispatch_flags);
+			       struct list_head *vmas);
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 161ab26f81fb..881b5d13592e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -239,8 +239,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct intel_connector *intel_connector =
-		&lvds_encoder->attached_connector->base;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, stat_reg;
 
@@ -252,8 +250,6 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 		stat_reg = PP_STATUS;
 	}
 
-	intel_panel_disable_backlight(intel_connector);
-
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
 	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power off\n");
@@ -262,6 +258,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 	POSTING_READ(lvds_encoder->reg);
 }
 
+static void gmch_disable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+
+	intel_panel_disable_backlight(intel_connector);
+
+	intel_disable_lvds(encoder);
+}
+
+static void pch_disable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+
+	intel_panel_disable_backlight(intel_connector);
+}
+
+static void pch_post_disable_lvds(struct intel_encoder *encoder)
+{
+	intel_disable_lvds(encoder);
+}
+
 static enum drm_mode_status
 intel_lvds_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
@@ -452,7 +473,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 	 */
 	if (!HAS_PCH_SPLIT(dev)) {
 		drm_modeset_lock_all(dev);
-		intel_modeset_setup_hw_state(dev, true);
+		intel_display_resume(dev);
 		drm_modeset_unlock_all(dev);
 	}
 
@@ -528,7 +549,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_lvds_set_property,
@@ -942,12 +963,6 @@ void intel_lvds_init(struct drm_device *dev)
 	if (dmi_check_system(intel_no_lvds))
 		return;
 
-	pin = GMBUS_PIN_PANEL;
-	if (!lvds_is_present_in_vbt(dev, &pin)) {
-		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
-		return;
-	}
-
 	if (HAS_PCH_SPLIT(dev)) {
 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
 			return;
@@ -957,6 +972,16 @@ void intel_lvds_init(struct drm_device *dev)
 		}
 	}
 
+	pin = GMBUS_PIN_PANEL;
+	if (!lvds_is_present_in_vbt(dev, &pin)) {
+		u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS;
+		if ((I915_READ(reg) & LVDS_PORT_EN) == 0) {
+			DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+			return;
+		}
+		DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+	}
+
 	lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
 	if (!lvds_encoder)
 		return;
@@ -988,7 +1013,12 @@ void intel_lvds_init(struct drm_device *dev)
 	intel_encoder->enable = intel_enable_lvds;
 	intel_encoder->pre_enable = intel_pre_enable_lvds;
 	intel_encoder->compute_config = intel_lvds_compute_config;
-	intel_encoder->disable = intel_disable_lvds;
+	if (HAS_PCH_SPLIT(dev_priv)) {
+		intel_encoder->disable = pch_disable_lvds;
+		intel_encoder->post_disable = pch_post_disable_lvds;
+	} else {
+		intel_encoder->disable = gmch_disable_lvds;
+	}
 	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
 	intel_encoder->get_config = intel_lvds_get_config;
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1068,24 +1098,8 @@ void intel_lvds_init(struct drm_device *dev)
 			drm_mode_debug_printmodeline(scan);
 
 			fixed_mode = drm_mode_duplicate(dev, scan);
-			if (fixed_mode) {
-				downclock_mode =
-					intel_find_panel_downclock(dev,
-					fixed_mode, connector);
-				if (downclock_mode != NULL &&
-					i915.lvds_downclock) {
-					/* We found the downclock for LVDS. */
-					dev_priv->lvds_downclock_avail = true;
-					dev_priv->lvds_downclock =
-						downclock_mode->clock;
-					DRM_DEBUG_KMS("LVDS downclock is found"
-					" in EDID. Normal clock %dKhz, "
-					"downclock %dKhz\n",
-					fixed_mode->clock,
-					dev_priv->lvds_downclock);
-				}
+			if (fixed_mode)
 				goto out;
-			}
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
new file mode 100644
index 000000000000..6d3c6c0a5c62
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_mocs.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+/* structures required */
+struct drm_i915_mocs_entry {
+	u32 control_value;
+	u16 l3cc_value;
+};
+
+struct drm_i915_mocs_table {
+	u32 size;
+	const struct drm_i915_mocs_entry *table;
+};
+
+/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
+#define LE_CACHEABILITY(value)	((value) << 0)
+#define LE_TGT_CACHE(value)	((value) << 2)
+#define LE_LRUM(value)		((value) << 4)
+#define LE_AOM(value)		((value) << 6)
+#define LE_RSC(value)		((value) << 7)
+#define LE_SCC(value)		((value) << 8)
+#define LE_PFM(value)		((value) << 11)
+#define LE_SCF(value)		((value) << 14)
+
+/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
+#define L3_ESC(value)		((value) << 0)
+#define L3_SCC(value)		((value) << 1)
+#define L3_CACHEABILITY(value)	((value) << 4)
+
+/* Helper defines */
+#define GEN9_NUM_MOCS_ENTRIES	62  /* 62 out of 64 - the last two entries are reserved. */
+
+/* (e)LLC caching options */
+#define LE_PAGETABLE		0
+#define LE_UC			1
+#define LE_WT			2
+#define LE_WB			3
+
+/* L3 caching options */
+#define L3_DIRECT		0
+#define L3_UC			1
+#define L3_RESERVED		2
+#define L3_WB			3
+
+/* Target cache */
+#define ELLC			0
+#define LLC			1
+#define LLC_ELLC		2
+
+/*
+ * MOCS tables
+ *
+ * These are the MOCS tables that are programmed across all the rings.
+ * The control value is programmed to all the rings that support the
+ * MOCS registers, while the l3cc values are only programmed to the
+ * LNCFCMOCS0 - LNCFCMOCS31 registers.
+ *
+ * These tables are intended to be kept reasonably consistent across
+ * platforms. However, some of the fields are not applicable to all of
+ * them.
+ *
+ * Entries not part of the following tables are undefined as far as
+ * userspace is concerned and shouldn't be relied upon.  For the time
+ * being they will be implicitly initialized to the strictest caching
+ * configuration (uncached) to guarantee forwards compatibility with
+ * userspace programs written against more recent kernels providing
+ * additional MOCS entries.
+ *
+ * NOTE: These tables MUST start with an uncached entry and the length
+ *       MUST be less than 63 as the last two registers are reserved
+ *       by the hardware.  These tables are part of the kernel ABI and
+ *       may only be updated incrementally by adding entries at the
+ *       end.
+ */
+static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+	/* { 0x00000009, 0x0010 } */
+	{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+	/* { 0x00000038, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+	/* { 0x0000003b, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
+
+/* NOTE: the LE_TGT_CACHE is not used on Broxton */
+static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
+	/* { 0x00000009, 0x0010 } */
+	{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+	/* { 0x00000038, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+	/* { 0x0000003b, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
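/*
 * For illustration (a sketch, not part of the patch): the raw values
 * quoted in the comments above follow directly from the field macros.
 * For the third entry:
 *
 *   LE_CACHEABILITY(LE_WB) = 3 << 0 = 0x00000003
 *   LE_TGT_CACHE(LLC_ELLC) = 2 << 2 = 0x00000008
 *   LE_LRUM(3)             = 3 << 4 = 0x00000030
 *                            OR'd   = 0x0000003b
 *
 *   L3_CACHEABILITY(L3_WB) = 3 << 4 = 0x0030
 */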
+
+/**
+ * get_mocs_settings() - get the applicable MOCS table for the device
+ * @dev:        DRM device.
+ * @table:      Output table that will be made to point at appropriate
+ *              MOCS values for the device.
+ *
+ * This function looks up the MOCS table that needs to be programmed
+ * for the given platform and reports whether such a table exists.
+ *
+ * Return: true if there are applicable MOCS settings for the device.
+ */
+static bool get_mocs_settings(struct drm_device *dev,
+			      struct drm_i915_mocs_table *table)
+{
+	bool result = false;
+
+	if (IS_SKYLAKE(dev)) {
+		table->size  = ARRAY_SIZE(skylake_mocs_table);
+		table->table = skylake_mocs_table;
+		result = true;
+	} else if (IS_BROXTON(dev)) {
+		table->size  = ARRAY_SIZE(broxton_mocs_table);
+		table->table = broxton_mocs_table;
+		result = true;
+	} else {
+		WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+			  "Platform that should have a MOCS table does not.\n");
+	}
+
+	return result;
+}
+
+/**
+ * emit_mocs_control_table() - emit the mocs control table
+ * @req:	Request to set up the MOCS table for.
+ * @table:	The values to program into the control regs.
+ * @reg_base:	The base for the engine that needs to be programmed.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table starting at the given address.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+				   const struct drm_i915_mocs_table *table,
+				   u32 reg_base)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	unsigned int index;
+	int ret;
+
+	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+		return -ENODEV;
+
+	ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (ret) {
+		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+		return ret;
+	}
+
+	intel_logical_ring_emit(ringbuf,
+				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+
+	for (index = 0; index < table->size; index++) {
+		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit(ringbuf,
+					table->table[index].control_value);
+	}
+
+	/*
+	 * OK, now set the unused entries to uncached. These entries
+	 * are officially undefined and no contract is given for their
+	 * contents and settings.
+	 *
+	 * Entry 0 in the table is uncached - so we are just writing
+	 * that value to all the unused entries.
+	 */
+	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
+		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
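/*
 * A sketch of the command stream this produces (derived from the two
 * loops above; 2 + 2 * GEN9_NUM_MOCS_ENTRIES = 126 dwords in total):
 *
 *   MI_LOAD_REGISTER_IMM(62)
 *   reg_base + 0x00, table->table[0].control_value
 *   reg_base + 0x04, table->table[1].control_value
 *   ...
 *   reg_base + 0xf4, table->table[0].control_value  (unused -> uncached)
 *   MI_NOOP
 */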
+
+/**
+ * emit_mocs_l3cc_table() - emit the mocs l3cc table
+ * @req:	Request to set up the MOCS table for.
+ * @table:	The values to program into the control regs.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table. This register set is programmed in pairs, two table
+ * entries per 32-bit register.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+				const struct drm_i915_mocs_table *table)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	unsigned int count;
+	unsigned int i;
+	u32 value;
+	u32 filler = (table->table[0].l3cc_value & 0xffff) |
+			((table->table[0].l3cc_value & 0xffff) << 16);
+	int ret;
+
+	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+		return -ENODEV;
+
+	ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (ret) {
+		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+		return ret;
+	}
+
+	intel_logical_ring_emit(ringbuf,
+			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+
+	for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
+		value = (table->table[count].l3cc_value & 0xffff) |
+			((table->table[count + 1].l3cc_value & 0xffff) << 16);
+
+		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit(ringbuf, value);
+	}
+
+	if (table->size & 0x01) {
+		/* Odd table size - 1 left over */
+		value = (table->table[count].l3cc_value & 0xffff) |
+			((table->table[0].l3cc_value & 0xffff) << 16);
+	} else
+		value = filler;
+
+	/*
+	 * Now set the rest of the table to uncached - use entry 0 as
+	 * this will be uncached. Leave the last pair uninitialised as
+	 * they are reserved by the hardware.
+	 */
+	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
+		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit(ringbuf, value);
+
+		value = filler;
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
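/*
 * A sketch of the pairing performed above: each 32-bit LNCFCMOCS
 * register holds two 16-bit l3cc values, the even entry in bits 15:0
 * and the odd entry in bits 31:16. With the three-entry tables defined
 * earlier in this patch:
 *
 *   LNCFCMOCS0     = 0x0010 | (0x0030 << 16) = 0x00300010  (entries 0, 1)
 *   LNCFCMOCS1     = 0x0030 | (0x0010 << 16) = 0x00100030  (entry 2 + filler)
 *   LNCFCMOCS2..30 = 0x0010 | (0x0010 << 16) = 0x00100010  (filler pairs)
 */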
+
+/**
+ * intel_rcs_context_init_mocs() - program the MOCS registers.
+ * @req:	Request to set up the MOCS tables for.
+ *
+ * This function emits the commands carrying the values required for
+ * programming the MOCS registers for all the currently supported
+ * rings.
+ *
+ * Some of these registers are stored in the RCS context, so they are
+ * emitted at the same time to ensure a newly created context has them
+ * set up. They have to be emitted at the start of the context, as
+ * writing the ELSP will re-init some of these registers back to the
+ * hw values.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+{
+	struct drm_i915_mocs_table t;
+	int ret;
+
+	if (get_mocs_settings(req->ring->dev, &t)) {
+		/* Program the control registers */
+		ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
+		if (ret)
+			return ret;
+
+		/* Now program the l3cc registers */
+		ret = emit_mocs_l3cc_table(req, &t);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
new file mode 100644
index 000000000000..76e45b1748b3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INTEL_MOCS_H
+#define INTEL_MOCS_H
+
+/**
+ * DOC: Memory Objects Control State (MOCS)
+ *
+ * Motivation:
+ * In previous gens the MOCS setting was a value set by userland as part of
+ * the batch. In gen9 this has changed to a single table (per ring) that all
+ * batches now reference by index instead of programming the MOCS directly.
+ *
+ * The one wrinkle in this is that only PART of the MOCS tables is included
+ * in context (the GFX_MOCS_0 - GFX_MOCS_63 and the LNCFCMOCS0 - LNCFCMOCS31
+ * registers). The rest (the settings for the other rings) is not.
+ *
+ * This table needs to be set at system start-up because of the way the
+ * table interacts with the contexts and the GmmLib interface.
+ *
+ * Implementation:
+ *
+ * The tables (one per supported platform) are defined in intel_mocs.c
+ * and are programmed in the first batch after the context is loaded
+ * (with the hardware workarounds). This will then let the usual
+ * context handling keep the MOCS in step.
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+
+#endif
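/*
 * A sketch of a plausible call site; the identifiers below are
 * illustrative assumptions, not taken from this patch. Per the DOC
 * comment, the tables are programmed in the first batch after the
 * context is loaded, alongside the hardware workarounds, so the
 * expected shape is a render-ring context-init hook:
 */
static int rcs_context_init_sketch(struct drm_i915_gem_request *req)
{
	int ret;

	/* emit the workarounds first, then the MOCS set-up */
	ret = intel_logical_ring_workarounds_emit(req); /* assumed helper */
	if (ret)
		return ret;

	return intel_rcs_context_init_mocs(req);
}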
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 481337436f72..cb1c65739425 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,8 +25,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/acpi.h>
 #include <acpi/video.h>
 
@@ -53,6 +51,7 @@
 #define MBOX_ACPI      (1<<0)
 #define MBOX_SWSCI     (1<<1)
 #define MBOX_ASLE      (1<<2)
+#define MBOX_ASLE_EXT  (1<<4)
 
 struct opregion_header {
 	u8 signature[16];
@@ -62,7 +61,10 @@ struct opregion_header {
 	u8 vbios_ver[16];
 	u8 driver_ver[16];
 	u32 mboxes;
-	u8 reserved[164];
+	u32 driver_model;
+	u32 pcon;
+	u8 dver[32];
+	u8 rsvd[124];
 } __packed;
 
 /* OpRegion mailbox #1: public ACPI methods */
@@ -84,7 +86,9 @@ struct opregion_acpi {
 	u32 evts;       /* ASL supported events */
 	u32 cnot;       /* current OS notification */
 	u32 nrdy;       /* driver status */
-	u8 rsvd2[60];
+	u32 did2[7];	/* extended supported display devices ID list */
+	u32 cpd2[7];	/* extended attached display devices list */
+	u8 rsvd2[4];
 } __packed;
 
 /* OpRegion mailbox #2: SWSCI */
@@ -113,7 +117,10 @@ struct opregion_asle {
 	u32 pcft;       /* power conservation features */
 	u32 srot;       /* supported rotation angles */
 	u32 iuer;       /* IUER events */
-	u8 rsvd[86];
+	u64 fdss;
+	u32 fdsp;
+	u32 stat;
+	u8 rsvd[70];
 } __packed;
 
 /* Driver readiness indicator */
@@ -611,6 +618,38 @@ static struct notifier_block intel_opregion_notifier = {
  * (version 3)
  */
 
+static u32 get_did(struct intel_opregion *opregion, int i)
+{
+	u32 did;
+
+	if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+		did = ioread32(&opregion->acpi->didl[i]);
+	} else {
+		i -= ARRAY_SIZE(opregion->acpi->didl);
+
+		if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+			return 0;
+
+		did = ioread32(&opregion->acpi->did2[i]);
+	}
+
+	return did;
+}
+
+static void set_did(struct intel_opregion *opregion, int i, u32 val)
+{
+	if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+		iowrite32(val, &opregion->acpi->didl[i]);
+	} else {
+		i -= ARRAY_SIZE(opregion->acpi->didl);
+
+		if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+			return;
+
+		iowrite32(val, &opregion->acpi->did2[i]);
+	}
+}
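/*
 * A sketch of the index mapping implemented by the two helpers above
 * (assuming the declared array sizes, didl[8] and did2[7]):
 *
 *   i = 0..7    ->  opregion->acpi->didl[i]
 *   i = 8..14   ->  opregion->acpi->did2[i - 8]
 *   i >= 15     ->  WARN; reads return 0, writes are dropped
 */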
+
 static void intel_didl_outputs(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -620,7 +659,7 @@ static void intel_didl_outputs(struct drm_device *dev)
 	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
 	unsigned long long device_id;
 	acpi_status status;
-	u32 temp;
+	u32 temp, max_outputs;
 	int i = 0;
 
 	handle = ACPI_HANDLE(&dev->pdev->dev);
@@ -639,41 +678,50 @@ static void intel_didl_outputs(struct drm_device *dev)
 	}
 
 	if (!acpi_video_bus) {
-		pr_warn("No ACPI video bus found\n");
+		DRM_ERROR("No ACPI video bus found\n");
 		return;
 	}
 
+	/*
+	 * In theory, did2, the extended didl, gets added at opregion version
+	 * 3.0. In practice, however, we're supposed to set it for earlier
+	 * versions as well, since a BIOS that doesn't understand did2 should
+	 * not look at it anyway. Use a variable so we can tweak this if a need
+	 * arises later.
+	 */
+	max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
+		ARRAY_SIZE(opregion->acpi->did2);
+
 	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
-		if (i >= 8) {
-			dev_dbg(&dev->pdev->dev,
-				"More than 8 outputs detected via ACPI\n");
+		if (i >= max_outputs) {
+			DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
+				      max_outputs);
 			return;
 		}
-		status =
-			acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
-						NULL, &device_id);
+		status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+					       NULL, &device_id);
 		if (ACPI_SUCCESS(status)) {
 			if (!device_id)
 				goto blind_set;
-			iowrite32((u32)(device_id & 0x0f0f),
-				  &opregion->acpi->didl[i]);
-			i++;
+			set_did(opregion, i++, (u32)(device_id & 0x0f0f));
 		}
 	}
 
 end:
-	/* If fewer than 8 outputs, the list must be null terminated */
-	if (i < 8)
-		iowrite32(0, &opregion->acpi->didl[i]);
+	DRM_DEBUG_KMS("%d outputs detected\n", i);
+
+	/* If fewer than max outputs, the list must be null terminated */
+	if (i < max_outputs)
+		set_did(opregion, i, 0);
 	return;
 
 blind_set:
 	i = 0;
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		int output_type = ACPI_OTHER_OUTPUT;
-		if (i >= 8) {
-			dev_dbg(&dev->pdev->dev,
-				"More than 8 outputs in connector list\n");
+		if (i >= max_outputs) {
+			DRM_DEBUG_KMS("More than %u outputs in connector list\n",
+				      max_outputs);
 			return;
 		}
 		switch (connector->connector_type) {
@@ -698,9 +746,8 @@ blind_set:
 			output_type = ACPI_LVDS_OUTPUT;
 			break;
 		}
-		temp = ioread32(&opregion->acpi->didl[i]);
-		iowrite32(temp | (1<<31) | output_type | i,
-			  &opregion->acpi->didl[i]);
+		temp = get_did(opregion, i);
+		set_did(opregion, i, temp | (1 << 31) | output_type | i);
 		i++;
 	}
 	goto end;
@@ -720,7 +767,7 @@ static void intel_setup_cadls(struct drm_device *dev)
 	 * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
 	 * there are less than eight devices. */
 	do {
-		disp_id = ioread32(&opregion->acpi->didl[i]);
+		disp_id = get_did(opregion, i);
 		iowrite32(disp_id, &opregion->acpi->cadl[i]);
 	} while (++i < 8 && disp_id != 0);
 }
@@ -852,6 +899,11 @@ int intel_opregion_setup(struct drm_device *dev)
 	char buf[sizeof(OPREGION_SIGNATURE)];
 	int err = 0;
 
+	BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
+
 	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
 	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
 	if (asls == 0) {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 25c8ec697da1..444542696a2c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -210,19 +210,14 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 }
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+					 struct drm_i915_gem_request *req,
 					 void (*tail)(struct intel_overlay *))
 {
-	struct drm_device *dev = overlay->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	WARN_ON(overlay->last_flip_req);
-	i915_gem_request_assign(&overlay->last_flip_req,
-					     ring->outstanding_lazy_request);
-	ret = i915_add_request(ring);
-	if (ret)
-		return ret;
+	i915_gem_request_assign(&overlay->last_flip_req, req);
+	i915_add_request(req);
 
 	overlay->flip_tail = tail;
 	ret = i915_wait_request(overlay->last_flip_req);
@@ -239,15 +234,22 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
 	int ret;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	ret = intel_ring_begin(ring, 4);
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 	if (ret)
 		return ret;
 
+	ret = intel_ring_begin(req, 4);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
 	overlay->active = true;
 
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -256,7 +258,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
-	return intel_overlay_do_wait_request(overlay, NULL);
+	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
 
 /* overlay needs to be enabled in OCMD reg */
@@ -266,6 +268,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -280,18 +283,25 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	ret = intel_ring_begin(ring, 2);
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 	if (ret)
 		return ret;
 
+	ret = intel_ring_begin(req, 2);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
 	intel_ring_advance(ring);
 
 	WARN_ON(overlay->last_flip_req);
-	i915_gem_request_assign(&overlay->last_flip_req,
-					     ring->outstanding_lazy_request);
-	return i915_add_request(ring);
+	i915_gem_request_assign(&overlay->last_flip_req, req);
+	i915_add_request(req);
+
+	return 0;
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -327,6 +337,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
 
@@ -338,10 +349,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 	if (ret)
 		return ret;
 
+	ret = intel_ring_begin(req, 6);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
 	/* wait for overlay to go idle */
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
@@ -360,7 +377,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	}
 	intel_ring_advance(ring);
 
-	return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
+	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
@@ -404,15 +421,23 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
-		ret = intel_ring_begin(ring, 2);
+		struct drm_i915_gem_request *req;
+
+		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 		if (ret)
 			return ret;
 
+		ret = intel_ring_begin(req, 2);
+		if (ret) {
+			i915_gem_request_cancel(req);
+			return ret;
+		}
+
 		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_advance(ring);
 
-		ret = intel_overlay_do_wait_request(overlay,
+		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
 		if (ret)
 			return ret;
@@ -724,7 +749,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
+	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL,
 						   &i915_ggtt_view_normal);
 	if (ret != 0)
 		return ret;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 55aad2322e10..e2ab3f6ed022 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -32,8 +32,11 @@
 
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
+#include <linux/pwm.h>
 #include "intel_drv.h"
 
+#define CRC_PMIC_PWM_PERIOD_NS	21333
+
 void
 intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 		       struct drm_display_mode *adjusted_mode)
@@ -544,6 +547,15 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
 	return I915_READ(BXT_BLC_PWM_DUTY1);
 }
 
+static u32 pwm_get_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+	int duty_ns;
+
+	duty_ns = pwm_get_duty_cycle(panel->backlight.pwm);
+	return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
+}
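/*
 * A worked example of the conversion above (a sketch): with the
 * assumed 21333 ns CRC PMIC PWM period, a duty cycle of 10666 ns reads
 * back as DIV_ROUND_UP(10666 * 100, 21333) = 50, i.e. backlight levels
 * are kept on a 0-100 percentage scale.
 */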
+
 static u32 intel_panel_get_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -632,6 +644,14 @@ static void bxt_set_backlight(struct intel_connector *connector, u32 level)
 	I915_WRITE(BXT_BLC_PWM_DUTY1, level);
 }
 
+static void pwm_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct intel_panel *panel = &connector->panel;
+	int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+	pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
+}
+
 static void
 intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
 {
@@ -769,6 +789,16 @@ static void bxt_disable_backlight(struct intel_connector *connector)
 	I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE);
 }
 
+static void pwm_disable_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	/* Disable the backlight */
+	pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+	usleep_range(2000, 3000);
+	pwm_disable(panel->backlight.pwm);
+}
+
 void intel_panel_disable_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -1010,6 +1040,14 @@ static void bxt_enable_backlight(struct intel_connector *connector)
 	I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE);
 }
 
+static void pwm_enable_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	pwm_enable(panel->backlight.pwm);
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
 void intel_panel_enable_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -1386,6 +1424,40 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
 	return 0;
 }
 
+static int pwm_setup_backlight(struct intel_connector *connector,
+			       enum pipe pipe)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct intel_panel *panel = &connector->panel;
+	int retval;
+
+	/* Get the PWM chip for backlight control */
+	panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+	if (IS_ERR(panel->backlight.pwm)) {
+		DRM_ERROR("Failed to own the pwm chip\n");
+		panel->backlight.pwm = NULL;
+		return -ENODEV;
+	}
+
+	retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
+			    CRC_PMIC_PWM_PERIOD_NS);
+	if (retval < 0) {
+		DRM_ERROR("Failed to configure the pwm chip\n");
+		pwm_put(panel->backlight.pwm);
+		panel->backlight.pwm = NULL;
+		return retval;
+	}
+
+	panel->backlight.min = 0; /* 0% */
+	panel->backlight.max = 100; /* 100% */
+	panel->backlight.level = DIV_ROUND_UP(
+				 pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+				 CRC_PMIC_PWM_PERIOD_NS);
+	panel->backlight.enabled = panel->backlight.level != 0;
+
+	return 0;
+}
+
 int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 {
 	struct drm_device *dev = connector->dev;
@@ -1429,6 +1501,10 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_panel *panel = &intel_connector->panel;
 
+	/* dispose of the pwm */
+	if (panel->backlight.pwm)
+		pwm_put(panel->backlight.pwm);
+
 	panel->backlight.present = false;
 }
 
@@ -1456,11 +1532,19 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
 		dev_priv->display.set_backlight = pch_set_backlight;
 		dev_priv->display.get_backlight = pch_get_backlight;
 	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.setup_backlight = vlv_setup_backlight;
-		dev_priv->display.enable_backlight = vlv_enable_backlight;
-		dev_priv->display.disable_backlight = vlv_disable_backlight;
-		dev_priv->display.set_backlight = vlv_set_backlight;
-		dev_priv->display.get_backlight = vlv_get_backlight;
+		if (dev_priv->vbt.has_mipi) {
+			dev_priv->display.setup_backlight = pwm_setup_backlight;
+			dev_priv->display.enable_backlight = pwm_enable_backlight;
+			dev_priv->display.disable_backlight = pwm_disable_backlight;
+			dev_priv->display.set_backlight = pwm_set_backlight;
+			dev_priv->display.get_backlight = pwm_get_backlight;
+		} else {
+			dev_priv->display.setup_backlight = vlv_setup_backlight;
+			dev_priv->display.enable_backlight = vlv_enable_backlight;
+			dev_priv->display.disable_backlight = vlv_disable_backlight;
+			dev_priv->display.set_backlight = vlv_set_backlight;
+			dev_priv->display.get_backlight = vlv_get_backlight;
+		}
 	} else if (IS_GEN4(dev)) {
 		dev_priv->display.setup_backlight = i965_setup_backlight;
 		dev_priv->display.enable_backlight = i965_enable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eadc15cddbeb..fff0c22682ee 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -59,6 +59,10 @@ static void gen9_init_clock_gating(struct drm_device *dev)
 	/* WaEnableLbsSlaRetryTimerDecrement:skl */
 	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
 		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+	/* WaDisableKillLogic:bxt,skl */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+		   ECOCHK_DIS_TLB);
 }
 
 static void skl_init_clock_gating(struct drm_device *dev)
@@ -91,10 +95,19 @@ static void skl_init_clock_gating(struct drm_device *dev)
 			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
 	}
 
+	/* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new changes
+	 * involving this register should also be added to the WA batch as required.
+	 */
 	if (INTEL_REVID(dev) <= SKL_REVID_E0)
 		/* WaDisableLSQCROPERFforOCL:skl */
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 			   GEN8_LQSC_RO_PERF_DIS);
+
+	/* WaEnableGapsTsvCreditFix:skl */
+	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+					   GEN9_GAPS_TSV_CREDIT_DISABLE));
+	}
 }
 
 static void bxt_init_clock_gating(struct drm_device *dev)
@@ -334,22 +347,26 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 
 	if (IS_VALLEYVIEW(dev)) {
 		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
-		if (IS_CHERRYVIEW(dev))
-			chv_set_memory_pm5(dev_priv, enable);
+		POSTING_READ(FW_BLC_SELF_VLV);
+		dev_priv->wm.vlv.cxsr = enable;
 	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
 		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+		POSTING_READ(FW_BLC_SELF);
 	} else if (IS_PINEVIEW(dev)) {
 		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
 		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
 		I915_WRITE(DSPFW3, val);
+		POSTING_READ(DSPFW3);
 	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
 		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
 			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
 		I915_WRITE(FW_BLC_SELF, val);
+		POSTING_READ(FW_BLC_SELF);
 	} else if (IS_I915GM(dev)) {
 		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
 			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
 		I915_WRITE(INSTPM, val);
+		POSTING_READ(INSTPM);
 	} else {
 		return;
 	}
@@ -923,223 +940,484 @@ static void vlv_write_wm_values(struct intel_crtc *crtc,
 			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
 	}
 
-	POSTING_READ(DSPFW1);
+	/* zero (unused) WM1 watermarks */
+	I915_WRITE(DSPFW4, 0);
+	I915_WRITE(DSPFW5, 0);
+	I915_WRITE(DSPFW6, 0);
+	I915_WRITE(DSPHOWM1, 0);
 
-	dev_priv->wm.vlv = *wm;
+	POSTING_READ(DSPFW1);
 }
 
 #undef FW_WM_VLV
 
-static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
-					 struct drm_plane *plane)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int entries, prec_mult, drain_latency, pixel_size;
-	int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
-	const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;
-
-	/*
-	 * FIXME the plane might have an fb
-	 * but be invisible (eg. due to clipping)
-	 */
-	if (!intel_crtc->active || !plane->state->fb)
-		return 0;
+enum vlv_wm_level {
+	VLV_WM_LEVEL_PM2,
+	VLV_WM_LEVEL_PM5,
+	VLV_WM_LEVEL_DDR_DVFS,
+	CHV_WM_NUM_LEVELS,
+	VLV_WM_NUM_LEVELS = 1,
+};
 
-	if (WARN(clock == 0, "Pixel clock is zero!\n"))
-		return 0;
+/* latency must be in 0.1us units. */
+static unsigned int vlv_wm_method2(unsigned int pixel_rate,
+				   unsigned int pipe_htotal,
+				   unsigned int horiz_pixels,
+				   unsigned int bytes_per_pixel,
+				   unsigned int latency)
+{
+	unsigned int ret;
 
-	pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
+	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+	ret = DIV_ROUND_UP(ret, 64);
 
-	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
-		return 0;
+	return ret;
+}
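/*
 * A worked example of the method-2 formula above, assuming a 1080p
 * timing: pixel_rate = 148500 (kHz), pipe_htotal = 2200,
 * horiz_pixels = 1920, bytes_per_pixel = 4, latency = 30 (3.0 us):
 *
 *   lines  = (30 * 148500) / (2200 * 10000) = 0
 *   bytes  = (0 + 1) * 1920 * 4             = 7680
 *   result = DIV_ROUND_UP(7680, 64)         = 120 cachelines
 */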
 
-	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+static void vlv_setup_wm_latency(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	prec_mult = high_precision;
-	drain_latency = 64 * prec_mult * 4 / entries;
+	/* all latencies in usec */
+	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
 
-	if (drain_latency > DRAIN_LATENCY_MASK) {
-		prec_mult /= 2;
-		drain_latency = 64 * prec_mult * 4 / entries;
+	if (IS_CHERRYVIEW(dev_priv)) {
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
 	}
-
-	if (drain_latency > DRAIN_LATENCY_MASK)
-		drain_latency = DRAIN_LATENCY_MASK;
-
-	return drain_latency | (prec_mult == high_precision ?
-				DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
 }
 
-static int vlv_compute_wm(struct intel_crtc *crtc,
-			  struct intel_plane *plane,
-			  int fifo_size)
+static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
+				     struct intel_crtc *crtc,
+				     const struct intel_plane_state *state,
+				     int level)
 {
-	int clock, entries, pixel_size;
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	int clock, htotal, pixel_size, width, wm;
 
-	/*
-	 * FIXME the plane might have an fb
-	 * but be invisible (eg. due to clipping)
-	 */
-	if (!crtc->active || !plane->base.state->fb)
+	if (dev_priv->wm.pri_latency[level] == 0)
+		return USHRT_MAX;
+
+	if (!state->visible)
 		return 0;
 
-	pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
+	pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
 	clock = crtc->config->base.adjusted_mode.crtc_clock;
+	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
+	width = crtc->config->pipe_src_w;
+	if (WARN_ON(htotal == 0))
+		htotal = 1;
 
-	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
+	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+		/*
+		 * FIXME the formula gives values that are
+		 * too big for the cursor FIFO, and hence we
+		 * would never be able to use cursors. For
+		 * now just hardcode the watermark.
+		 */
+		wm = 63;
+	} else {
+		wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+				    dev_priv->wm.pri_latency[level] * 10);
+	}
 
-	/*
-	 * Set up the watermark such that we don't start issuing memory
-	 * requests until we are within PND's max deadline value (256us).
-	 * Idea being to be idle as long as possible while still taking
-	 * advatange of PND's deadline scheduling. The limit of 8
-	 * cachelines (used when the FIFO will anyway drain in less time
-	 * than 256us) should match what we would be done if trickle
-	 * feed were enabled.
-	 */
-	return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
+	return min_t(int, wm, USHRT_MAX);
 }
 
-static bool vlv_compute_sr_wm(struct drm_device *dev,
-			      struct vlv_wm_values *wm)
+static void vlv_compute_fifo(struct intel_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *crtc;
-	enum pipe pipe = INVALID_PIPE;
-	int num_planes = 0;
-	int fifo_size = 0;
+	struct drm_device *dev = crtc->base.dev;
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
 	struct intel_plane *plane;
+	unsigned int total_rate = 0;
+	const int fifo_size = 512 - 1;
+	int fifo_extra, fifo_left = fifo_size;
 
-	wm->sr.cursor = wm->sr.plane = 0;
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
 
-	crtc = single_enabled_crtc(dev);
-	/* maxfifo not supported on pipe C */
-	if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
-		pipe = to_intel_crtc(crtc)->pipe;
-		num_planes = !!wm->pipe[pipe].primary +
-			!!wm->pipe[pipe].sprite[0] +
-			!!wm->pipe[pipe].sprite[1];
-		fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+			continue;
+
+		if (state->visible) {
+			wm_state->num_active_planes++;
+			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+		}
 	}
 
-	if (fifo_size == 0 || num_planes > 1)
-		return false;
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+		unsigned int rate;
+
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+			plane->wm.fifo_size = 63;
+			continue;
+		}
+
+		if (!state->visible) {
+			plane->wm.fifo_size = 0;
+			continue;
+		}
+
+		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+		plane->wm.fifo_size = fifo_size * rate / total_rate;
+		fifo_left -= plane->wm.fifo_size;
+	}
+
+	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);
 
-	wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
-				       to_intel_plane(crtc->cursor), 0x3f);
+	/* spread the remainder evenly */
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		int plane_extra;
+
+		if (fifo_left == 0)
+			break;
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
 		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
-		if (plane->pipe != pipe)
+		/* give it all to the first plane if none are active */
+		if (plane->wm.fifo_size == 0 &&
+		    wm_state->num_active_planes)
 			continue;
 
-		wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
-					      plane, fifo_size);
-		if (wm->sr.plane != 0)
+		plane_extra = min(fifo_extra, fifo_left);
+		plane->wm.fifo_size += plane_extra;
+		fifo_left -= plane_extra;
+	}
+
+	WARN_ON(fifo_left != 0);
+}
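/*
 * A worked example of the split above (a sketch): with a visible 4 Bpp
 * primary and a visible 2 Bpp sprite sharing fifo_size = 511:
 *
 *   primary = 511 * 4 / 6 = 340
 *   sprite  = 511 * 2 / 6 = 170    (fifo_left = 1)
 *
 * fifo_extra = DIV_ROUND_UP(1, 2) = 1, and the leftover cacheline is
 * handed to the first non-cursor plane visited, giving a 341/170 split
 * that sums back to 511.
 */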
+
+static void vlv_invert_wms(struct intel_crtc *crtc)
+{
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	int level;
+
+	for (level = 0; level < wm_state->num_levels; level++) {
+		struct drm_device *dev = crtc->base.dev;
+		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+		struct intel_plane *plane;
+
+		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
+		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
+
+		for_each_intel_plane_on_crtc(dev, crtc, plane) {
+			switch (plane->base.type) {
+				int sprite;
+			case DRM_PLANE_TYPE_CURSOR:
+				wm_state->wm[level].cursor = plane->wm.fifo_size -
+					wm_state->wm[level].cursor;
+				break;
+			case DRM_PLANE_TYPE_PRIMARY:
+				wm_state->wm[level].primary = plane->wm.fifo_size -
+					wm_state->wm[level].primary;
+				break;
+			case DRM_PLANE_TYPE_OVERLAY:
+				sprite = plane->plane;
+				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
+					wm_state->wm[level].sprite[sprite];
+				break;
+			}
+		}
+	}
+}
+
+static void vlv_compute_wm(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	struct intel_plane *plane;
+	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+	int level;
+
+	memset(wm_state, 0, sizeof(*wm_state));
+
+	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
+	if (IS_CHERRYVIEW(dev))
+		wm_state->num_levels = CHV_WM_NUM_LEVELS;
+	else
+		wm_state->num_levels = VLV_WM_NUM_LEVELS;
+
+	wm_state->num_active_planes = 0;
+
+	vlv_compute_fifo(crtc);
+
+	if (wm_state->num_active_planes != 1)
+		wm_state->cxsr = false;
+
+	if (wm_state->cxsr) {
+		for (level = 0; level < wm_state->num_levels; level++) {
+			wm_state->sr[level].plane = sr_fifo_size;
+			wm_state->sr[level].cursor = 63;
+		}
+	}
+
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+
+		if (!state->visible)
+			continue;
+
+		/* normal watermarks */
+		for (level = 0; level < wm_state->num_levels; level++) {
+			int wm = vlv_compute_wm_level(plane, crtc, state, level);
+			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
+
+			/* hack */
+			if (WARN_ON(level == 0 && wm > max_wm))
+				wm = max_wm;
+
+			if (wm > plane->wm.fifo_size)
+				break;
+
+			switch (plane->base.type) {
+				int sprite;
+			case DRM_PLANE_TYPE_CURSOR:
+				wm_state->wm[level].cursor = wm;
+				break;
+			case DRM_PLANE_TYPE_PRIMARY:
+				wm_state->wm[level].primary = wm;
+				break;
+			case DRM_PLANE_TYPE_OVERLAY:
+				sprite = plane->plane;
+				wm_state->wm[level].sprite[sprite] = wm;
+				break;
+			}
+		}
+
+		wm_state->num_levels = level;
+
+		if (!wm_state->cxsr)
+			continue;
+
+		/* maxfifo watermarks */
+		switch (plane->base.type) {
+			int sprite, level;
+		case DRM_PLANE_TYPE_CURSOR:
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].cursor =
+					wm_state->wm[level].cursor;
+			break;
+		case DRM_PLANE_TYPE_PRIMARY:
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].plane =
+					min(wm_state->sr[level].plane,
+					    wm_state->wm[level].primary);
+			break;
+		case DRM_PLANE_TYPE_OVERLAY:
+			sprite = plane->plane;
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].plane =
+					min(wm_state->sr[level].plane,
+					    wm_state->wm[level].sprite[sprite]);
 			break;
+		}
 	}
 
-	return true;
+	/* clear any (partially) filled invalid levels */
+	for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
+		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
+		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
+	}
+
+	vlv_invert_wms(crtc);
 }
 
-static void valleyview_update_wm(struct drm_crtc *crtc)
+#define VLV_FIFO(plane, value) \
+	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
+
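/*
 * A sketch of the token pasting above: VLV_FIFO(SPRITEB, x) expands to
 *
 *   ((x) << DSPARB_SPRITEB_SHIFT_VLV) & DSPARB_SPRITEB_MASK_VLV
 *
 * i.e. each plane's FIFO split point is shifted into its register
 * field and masked to that field's width.
 */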
+static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
-	bool cxsr_enabled;
-	struct vlv_wm_values wm = dev_priv->wm.vlv;
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_plane *plane;
+	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
 
-	wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
-	wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
-					       to_intel_plane(crtc->primary),
-					       vlv_get_fifo_size(dev, pipe, 0));
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+			WARN_ON(plane->wm.fifo_size != 63);
+			continue;
+		}
 
-	wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
-	wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
-					      to_intel_plane(crtc->cursor),
-					      0x3f);
+		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+			sprite0_start = plane->wm.fifo_size;
+		else if (plane->plane == 0)
+			sprite1_start = sprite0_start + plane->wm.fifo_size;
+		else
+			fifo_size = sprite1_start + plane->wm.fifo_size;
+	}
 
-	cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
+	WARN_ON(fifo_size != 512 - 1);
 
-	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
-		return;
+	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
+		      pipe_name(crtc->pipe), sprite0_start,
+		      sprite1_start, fifo_size);
 
-	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
-		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
-		      wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
-		      wm.sr.plane, wm.sr.cursor);
+	switch (crtc->pipe) {
+		uint32_t dsparb, dsparb2, dsparb3;
+	case PIPE_A:
+		dsparb = I915_READ(DSPARB);
+		dsparb2 = I915_READ(DSPARB2);
 
-	/*
-	 * FIXME DDR DVFS introduces massive memory latencies which
-	 * are not known to system agent so any deadline specified
-	 * by the display may not be respected. To support DDR DVFS
-	 * the watermark code needs to be rewritten to essentially
-	 * bypass deadline mechanism and rely solely on the
-	 * watermarks. For now disable DDR DVFS.
-	 */
-	if (IS_CHERRYVIEW(dev_priv))
-		chv_set_memory_dvfs(dev_priv, false);
+		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
+			    VLV_FIFO(SPRITEB, 0xff));
+		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
+			   VLV_FIFO(SPRITEB, sprite1_start));
 
-	if (!cxsr_enabled)
-		intel_set_memory_cxsr(dev_priv, false);
+		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
+			     VLV_FIFO(SPRITEB_HI, 0x1));
+		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
 
-	vlv_write_wm_values(intel_crtc, &wm);
+		I915_WRITE(DSPARB, dsparb);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	case PIPE_B:
+		dsparb = I915_READ(DSPARB);
+		dsparb2 = I915_READ(DSPARB2);
 
-	if (cxsr_enabled)
-		intel_set_memory_cxsr(dev_priv, true);
+		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
+			    VLV_FIFO(SPRITED, 0xff));
+		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
+			   VLV_FIFO(SPRITED, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
+			     VLV_FIFO(SPRITED_HI, 0xff));
+		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB, dsparb);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	case PIPE_C:
+		dsparb3 = I915_READ(DSPARB3);
+		dsparb2 = I915_READ(DSPARB2);
+
+		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
+			     VLV_FIFO(SPRITEF, 0xff));
+		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
+			    VLV_FIFO(SPRITEF, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
+			     VLV_FIFO(SPRITEF_HI, 0xff));
+		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB3, dsparb3);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	default:
+		break;
+	}
 }
 
-static void valleyview_update_sprite_wm(struct drm_plane *plane,
-					struct drm_crtc *crtc,
-					uint32_t sprite_width,
-					uint32_t sprite_height,
-					int pixel_size,
-					bool enabled, bool scaled)
+#undef VLV_FIFO
+
+static void vlv_merge_wm(struct drm_device *dev,
+			 struct vlv_wm_values *wm)
+{
+	struct intel_crtc *crtc;
+	int num_active_crtcs = 0;
+
+	if (IS_CHERRYVIEW(dev))
+		wm->level = VLV_WM_LEVEL_DDR_DVFS;
+	else
+		wm->level = VLV_WM_LEVEL_PM2;
+	wm->cxsr = true;
+
+	for_each_intel_crtc(dev, crtc) {
+		const struct vlv_wm_state *wm_state = &crtc->wm_state;
+
+		if (!crtc->active)
+			continue;
+
+		if (!wm_state->cxsr)
+			wm->cxsr = false;
+
+		num_active_crtcs++;
+		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
+	}
+
+	if (num_active_crtcs != 1)
+		wm->cxsr = false;
+
+	if (num_active_crtcs > 1)
+		wm->level = VLV_WM_LEVEL_PM2;
+
+	for_each_intel_crtc(dev, crtc) {
+		struct vlv_wm_state *wm_state = &crtc->wm_state;
+		enum pipe pipe = crtc->pipe;
+
+		if (!crtc->active)
+			continue;
+
+		wm->pipe[pipe] = wm_state->wm[wm->level];
+		if (wm->cxsr)
+			wm->sr = wm_state->sr[wm->level];
+
+		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
+	}
+}
+
+static void vlv_update_wm(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	int sprite = to_intel_plane(plane)->plane;
-	bool cxsr_enabled;
-	struct vlv_wm_values wm = dev_priv->wm.vlv;
+	struct vlv_wm_values wm = {};
 
-	if (enabled) {
-		wm.ddl[pipe].sprite[sprite] =
-			vlv_compute_drain_latency(crtc, plane);
+	vlv_compute_wm(intel_crtc);
+	vlv_merge_wm(dev, &wm);
 
-		wm.pipe[pipe].sprite[sprite] =
-			vlv_compute_wm(intel_crtc,
-				       to_intel_plane(plane),
-				       vlv_get_fifo_size(dev, pipe, sprite+1));
-	} else {
-		wm.ddl[pipe].sprite[sprite] = 0;
-		wm.pipe[pipe].sprite[sprite] = 0;
+	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
+		/* FIXME should be part of crtc atomic commit */
+		vlv_pipe_set_fifo_size(intel_crtc);
+		return;
 	}
 
-	cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
-
-	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
-		return;
+	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
+	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
+		chv_set_memory_dvfs(dev_priv, false);
 
-	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
-		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
-		      sprite_name(pipe, sprite),
-		      wm.pipe[pipe].sprite[sprite],
-		      wm.sr.plane, wm.sr.cursor);
+	if (wm.level < VLV_WM_LEVEL_PM5 &&
+	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
+		chv_set_memory_pm5(dev_priv, false);
 
-	if (!cxsr_enabled)
+	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
 		intel_set_memory_cxsr(dev_priv, false);
 
+	/* FIXME should be part of crtc atomic commit */
+	vlv_pipe_set_fifo_size(intel_crtc);
+
 	vlv_write_wm_values(intel_crtc, &wm);
 
-	if (cxsr_enabled)
+	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
+		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
+		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
+
+	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
 		intel_set_memory_cxsr(dev_priv, true);
+
+	if (wm.level >= VLV_WM_LEVEL_PM5 &&
+	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
+		chv_set_memory_pm5(dev_priv, true);
+
+	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
+	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
+		chv_set_memory_dvfs(dev_priv, true);
+
+	dev_priv->wm.vlv = wm;
 }
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
@@ -1434,23 +1712,22 @@ static void i845_update_wm(struct drm_crtc *unused_crtc)
 	I915_WRITE(FW_BLC, fwater_lo);
 }
 
-static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
-				    struct drm_crtc *crtc)
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pixel_rate;
 
-	pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;
+	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
 
 	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
 	 * adjust the pixel_rate here. */
 
-	if (intel_crtc->config->pch_pfit.enabled) {
+	if (pipe_config->pch_pfit.enabled) {
 		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
-		uint32_t pfit_size = intel_crtc->config->pch_pfit.size;
+		uint32_t pfit_size = pipe_config->pch_pfit.size;
+
+		pipe_w = pipe_config->pipe_src_w;
+		pipe_h = pipe_config->pipe_src_h;
 
-		pipe_w = intel_crtc->config->pipe_src_w;
-		pipe_h = intel_crtc->config->pipe_src_h;
 		pfit_w = (pfit_size >> 16) & 0xFFFF;
 		pfit_h = pfit_size & 0xFFFF;
 		if (pipe_w < pfit_w)
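
The hunk stops just before the actual scaling; in the surrounding function the pixel rate is scaled by the ratio of the (clamped) pipe source area to the panel-fitter window. A hedged userspace sketch of that arithmetic, where the final division is an assumption based on how the extracted fields are used here:

#include <stdint.h>
#include <stdio.h>

/* Assumed continuation of ilk_pipe_pixel_rate(): scale the dotclock by
 * the ratio of the (clamped) pipe source area to the fitter output. */
static unsigned scaled_pixel_rate(uint32_t pixel_rate,
				  uint64_t pipe_w, uint64_t pipe_h,
				  uint64_t pfit_w, uint64_t pfit_h)
{
	if (pipe_w < pfit_w)
		pipe_w = pfit_w;
	if (pipe_h < pfit_h)
		pipe_h = pfit_h;

	return (uint64_t)pixel_rate * pipe_w * pipe_h / (pfit_w * pfit_h);
}

int main(void)
{
	/* 1920x1200 source downscaled into a 1920x1080 fitter window:
	 * the effective rate rises by 1200/1080, about 11%. */
	printf("%u kHz\n", scaled_pixel_rate(148500, 1920, 1200, 1920, 1080));
	return 0;
}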
@@ -1815,7 +2092,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
 				     mode->crtc_clock);
 	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
-					 dev_priv->display.get_display_clock_speed(dev_priv->dev));
+					 dev_priv->cdclk_freq);
 
 	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
 	       PIPE_WM_LINETIME_TIME(linetime);
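
The linetime expression divides 8 * htotal * 1000 by the dotclock in kHz, i.e. it reports the scanline period in 1/8 us units. A quick worked check with 1080p@60 CEA timings:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned htotal = 2200, clock_khz = 148500; /* 1080p@60 timings */

	/* scanline period in 1/8 us units: 8 * htotal / (clock in MHz) */
	unsigned linetime = DIV_ROUND_CLOSEST(htotal * 1000 * 8, clock_khz);

	printf("linetime = %u (%.3f us per scanline)\n",
	       linetime, linetime / 8.0);	/* -> 119, ~14.9 us */
	return 0;
}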
@@ -2066,7 +2343,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
 
 	p->active = true;
 	p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
-	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+	p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);
 
 	if (crtc->primary->state->fb)
 		p->pri.bytes_per_pixel =
@@ -2085,7 +2362,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
 	p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
 	p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
 
-	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+	drm_for_each_legacy_plane(plane, dev) {
 		struct intel_plane *intel_plane = to_intel_plane(plane);
 
 		if (intel_plane->pipe == pipe) {
@@ -2215,6 +2492,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 			 const struct ilk_wm_maximums *max,
 			 struct intel_pipe_wm *merged)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int level, max_level = ilk_wm_max_level(dev);
 	int last_enabled_level = max_level;
 
@@ -2255,7 +2533,8 @@ static void ilk_wm_merge(struct drm_device *dev,
 	 * What we should check here is whether FBC can be
 	 * enabled sometime later.
 	 */
-	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+	    intel_fbc_enabled(dev_priv)) {
 		for (level = 2; level <= max_level; level++) {
 			struct intel_wm_level *wm = &merged->wm[level];
 
@@ -3043,8 +3322,10 @@ skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
 	if (!to_intel_crtc(crtc)->active)
 		return 0;
 
-	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+	if (WARN_ON(p->pixel_rate == 0))
+		return 0;
 
+	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
 }
 
 static void skl_compute_transition_wm(struct drm_crtc *crtc,
@@ -3685,6 +3966,139 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 	}
 }
 
+#define _FW_WM(value, plane) \
+	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
+#define _FW_WM_VLV(value, plane) \
+	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
+
+static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+			       struct vlv_wm_values *wm)
+{
+	enum pipe pipe;
+	uint32_t tmp;
+
+	for_each_pipe(dev_priv, pipe) {
+		tmp = I915_READ(VLV_DDL(pipe));
+
+		wm->ddl[pipe].primary =
+			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].cursor =
+			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].sprite[0] =
+			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].sprite[1] =
+			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+	}
+
+	tmp = I915_READ(DSPFW1);
+	wm->sr.plane = _FW_WM(tmp, SR);
+	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
+	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
+	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
+
+	tmp = I915_READ(DSPFW2);
+	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
+	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
+	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
+
+	tmp = I915_READ(DSPFW3);
+	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		tmp = I915_READ(DSPFW7_CHV);
+		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+		tmp = I915_READ(DSPFW8_CHV);
+		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
+		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
+
+		tmp = I915_READ(DSPFW9_CHV);
+		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
+		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
+
+		tmp = I915_READ(DSPHOWM);
+		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
+		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+	} else {
+		tmp = I915_READ(DSPFW7);
+		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+		tmp = I915_READ(DSPHOWM);
+		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+	}
+}
+
+#undef _FW_WM
+#undef _FW_WM_VLV
+
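
vlv_read_wm_values() reassembles watermarks that are wider than their base register fields: the low bits come from a DSPFW field and the overflow bit is ORed in from DSPHOWM at bit 8 (bit 9 for the SR plane field, whose base field is 9 bits wide). A tiny sketch of the reassembly:

#include <stdint.h>
#include <stdio.h>

/* Reassemble a watermark whose low 8 bits live in one register field
 * and whose overflow bit sits in DSPHOWM, as in vlv_read_wm_values(). */
static unsigned read_split_wm(uint32_t fw_field, uint32_t hi_bit)
{
	return fw_field | (hi_bit << 8);
}

int main(void)
{
	/* value 300 = 0x12c: 0x2c in the base field, HI bit set */
	printf("%u\n", read_split_wm(0x2c, 1));	/* -> 300 */
	return 0;
}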
+void vlv_wm_get_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+	struct intel_plane *plane;
+	enum pipe pipe;
+	u32 val;
+
+	vlv_read_wm_values(dev_priv, wm);
+
+	for_each_intel_plane(dev, plane) {
+		switch (plane->base.type) {
+			int sprite;
+		case DRM_PLANE_TYPE_CURSOR:
+			plane->wm.fifo_size = 63;
+			break;
+		case DRM_PLANE_TYPE_PRIMARY:
+			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+			break;
+		case DRM_PLANE_TYPE_OVERLAY:
+			sprite = plane->plane;
+			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+			break;
+		}
+	}
+
+	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+	wm->level = VLV_WM_LEVEL_PM2;
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+
+		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+		if (val & DSP_MAXFIFO_PM5_ENABLE)
+			wm->level = VLV_WM_LEVEL_PM5;
+
+		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+		if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+			wm->level = VLV_WM_LEVEL_DDR_DVFS;
+
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	}
+
+	for_each_pipe(dev_priv, pipe)
+		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
+			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
+
+	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+}
+
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4083,14 +4497,14 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
 		      "Odd GPU freq value\n"))
 		val &= ~1;
 
+	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
 	if (val != dev_priv->rps.cur_freq) {
 		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 		if (!IS_CHERRYVIEW(dev_priv))
 			gen6_set_rps_thresholds(dev_priv, val);
 	}
 
-	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
 	dev_priv->rps.cur_freq = val;
 	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
@@ -4250,12 +4664,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
 
 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 {
-	/* No RC6 before Ironlake */
-	if (INTEL_INFO(dev)->gen < 5)
-		return 0;
-
-	/* RC6 is only on Ironlake mobile not on desktop */
-	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
+	/* No RC6 before Ironlake, and the Ironlake (gen5) RC6 code is gone. */
+	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
 	/* Respect the kernel parameter if it is set */
@@ -4275,10 +4685,6 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 		return enable_rc6 & mask;
 	}
 
-	/* Disable RC6 on Ironlake */
-	if (INTEL_INFO(dev)->gen == 5)
-		return 0;
-
 	if (IS_IVYBRIDGE(dev))
 		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 
@@ -4297,25 +4703,26 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
 	u32 ddcc_status = 0;
 	int ret;
 
-	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	/* All of these values are in units of 50MHz */
 	dev_priv->rps.cur_freq		= 0;
 	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
-	dev_priv->rps.rp0_freq		= (rp_state_cap >>  0) & 0xff;
-	dev_priv->rps.rp1_freq		= (rp_state_cap >>  8) & 0xff;
-	dev_priv->rps.min_freq		= (rp_state_cap >> 16) & 0xff;
-	if (IS_SKYLAKE(dev)) {
-		/* Store the frequency values in 16.66 MHZ units, which is
-		   the natural hardware unit for SKL */
-		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
-		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
-		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+	if (IS_BROXTON(dev)) {
+		rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
+		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
+	} else {
+		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
+		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
 	}
+
 	/* hw_max = RP0 until we check for overclocking */
 	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;
 
 	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
 		ret = sandybridge_pcode_read(dev_priv,
 					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
 					&ddcc_status);
@@ -4327,6 +4734,16 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
 					dev_priv->rps.max_freq);
 	}
 
+	if (IS_SKYLAKE(dev)) {
+		/* Store the frequency values in 16.66 MHz units, which is
+		   the natural hardware unit for SKL */
+		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
+	}
+
 	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
 
 	/* Preserve min/max settings in case of re-init */
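
The reordered SKL block now scales all five rps values consistently. Assuming GEN9_FREQ_SCALER is 3 (50 MHz = 3 x 16.66 MHz, matching the comment's "natural hardware unit"), the conversion round-trips like this:

#include <stdio.h>

#define GEN9_FREQ_SCALER 3	/* assumed: 50 MHz = 3 x 16.66 MHz */

int main(void)
{
	unsigned rp0_raw = 23;	/* hypothetical 50 MHz units from RP_STATE_CAP */
	unsigned rp0_skl = rp0_raw * GEN9_FREQ_SCALER;	/* 16.66 MHz units */

	printf("RP0: raw=%u -> %u MHz, skl units=%u -> %u MHz\n",
	       rp0_raw, rp0_raw * 50,
	       rp0_skl, rp0_skl * 50 / GEN9_FREQ_SCALER);	/* both 1150 */
	return 0;
}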
@@ -4619,6 +5036,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
 	int min_freq = 15;
 	unsigned int gpu_freq;
 	unsigned int max_ia_freq, min_ring_freq;
+	unsigned int max_gpu_freq, min_gpu_freq;
 	int scaling_factor = 180;
 	struct cpufreq_policy *policy;
 
@@ -4643,17 +5061,31 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
 	/* convert DDR frequency from units of 266.6MHz to bandwidth */
 	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
+	if (IS_SKYLAKE(dev)) {
+		/* Convert GT frequency to 50 MHz units */
+		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
+		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+	} else {
+		min_gpu_freq = dev_priv->rps.min_freq;
+		max_gpu_freq = dev_priv->rps.max_freq;
+	}
+
 	/*
 	 * For each potential GPU frequency, load a ring frequency we'd like
 	 * to use for memory access.  We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
-	     gpu_freq--) {
-		int diff = dev_priv->rps.max_freq - gpu_freq;
+	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
+		int diff = max_gpu_freq - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
-		if (INTEL_INFO(dev)->gen >= 8) {
+		if (IS_SKYLAKE(dev)) {
+			/*
+			 * ring_freq = 2 * GT. ring_freq is in 100 MHz units.
+			 * No floor is required for the ring frequency on SKL.
+			 */
+			ring_freq = gpu_freq;
+		} else if (INTEL_INFO(dev)->gen >= 8) {
 			/* max(2 * GT, DDR). NB: GT is 50MHz units */
 			ring_freq = max(min_ring_freq, gpu_freq);
 		} else if (IS_HASWELL(dev)) {
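
A sketch of the per-generation ring-frequency choice as far as this hunk shows it; the Haswell and pre-gen8 formulas are cut off by the hunk boundary, so they are stubbed with a placeholder here:

#include <stdio.h>

enum gen { GEN_SKL, GEN_BDW, GEN_OTHER };

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Ring frequency for one GPU frequency step, mirroring the visible
 * branches: SKL uses the GT value directly (2*GT in 100 MHz units),
 * gen8 floors 2*GT at the minimum DDR-derived ring frequency. */
static unsigned pick_ring_freq(enum gen g, unsigned gpu_freq,
			       unsigned min_ring_freq)
{
	switch (g) {
	case GEN_SKL:
		return gpu_freq;		/* no floor needed */
	case GEN_BDW:
		return max(min_ring_freq, gpu_freq);
	default:
		return min_ring_freq;		/* placeholder for older gens */
	}
}

int main(void)
{
	printf("skl: %u, bdw: %u\n",
	       pick_ring_freq(GEN_SKL, 4, 9),
	       pick_ring_freq(GEN_BDW, 4, 9));
	return 0;
}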
@@ -4687,7 +5119,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+	if (!HAS_CORE_RING_FREQ(dev))
 		return;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
@@ -5802,7 +6234,8 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 	} else if (INTEL_INFO(dev)->gen >= 9) {
 		gen9_enable_rc6(dev);
 		gen9_enable_rps(dev);
-		__gen6_update_ring_freq(dev);
+		if (IS_SKYLAKE(dev))
+			__gen6_update_ring_freq(dev);
 	} else if (IS_BROADWELL(dev)) {
 		gen8_enable_rps(dev);
 		__gen6_update_ring_freq(dev);
@@ -6686,13 +7119,15 @@ void intel_init_pm(struct drm_device *dev)
 		else if (INTEL_INFO(dev)->gen == 8)
 			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
 	} else if (IS_CHERRYVIEW(dev)) {
-		dev_priv->display.update_wm = valleyview_update_wm;
-		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+		vlv_setup_wm_latency(dev);
+
+		dev_priv->display.update_wm = vlv_update_wm;
 		dev_priv->display.init_clock_gating =
 			cherryview_init_clock_gating;
 	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.update_wm = valleyview_update_wm;
-		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+		vlv_setup_wm_latency(dev);
+
+		dev_priv->display.update_wm = vlv_update_wm;
 		dev_priv->display.init_clock_gating =
 			valleyview_init_clock_gating;
 	} else if (IS_PINEVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 5ee0fa57ed19..a04b4dc5ed9b 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -254,10 +254,13 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
 	uint32_t max_sleep_time = 0x1f;
 	/* Lately it was identified that depending on panel idle frame count
 	 * calculated at HW can be off by 1. So let's use what came
-	 * from VBT + 1 and at minimum 2 to be on the safe side.
+	 * from VBT + 1.
+	 * There are also other cases where the panel demands at least 4
+	 * but VBT is not set. To cover both cases, let's use at least 5
+	 * when VBT isn't set, to be on the safe side.
 	 */
 	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
-			       dev_priv->vbt.psr.idle_frames + 1 : 2;
+			       dev_priv->vbt.psr.idle_frames + 1 : 5;
 	uint32_t val = 0x0;
 	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
@@ -400,7 +403,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 
 		/* Avoid continuous PSR exit by masking memup and hpd */
 		I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+			   EDP_PSR_DEBUG_MASK_HPD);
 
 		/* Enable PSR on the panel */
 		hsw_psr_enable_sink(intel_dp);
@@ -596,13 +599,15 @@ static void intel_psr_exit(struct drm_device *dev)
 /**
  * intel_psr_single_frame_update - Single Frame Update
  * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Some platforms support a single frame update feature that is used to
  * send and update only one frame on Remote Frame Buffer.
  * So far it is only implemented for Valleyview and Cherryview because
  * hardware requires this to be done before a page flip.
  */
-void intel_psr_single_frame_update(struct drm_device *dev)
+void intel_psr_single_frame_update(struct drm_device *dev,
+				   unsigned frontbuffer_bits)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
@@ -624,14 +629,16 @@ void intel_psr_single_frame_update(struct drm_device *dev)
 
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
-	val = I915_READ(VLV_PSRCTL(pipe));
 
-	/*
-	 * We need to set this bit before writing registers for a flip.
-	 * This bit will be self-clear when it gets to the PSR active state.
-	 */
-	I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
+		val = I915_READ(VLV_PSRCTL(pipe));
 
+		/*
+		 * We need to set this bit before writing registers for a flip.
+		 * This bit will self-clear when it gets to the PSR active state.
+		 */
+		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+	}
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -648,7 +655,7 @@ void intel_psr_single_frame_update(struct drm_device *dev)
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
  */
 void intel_psr_invalidate(struct drm_device *dev,
-			      unsigned frontbuffer_bits)
+			  unsigned frontbuffer_bits)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
@@ -663,11 +670,12 @@ void intel_psr_invalidate(struct drm_device *dev,
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
 
-	intel_psr_exit(dev);
-
 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+
+	if (frontbuffer_bits)
+		intel_psr_exit(dev);
+
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -675,6 +683,7 @@ void intel_psr_invalidate(struct drm_device *dev,
  * intel_psr_flush - Flush PSR
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
  *
  * Since the hardware frontbuffer tracking has gaps we need to integrate
  * with the software frontbuffer tracking. This function gets called every
@@ -684,11 +693,12 @@ void intel_psr_invalidate(struct drm_device *dev,
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
  */
 void intel_psr_flush(struct drm_device *dev,
-			 unsigned frontbuffer_bits)
+		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	enum pipe pipe;
+	int delay_ms = HAS_DDI(dev) ? 100 : 500;
 
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
@@ -698,30 +708,33 @@ void intel_psr_flush(struct drm_device *dev,
 
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
-	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	/*
-	 * On Haswell sprite plane updates don't result in a psr invalidating
-	 * signal in the hardware. Which means we need to manually fake this in
-	 * software for all flushes, not just when we've seen a preceding
-	 * invalidation through frontbuffer rendering.
-	 */
-	if (IS_HASWELL(dev) &&
-	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
-		intel_psr_exit(dev);
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	/*
-	 * On Valleyview and Cherryview we don't use hardware tracking so
-	 * any plane updates or cursor moves don't result in a PSR
-	 * invalidating. Which means we need to manually fake this in
-	 * software for all flushes, not just when we've seen a preceding
-	 * invalidation through frontbuffer rendering. */
-	if (!HAS_DDI(dev))
-		intel_psr_exit(dev);
+	if (HAS_DDI(dev)) {
+		/*
+		 * By definition every flush should mean invalidate + flush,
+		 * however on core platforms let's minimize the
+		 * disable/re-enable so we can avoid the invalidate when flip
+		 * originated the flush.
+		 */
+		if (frontbuffer_bits && origin != ORIGIN_FLIP)
+			intel_psr_exit(dev);
+	} else {
+		/*
+		 * On Valleyview and Cherryview we don't use hardware tracking
+		 * so any plane updates or cursor moves don't result in a PSR
+		 * invalidating. Which means we need to manually fake this in
+		 * software for all flushes.
+		 */
+		if (frontbuffer_bits)
+			intel_psr_exit(dev);
+	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(100));
+				      msecs_to_jiffies(delay_ms));
 	mutex_unlock(&dev_priv->psr.lock);
 }
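
The rewritten flush path boils down to: exit PSR only when dirty bits remain for the PSR pipe, skipping flip-originated flushes on DDI platforms (whose hardware tracking already covers flips), and re-arm the enable work after 100 ms on DDI vs 500 ms on VLV/CHV. The exit decision in isolation:

#include <stdbool.h>
#include <stdio.h>

enum fb_op_origin { ORIGIN_CPU, ORIGIN_FLIP };

/* Should this flush force a PSR exit? Mirrors the branch above. */
static bool psr_flush_needs_exit(bool has_ddi, unsigned frontbuffer_bits,
				 enum fb_op_origin origin)
{
	if (!frontbuffer_bits)
		return false;
	/* Core platforms trust HW tracking for flips; VLV/CHV never do. */
	return has_ddi ? (origin != ORIGIN_FLIP) : true;
}

int main(void)
{
	printf("ddi+flip: %d\n", psr_flush_needs_exit(true, 0x1, ORIGIN_FLIP));
	printf("ddi+cpu:  %d\n", psr_flush_needs_exit(true, 0x1, ORIGIN_CPU));
	printf("vlv+flip: %d\n", psr_flush_needs_exit(false, 0x1, ORIGIN_FLIP));
	return 0;
}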
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3817a6f00d9e..6e6b8db996ef 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -81,7 +81,7 @@ bool intel_ring_stopped(struct intel_engine_cs *ring)
 	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
 }
 
-void __intel_ring_advance(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *ring)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
@@ -91,10 +91,11 @@ void __intel_ring_advance(struct intel_engine_cs *ring)
 }
 
 static int
-gen2_render_ring_flush(struct intel_engine_cs *ring,
+gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -105,7 +106,7 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
 	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -117,10 +118,11 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-gen4_render_ring_flush(struct intel_engine_cs *ring,
+gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	u32 cmd;
 	int ret;
@@ -163,7 +165,7 @@ gen4_render_ring_flush(struct intel_engine_cs *ring,
 	    (IS_G4X(dev) || IS_GEN5(dev)))
 		cmd |= MI_INVALIDATE_ISP;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -212,13 +214,13 @@ gen4_render_ring_flush(struct intel_engine_cs *ring,
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
+intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
-
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -231,7 +233,7 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -247,15 +249,16 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
 }
 
 static int
-gen6_render_ring_flush(struct intel_engine_cs *ring,
-                         u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req,
+		       u32 invalidate_domains, u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = intel_emit_post_sync_nonzero_flush(ring);
+	ret = intel_emit_post_sync_nonzero_flush(req);
 	if (ret)
 		return ret;
 
@@ -285,7 +288,7 @@ gen6_render_ring_flush(struct intel_engine_cs *ring,
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -299,11 +302,12 @@ gen6_render_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
+gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -318,9 +322,10 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
 }
 
 static int
-gen7_render_ring_flush(struct intel_engine_cs *ring,
+gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -362,10 +367,10 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
 		/* Workaround: we must issue a pipe_control with CS-stall bit
 		 * set before a pipe_control command that has the state cache
 		 * invalidate bit set. */
-		gen7_render_ring_cs_stall_wa(ring);
+		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -379,12 +384,13 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-gen8_emit_pipe_control(struct intel_engine_cs *ring,
+gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -400,11 +406,11 @@ gen8_emit_pipe_control(struct intel_engine_cs *ring,
 }
 
 static int
-gen8_render_ring_flush(struct intel_engine_cs *ring,
+gen8_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -424,7 +430,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
 		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
-		ret = gen8_emit_pipe_control(ring,
+		ret = gen8_emit_pipe_control(req,
 					     PIPE_CONTROL_CS_STALL |
 					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
 					     0);
@@ -432,7 +438,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
 			return ret;
 	}
 
-	return gen8_emit_pipe_control(ring, flags, scratch_addr);
+	return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
 static void ring_write_tail(struct intel_engine_cs *ring,
@@ -703,10 +709,10 @@ err:
 	return ret;
 }
 
-static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
-				       struct intel_context *ctx)
+static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
@@ -715,11 +721,11 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
 		return 0;
 
 	ring->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(ring);
+	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, (w->count * 2 + 2));
+	ret = intel_ring_begin(req, (w->count * 2 + 2));
 	if (ret)
 		return ret;
 
@@ -733,7 +739,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
 	intel_ring_advance(ring);
 
 	ring->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(ring);
+	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
@@ -742,16 +748,15 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
 	return 0;
 }
 
-static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
-			      struct intel_context *ctx)
+static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
 {
 	int ret;
 
-	ret = intel_ring_workarounds_emit(ring, ctx);
+	ret = intel_ring_workarounds_emit(req);
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_render_state_init(ring);
+	ret = i915_gem_render_state_init(req);
 	if (ret)
 		DRM_ERROR("init render state: %d\n", ret);
 
@@ -775,11 +780,11 @@ static int wa_add(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-#define WA_REG(addr, mask, val) { \
+#define WA_REG(addr, mask, val) do { \
 		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
 		if (r) \
 			return r; \
-	}
+	} while (0)
 
 #define WA_SET_BIT_MASKED(addr, mask) \
 	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
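
Wrapping WA_REG in do { } while (0) makes it expand to a single statement. With the old brace-only form, `if (cond) WA_REG(...); else ...` expands to a block followed by a stray semicolon, which orphans the else. A toy reproduction of the fixed pattern:

#include <stdio.h>

/* The old form: "if (c) EMIT_BAD(1); else ..." fails to compile,
 * because the semicolon after the block terminates the if. */
#define EMIT_BAD(x)  { printf("%d\n", (x)); }
/* The new form behaves as one statement in every context. */
#define EMIT_GOOD(x) do { printf("%d\n", (x)); } while (0)

int main(void)
{
	int cond = 0;

	if (cond)
		EMIT_GOOD(1);
	else
		EMIT_GOOD(2);	/* the else binds correctly */
	return 0;
}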
@@ -800,6 +805,11 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+	/* WaDisableAsyncFlipPerfMode:bdw */
+	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
 	/* WaDisablePartialInstShootdown:bdw */
 	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -861,6 +871,11 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+	/* WaDisableAsyncFlipPerfMode:chv */
+	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
 	/* WaDisablePartialInstShootdown:chv */
 	/* WaDisableThreadStallDopClockGating:chv */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -931,8 +946,11 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 		/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
-		WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
-				  DISABLE_PIXEL_MASK_CAMMING);
+		/*
+		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
+		 * but we do that in the per-ctx batchbuffer as there is an issue
+		 * with this register not getting restored on ctx restore.
+		 */
 	}
 
 	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
@@ -1023,13 +1041,6 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-	    INTEL_REVID(dev) == SKL_REVID_D0)
-		/* WaBarrierPerformanceFixDisable:skl */
-		WA_SET_BIT_MASKED(HDC_CHICKEN0,
-				  HDC_FENCE_DEST_SLM_DISABLE |
-				  HDC_BARRIER_PERFORMANCE_DISABLE);
-
 	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
@@ -1041,6 +1052,20 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 				  HDC_FORCE_NON_COHERENT);
 	}
 
+	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+	    INTEL_REVID(dev) == SKL_REVID_D0)
+		/* WaBarrierPerformanceFixDisable:skl */
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FENCE_DEST_SLM_DISABLE |
+				  HDC_BARRIER_PERFORMANCE_DISABLE);
+
+	/* WaDisableSbeCacheDispatchPortSharing:skl */
+	if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+		WA_SET_BIT_MASKED(
+			GEN7_HALF_SLICE_CHICKEN1,
+			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+	}
+
 	return skl_tune_iz_hashing(ring);
 }
 
@@ -1105,9 +1130,9 @@ static int init_render_ring(struct intel_engine_cs *ring)
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
 	 * programmed to '1' on all products.
 	 *
-	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
+	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
 	/* Required for the hardware to program scanline values for waiting */
@@ -1132,7 +1157,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 	if (HAS_L3_DPF(dev))
@@ -1155,10 +1180,11 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
 	intel_fini_pipe_control(ring);
 }
 
-static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
+	struct intel_engine_cs *signaller = signaller_req->ring;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;
@@ -1168,7 +1194,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -1178,8 +1204,7 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		seqno = i915_gem_request_get_seqno(
-					   signaller->outstanding_lazy_request);
+		seqno = i915_gem_request_get_seqno(signaller_req);
 		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
 		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
 					   PIPE_CONTROL_QW_WRITE |
@@ -1196,10 +1221,11 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
 	return 0;
 }
 
-static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
+	struct intel_engine_cs *signaller = signaller_req->ring;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;
@@ -1209,7 +1235,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -1219,8 +1245,7 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		seqno = i915_gem_request_get_seqno(
-					   signaller->outstanding_lazy_request);
+		seqno = i915_gem_request_get_seqno(signaller_req);
 		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
 					   MI_FLUSH_DW_OP_STOREDW);
 		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
@@ -1235,9 +1260,10 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
 	return 0;
 }
 
-static int gen6_signal(struct intel_engine_cs *signaller,
+static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
+	struct intel_engine_cs *signaller = signaller_req->ring;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *useless;
@@ -1248,15 +1274,14 @@ static int gen6_signal(struct intel_engine_cs *signaller,
 	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
 	for_each_ring(useless, dev_priv, i) {
 		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
 		if (mbox_reg != GEN6_NOSYNC) {
-			u32 seqno = i915_gem_request_get_seqno(
-					   signaller->outstanding_lazy_request);
+			u32 seqno = i915_gem_request_get_seqno(signaller_req);
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
 			intel_ring_emit(signaller, mbox_reg);
 			intel_ring_emit(signaller, seqno);
@@ -1272,30 +1297,29 @@ static int gen6_signal(struct intel_engine_cs *signaller,
 
 /**
  * gen6_add_request - Update the semaphore mailbox registers
- * 
- * @ring - ring that is adding a request
- * @seqno - return seqno stuck into the ring
+ *
+ * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_engine_cs *ring)
+gen6_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (ring->semaphore.signal)
-		ret = ring->semaphore.signal(ring, 4);
+		ret = ring->semaphore.signal(req, 4);
 	else
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(req, 4);
 
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	__intel_ring_advance(ring);
 
@@ -1318,14 +1342,15 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
  */
 
 static int
-gen8_ring_sync(struct intel_engine_cs *waiter,
+gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
+	struct intel_engine_cs *waiter = waiter_req->ring;
 	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
 	int ret;
 
-	ret = intel_ring_begin(waiter, 4);
+	ret = intel_ring_begin(waiter_req, 4);
 	if (ret)
 		return ret;
 
@@ -1343,10 +1368,11 @@ gen8_ring_sync(struct intel_engine_cs *waiter,
 }
 
 static int
-gen6_ring_sync(struct intel_engine_cs *waiter,
+gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
+	struct intel_engine_cs *waiter = waiter_req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
@@ -1361,7 +1387,7 @@ gen6_ring_sync(struct intel_engine_cs *waiter,
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(waiter, 4);
+	ret = intel_ring_begin(waiter_req, 4);
 	if (ret)
 		return ret;
 
@@ -1392,8 +1418,9 @@ do {									\
 } while (0)
 
 static int
-pc_render_add_request(struct intel_engine_cs *ring)
+pc_render_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
@@ -1405,7 +1432,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
 	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
 	 * memory before requesting an interrupt.
 	 */
-	ret = intel_ring_begin(ring, 32);
+	ret = intel_ring_begin(req, 32);
 	if (ret)
 		return ret;
 
@@ -1413,8 +1440,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1433,8 +1459,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
 	__intel_ring_advance(ring);
 
@@ -1585,13 +1610,14 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
 }
 
 static int
-bsd_ring_flush(struct intel_engine_cs *ring,
+bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -1602,18 +1628,18 @@ bsd_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-i9xx_add_request(struct intel_engine_cs *ring)
+i9xx_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	__intel_ring_advance(ring);
 
@@ -1745,13 +1771,14 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_engine_cs *ring,
+i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -1771,14 +1798,15 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
-i830_dispatch_execbuffer(struct intel_engine_cs *ring,
+i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 cs_offset = ring->scratch.gtt_offset;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -1795,7 +1823,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(ring, 6 + 2);
+		ret = intel_ring_begin(req, 6 + 2);
 		if (ret)
 			return ret;
 
@@ -1818,7 +1846,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -1833,13 +1861,14 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 }
 
 static int
-i915_dispatch_execbuffer(struct intel_engine_cs *ring,
+i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -2082,7 +2111,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
 	intel_unpin_ringbuffer_obj(ringbuf);
 	intel_destroy_ringbuffer_obj(ringbuf);
-	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -2106,6 +2134,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	if (intel_ring_space(ringbuf) >= n)
 		return 0;
 
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
 	list_for_each_entry(request, &ring->request_list, list) {
 		space = __intel_ring_space(request->postfix, ringbuf->tail,
 					   ringbuf->size);
@@ -2124,18 +2155,11 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	return 0;
 }
 
-static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
 	uint32_t __iomem *virt;
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	int rem = ringbuf->size - ringbuf->tail;
 
-	if (ringbuf->space < rem) {
-		int ret = ring_wait_for_space(ring, rem);
-		if (ret)
-			return ret;
-	}
-
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
@@ -2143,21 +2167,11 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
-
-	return 0;
 }
 
 int intel_ring_idle(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_request *req;
-	int ret;
-
-	/* We need to add any requests required to flush the objects and ring */
-	if (ring->outstanding_lazy_request) {
-		ret = i915_add_request(ring);
-		if (ret)
-			return ret;
-	}
 
 	/* Wait upon the last request to be completed */
 	if (list_empty(&ring->request_list))
@@ -2180,33 +2194,126 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 	return 0;
 }
 
-static int __intel_ring_prepare(struct intel_engine_cs *ring,
-				int bytes)
+int intel_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+	/*
+	 * The first call merely notes the reserve request and is common for
+	 * all back ends. The subsequent localised _begin() call actually
+	 * ensures that the reservation is available. Without the begin, if
+	 * the request creator immediately submitted the request without
+	 * adding any commands to it then there might not actually be
+	 * sufficient room for the submission commands.
+	 */
+	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+	return intel_ring_begin(request, 0);
+}
+
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
+{
+	WARN_ON(ringbuf->reserved_size);
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_size = size;
+}
+
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_size   = 0;
+	ringbuf->reserved_in_use = false;
+}
+
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_in_use = true;
+	ringbuf->reserved_tail   = ringbuf->tail;
+}
+
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(!ringbuf->reserved_in_use);
+	if (ringbuf->tail > ringbuf->reserved_tail) {
+		WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+		     "request reserved size too small: %d vs %d!\n",
+		     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+	} else {
+		/*
+		 * The ring was wrapped while the reserved space was in use.
+		 * That means that some unknown amount of the ring tail was
+		 * no-op filled and skipped. Thus simply adding the ring size
+		 * to the tail and doing the above space check will not work.
+		 * Rather than attempt to track how much tail was skipped,
+		 * it is much simpler to accept that the sanity check is
+		 * also skipped every once in a while.
+		 */
+	}
+
+	ringbuf->reserved_size   = 0;
+	ringbuf->reserved_in_use = false;
+}
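
The four helpers above form a small state machine around a request's ring reservation: reserve notes the size, use snapshots the tail when the reservation starts being consumed, and end verifies the emitted commands actually fit. A self-contained model (field names mirror the ringbuf struct; the wrap caveat from the comment is ignored):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Minimal model of the ringbuf reserved-space fields. */
struct ringbuf {
	int tail, reserved_size, reserved_tail;
	bool reserved_in_use;
};

static void reserve(struct ringbuf *rb, int size)
{
	assert(!rb->reserved_size && !rb->reserved_in_use);
	rb->reserved_size = size;
}

static void use(struct ringbuf *rb)
{
	assert(!rb->reserved_in_use);
	rb->reserved_in_use = true;
	rb->reserved_tail = rb->tail;
}

static void end(struct ringbuf *rb)
{
	assert(rb->reserved_in_use);
	if (rb->tail > rb->reserved_tail &&
	    rb->tail > rb->reserved_tail + rb->reserved_size)
		printf("request reserved size too small: %d vs %d!\n",
		       rb->tail - rb->reserved_tail, rb->reserved_size);
	rb->reserved_size = 0;
	rb->reserved_in_use = false;
}

int main(void)
{
	struct ringbuf rb = { .tail = 128 };

	reserve(&rb, 64);	/* request creation: note the size */
	use(&rb);		/* add_request: start consuming it */
	rb.tail += 96;		/* emitted more than reserved...   */
	end(&rb);		/* ...so the check above fires     */
	return 0;
}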
+
+static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
-	int ret;
+	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	int remain_actual = ringbuf->size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
 
-	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
+
+	if (unlikely(bytes > remain_usable)) {
+		/*
+		 * Not enough space for the basic request. So need to flush
+		 * out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain_actual + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain_usable)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So we only need to wait for the
+			 * reserved size after flushing out the remainder.
+			 */
+			wait_bytes = remain_actual + ringbuf->reserved_size;
+			need_wrap = true;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
+		}
 	}
 
-	if (unlikely(ringbuf->space < bytes)) {
-		ret = ring_wait_for_space(ring, bytes);
+	if (wait_bytes) {
+		ret = ring_wait_for_space(ring, wait_bytes);
 		if (unlikely(ret))
 			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
 	}
 
 	return 0;
 }
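
__intel_ring_prepare() distinguishes three cases: the base request itself wraps, only the reserved tail portion wraps, or nothing wraps and we merely wait for space. A standalone mirror of that computation with concrete numbers:

#include <stdbool.h>
#include <stdio.h>

struct rb {
	int size, effective_size, tail, space, reserved_size;
	bool reserved_in_use;
};

/* Mirror of the wait/wrap decision in __intel_ring_prepare(). */
static int prepare(const struct rb *rb, int bytes, bool *need_wrap)
{
	int remain_usable = rb->effective_size - rb->tail;
	int remain_actual = rb->size - rb->tail;
	int total = rb->reserved_in_use ? bytes : bytes + rb->reserved_size;
	int wait_bytes = 0;

	*need_wrap = false;
	if (bytes > remain_usable) {
		/* base request does not fit before the wrap point */
		wait_bytes = remain_actual + total;
		*need_wrap = true;
	} else if (total > remain_usable) {
		/* base fits, the reserved tail portion does not */
		wait_bytes = remain_actual + rb->reserved_size;
		*need_wrap = true;
	} else if (total > rb->space) {
		wait_bytes = total;	/* just wait, no wrap */
	}
	return wait_bytes;
}

int main(void)
{
	struct rb rb = { .size = 4096, .effective_size = 4088,
			 .tail = 4000, .space = 200, .reserved_size = 160 };
	bool wrap;
	int w = prepare(&rb, 64, &wrap);

	printf("wait=%d wrap=%d\n", w, wrap);	/* -> wait=256 wrap=1 */
	return 0;
}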
 
-int intel_ring_begin(struct intel_engine_cs *ring,
+int intel_ring_begin(struct drm_i915_gem_request *req,
 		     int num_dwords)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
 	int ret;
 
+	WARN_ON(req == NULL);
+	ring = req->ring;
+	dev_priv = ring->dev->dev_private;
+
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
 	if (ret)
@@ -2216,18 +2323,14 @@ int intel_ring_begin(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
-	/* Preallocate the olr before touching the ring */
-	ret = i915_gem_request_alloc(ring, ring->default_context);
-	if (ret)
-		return ret;
-
 	ring->buffer->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_engine_cs *ring)
+int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
@@ -2235,7 +2338,7 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(ring, num_dwords);
+	ret = intel_ring_begin(req, num_dwords);
 	if (ret)
 		return ret;
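
The alignment math: take the dwords already past the last cacheline boundary and pad with NOPs up to the next one. A quick model, using the CACHELINE_BYTES value defined in intel_ringbuffer.h:

#include <stdio.h>

#define CACHELINE_BYTES 64

/* How many NOP dwords pad `tail` to the next cacheline boundary? */
static int align_dwords(int tail)
{
	int partial = (tail & (CACHELINE_BYTES - 1)) / sizeof(unsigned);

	return partial ? CACHELINE_BYTES / sizeof(unsigned) - partial : 0;
}

int main(void)
{
	printf("%d %d %d\n", align_dwords(64), align_dwords(68),
	       align_dwords(124));	/* -> 0 15 1 */
	return 0;
}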
 
@@ -2252,8 +2355,6 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	BUG_ON(ring->outstanding_lazy_request);
-
 	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
 		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
@@ -2298,13 +2399,14 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
 		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
 
-static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
+	struct intel_engine_cs *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -2342,20 +2444,23 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	bool ppgtt = USES_PPGTT(ring->dev) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
 	intel_ring_emit(ring, lower_32_bits(offset));
 	intel_ring_emit(ring, upper_32_bits(offset));
 	intel_ring_emit(ring, MI_NOOP);
@@ -2365,20 +2470,23 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 }
 
 static int
-hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
+			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
@@ -2387,13 +2495,14 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -2410,14 +2519,15 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct intel_engine_cs *ring,
+static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -2818,26 +2928,28 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 }
 
 int
-intel_ring_flush_all_caches(struct intel_engine_cs *ring)
+intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (!ring->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
 	ring->gpu_caches_dirty = false;
 	return 0;
 }
 
 int
-intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
+intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	uint32_t flush_domains;
 	int ret;
 
@@ -2845,11 +2957,11 @@ intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
 	if (ring->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
 	ring->gpu_caches_dirty = false;
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 4be66f60504d..2e85fda94963 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -12,6 +12,7 @@
  * workarounds!
  */
 #define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
 
 /*
  * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
@@ -105,6 +106,9 @@ struct intel_ringbuffer {
 	int space;
 	int size;
 	int effective_size;
+	int reserved_size;
+	int reserved_tail;
+	bool reserved_in_use;
 
 	/** We track the position of the requests in the ring buffer, and
 	 * when each is retired we increment last_retired_head as the GPU
@@ -120,6 +124,25 @@ struct intel_ringbuffer {
 struct	intel_context;
 struct drm_i915_reg_descriptor;
 
+/*
+ * We use a single page to load ctx workarounds, so all of these
+ * values are expressed in dwords.
+ *
+ * struct i915_wa_ctx_bb:
+ *  offset: specifies the batch starting position; also helpful in case
+ *    we want to have multiple batches at different offsets based on
+ *    some criteria. It is not a requirement at the moment but provides
+ *    an option for future use.
+ *  size: size of the batch in DWORDS
+ */
+struct  i915_ctx_workarounds {
+	struct i915_wa_ctx_bb {
+		u32 offset;
+		u32 size;
+	} indirect_ctx, per_ctx;
+	struct drm_i915_gem_object *obj;
+};
+
 struct  intel_engine_cs {
 	const char	*name;
 	enum intel_ring_id {
@@ -143,6 +166,7 @@ struct  intel_engine_cs {
 	struct i915_gem_batch_pool batch_pool;
 
 	struct intel_hw_status_page status_page;
+	struct i915_ctx_workarounds wa_ctx;
 
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
@@ -152,15 +176,14 @@ struct  intel_engine_cs {
 
 	int		(*init_hw)(struct intel_engine_cs *ring);
 
-	int		(*init_context)(struct intel_engine_cs *ring,
-					struct intel_context *ctx);
+	int		(*init_context)(struct drm_i915_gem_request *req);
 
 	void		(*write_tail)(struct intel_engine_cs *ring,
 				      u32 value);
-	int __must_check (*flush)(struct intel_engine_cs *ring,
+	int __must_check (*flush)(struct drm_i915_gem_request *req,
 				  u32	invalidate_domains,
 				  u32	flush_domains);
-	int		(*add_request)(struct intel_engine_cs *ring);
+	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
@@ -171,11 +194,12 @@ struct  intel_engine_cs {
 				     bool lazy_coherency);
 	void		(*set_seqno)(struct intel_engine_cs *ring,
 				     u32 seqno);
-	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
+	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
 					       u64 offset, u32 length,
 					       unsigned dispatch_flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
 	void		(*cleanup)(struct intel_engine_cs *ring);
 
 	/* GEN8 signal/wait table - never trust comments!
@@ -229,10 +253,10 @@ struct  intel_engine_cs {
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct intel_engine_cs *ring,
-				   struct intel_engine_cs *to,
+		int	(*sync_to)(struct drm_i915_gem_request *to_req,
+				   struct intel_engine_cs *from,
 				   u32 seqno);
-		int	(*signal)(struct intel_engine_cs *signaller,
+		int	(*signal)(struct drm_i915_gem_request *signaller_req,
 				  /* num_dwords needed by caller */
 				  unsigned int num_dwords);
 	} semaphore;
@@ -243,14 +267,11 @@ struct  intel_engine_cs {
 	struct list_head execlist_retired_req_list;
 	u8 next_context_status_buffer;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
-	int		(*emit_request)(struct intel_ringbuffer *ringbuf,
-					struct drm_i915_gem_request *request);
-	int		(*emit_flush)(struct intel_ringbuffer *ringbuf,
-				      struct intel_context *ctx,
+	int		(*emit_request)(struct drm_i915_gem_request *request);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
 				      u32 invalidate_domains,
 				      u32 flush_domains);
-	int		(*emit_bb_start)(struct intel_ringbuffer *ringbuf,
-					 struct intel_context *ctx,
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
 					 u64 offset, unsigned dispatch_flags);
 
 	/**
@@ -272,10 +293,6 @@ struct  intel_engine_cs {
 	struct list_head request_list;
 
 	/**
-	 * Do we have some not yet emitted requests outstanding?
-	 */
-	struct drm_i915_gem_request *outstanding_lazy_request;
-	/**
 	 * Seqno of request most recently submitted to request_list.
 	 * Used exclusively by hang checker to avoid grabbing lock while
 	 * inspecting request list.
@@ -408,8 +425,8 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
-int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
+int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
 				   u32 data)
 {
@@ -426,12 +443,11 @@ int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
 bool intel_ring_stopped(struct intel_engine_cs *ring);
-void __intel_ring_advance(struct intel_engine_cs *ring);
 
 int __must_check intel_ring_idle(struct intel_engine_cs *ring);
 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
-int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 void intel_fini_pipe_control(struct intel_engine_cs *ring);
 int intel_init_pipe_control(struct intel_engine_cs *ring);
@@ -451,11 +467,29 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 	return ringbuf->tail;
 }
 
-static inline struct drm_i915_gem_request *
-intel_ring_get_request(struct intel_engine_cs *ring)
-{
-	BUG_ON(ring->outstanding_lazy_request == NULL);
-	return ring->outstanding_lazy_request;
-}
+/*
+ * Arbitrary size for largest possible 'add request' sequence. The code paths
+ * are complex and variable. Empirical measurement shows that the worst case
+ * is ILK at 136 words. Reserving too much is better than reserving too little
+ * as that allows for corner cases that might have been missed. So the figure
+ * has been rounded up to 160 words.
+ */
+#define MIN_SPACE_FOR_ADD_REQUEST	160
+
+/*
+ * Reserve space in the ring to guarantee that the i915_add_request() call
+ * will always have sufficient room to emit its commands. The request creation
+ * code calls this automatically.
+ */
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+/* Cancel the reservation, e.g. because the request is being discarded. */
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+/* Use the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+/* Finish with the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
+
+/* Legacy ringbuffer specific portion of reservation code: */
+int intel_ring_reserve_space(struct drm_i915_gem_request *request);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
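
/*
 * Illustrative sketch (not part of the patch): the reserve/cancel/use/end
 * API declared above guarantees i915_add_request() never runs out of ring
 * space. A stand-alone model of the accounting, with hypothetical names;
 * the real code additionally handles wraparound and waits for the GPU to
 * retire requests when space is tight.
 */
#include <assert.h>
#include <stdbool.h>

struct ringbuf {
	int size;		/* total usable dwords */
	int used;		/* dwords consumed by emitted commands */
	int reserved_size;	/* set aside for the add-request sequence */
	bool reserved_in_use;
};

static void reserved_space_reserve(struct ringbuf *rb, int size)
{
	assert(!rb->reserved_in_use && rb->reserved_size == 0);
	rb->reserved_size = size;
}

static void reserved_space_cancel(struct ringbuf *rb)
{
	assert(!rb->reserved_in_use);
	rb->reserved_size = 0;
}

static void reserved_space_use(struct ringbuf *rb)
{
	assert(!rb->reserved_in_use);
	rb->reserved_in_use = true;	/* add-request may now dip into it */
}

static void reserved_space_end(struct ringbuf *rb)
{
	assert(rb->reserved_in_use);
	rb->reserved_in_use = false;
	rb->reserved_size = 0;
}

/* ring_begin: ordinary emission must leave the reservation untouched. */
static bool ring_begin(struct ringbuf *rb, int n)
{
	int limit = rb->reserved_in_use ? rb->size
					: rb->size - rb->reserved_size;

	if (rb->used + n > limit)
		return false;	/* the caller would wait or wrap here */
	rb->used += n;
	return true;
}

int main(void)
{
	struct ringbuf rb = { .size = 1024 };

	reserved_space_reserve(&rb, 160);
	reserved_space_cancel(&rb);		/* e.g. request alloc failed */
	reserved_space_reserve(&rb, 160);	/* MIN_SPACE_FOR_ADD_REQUEST */
	while (ring_begin(&rb, 64))		/* fill up the public space */
		;
	reserved_space_use(&rb);
	assert(ring_begin(&rb, 136));		/* worst-case add-request fits */
	reserved_space_end(&rb);
	return 0;
}
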
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1a45385f4d66..af7fdb3bd663 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -68,6 +68,22 @@
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 				    int power_well_id);
 
+static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+				    struct i915_power_well *power_well)
+{
+	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+	power_well->ops->enable(dev_priv, power_well);
+	power_well->hw_enabled = true;
+}
+
+static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+				     struct i915_power_well *power_well)
+{
+	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+	power_well->hw_enabled = false;
+	power_well->ops->disable(dev_priv, power_well);
+}
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -281,6 +297,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
 	BIT(POWER_DOMAIN_AUX_B) |                       \
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUX_D) |			\
@@ -300,6 +317,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 #define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
 	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
 	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
@@ -835,12 +853,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 	return enabled;
 }
 
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
-					  struct i915_power_well *power_well)
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 {
-	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
-	vlv_set_power_well(dev_priv, power_well, true);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	valleyview_enable_display_irqs(dev_priv);
@@ -858,18 +872,33 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
 	i915_redisable_vga_power_on(dev_priv->dev);
 }
 
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+	valleyview_disable_display_irqs(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+					  struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+	vlv_set_power_well(dev_priv, power_well, true);
+
+	vlv_display_power_well_init(dev_priv);
+}
+
 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
 	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
-	spin_lock_irq(&dev_priv->irq_lock);
-	valleyview_disable_display_irqs(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	vlv_display_power_well_deinit(dev_priv);
 
 	vlv_set_power_well(dev_priv, power_well, false);
-
-	vlv_power_sequencer_reset(dev_priv);
 }
 
 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
@@ -882,8 +911,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 	 * display and the reference clock for VGA
 	 * hotplug / manual detection.
 	 */
-	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+		   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 
 	vlv_set_power_well(dev_priv, power_well, true);
@@ -933,14 +962,14 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 	 */
 	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
 		phy = DPIO_PHY0;
-		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-			   DPLL_REFA_CLK_ENABLE_VLV);
-		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+			   DPLL_REF_CLK_ENABLE_VLV);
+		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+			   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
 	} else {
 		phy = DPIO_PHY1;
-		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
-			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
+			   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
 	}
 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 	vlv_set_power_well(dev_priv, power_well, true);
@@ -1042,53 +1071,29 @@ out:
 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
+	WARN_ON_ONCE(power_well->data != PIPE_A);
+
 	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
 }
 
 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
 				       struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PIPE_A &&
-		     power_well->data != PIPE_B &&
-		     power_well->data != PIPE_C);
+	WARN_ON_ONCE(power_well->data != PIPE_A);
 
 	chv_set_pipe_power_well(dev_priv, power_well, true);
 
-	if (power_well->data == PIPE_A) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		valleyview_enable_display_irqs(dev_priv);
-		spin_unlock_irq(&dev_priv->irq_lock);
-
-		/*
-		 * During driver initialization/resume we can avoid restoring the
-		 * part of the HW/SW state that will be inited anyway explicitly.
-		 */
-		if (dev_priv->power_domains.initializing)
-			return;
-
-		intel_hpd_init(dev_priv);
-
-		i915_redisable_vga_power_on(dev_priv->dev);
-	}
+	vlv_display_power_well_init(dev_priv);
 }
 
 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PIPE_A &&
-		     power_well->data != PIPE_B &&
-		     power_well->data != PIPE_C);
+	WARN_ON_ONCE(power_well->data != PIPE_A);
 
-	if (power_well->data == PIPE_A) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		valleyview_disable_display_irqs(dev_priv);
-		spin_unlock_irq(&dev_priv->irq_lock);
-	}
+	vlv_display_power_well_deinit(dev_priv);
 
 	chv_set_pipe_power_well(dev_priv, power_well, false);
-
-	if (power_well->data == PIPE_A)
-		vlv_power_sequencer_reset(dev_priv);
 }
 
 /**
@@ -1117,11 +1122,8 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
 	mutex_lock(&power_domains->lock);
 
 	for_each_power_well(i, power_well, BIT(domain), power_domains) {
-		if (!power_well->count++) {
-			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
-			power_well->ops->enable(dev_priv, power_well);
-			power_well->hw_enabled = true;
-		}
+		if (!power_well->count++)
+			intel_power_well_enable(dev_priv, power_well);
 	}
 
 	power_domains->domain_use_count[domain]++;
@@ -1155,11 +1157,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
 		WARN_ON(!power_well->count);
 
-		if (!--power_well->count && i915.disable_power_well) {
-			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
-			power_well->hw_enabled = false;
-			power_well->ops->disable(dev_priv, power_well);
-		}
+		if (!--power_well->count && i915.disable_power_well)
+			intel_power_well_disable(dev_priv, power_well);
 	}
 
 	mutex_unlock(&power_domains->lock);
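
/*
 * Illustrative sketch (not part of the patch): intel_display_power_get/put
 * above is plain reference counting -- the hardware enable happens only on
 * the 0->1 transition and the disable on the 1->0 transition, with the new
 * intel_power_well_enable/disable helpers doing the actual work. A minimal
 * stand-alone model, with hypothetical names:
 */
#include <assert.h>
#include <stdbool.h>

struct power_well {
	int count;
	bool hw_enabled;
};

static void power_well_get(struct power_well *well)
{
	if (!well->count++)
		well->hw_enabled = true;	/* ops->enable() in the driver */
}

static void power_well_put(struct power_well *well)
{
	assert(well->count > 0);
	if (!--well->count)
		well->hw_enabled = false;	/* ops->disable() in the driver */
}

int main(void)
{
	struct power_well well = { 0 };

	power_well_get(&well);		/* 0 -> 1: hardware powers up */
	power_well_get(&well);		/* 1 -> 2: no hardware access */
	assert(well.hw_enabled);
	power_well_put(&well);		/* 2 -> 1: still enabled */
	assert(well.hw_enabled);
	power_well_put(&well);		/* 1 -> 0: hardware powers down */
	assert(!well.hw_enabled);
	return 0;
}
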
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aa2fd751609c..c98098e884cc 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1508,51 +1508,6 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
 	intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
-{
-	struct drm_crtc *crtc;
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-
-	/* dvo supports only 2 dpms states. */
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	crtc = intel_sdvo->base.base.crtc;
-	if (!crtc) {
-		intel_sdvo->base.connectors_active = false;
-		return;
-	}
-
-	/* We set active outputs manually below in case pipe dpms doesn't change
-	 * due to cloning. */
-	if (mode != DRM_MODE_DPMS_ON) {
-		intel_sdvo_set_active_outputs(intel_sdvo, 0);
-		if (0)
-			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
-
-		intel_sdvo->base.connectors_active = false;
-
-		intel_crtc_update_dpms(crtc);
-	} else {
-		intel_sdvo->base.connectors_active = true;
-
-		intel_crtc_update_dpms(crtc);
-
-		if (0)
-			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
-		intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
-	}
-
-	intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_sdvo_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
@@ -2190,7 +2145,7 @@ done:
 }
 
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
-	.dpms = intel_sdvo_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_sdvo_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_sdvo_set_property,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8193a35388d7..9d8af2f8a875 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -75,10 +75,8 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
  * until a subsequent call to intel_pipe_update_end(). That is done to
  * avoid random delays. The value written to @start_vbl_count should be
  * supplied to intel_pipe_update_end() for error checking.
- *
- * Return: true if the call was successful
  */
-bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
 {
 	struct drm_device *dev = crtc->base.dev;
 	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
@@ -96,13 +94,14 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
 	min = vblank_start - usecs_to_scanlines(mode, 100);
 	max = vblank_start - 1;
 
+	local_irq_disable();
+	*start_vbl_count = 0;
+
 	if (min <= 0 || max <= 0)
-		return false;
+		return;
 
 	if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
-		return false;
-
-	local_irq_disable();
+		return;
 
 	trace_i915_pipe_update_start(crtc, min, max);
 
@@ -138,8 +137,6 @@ bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
 	*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
 
 	trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
-
-	return true;
 }
 
 /**
@@ -161,7 +158,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
 
 	local_irq_enable();
 
-	if (start_vbl_count != end_vbl_count)
+	if (start_vbl_count && start_vbl_count != end_vbl_count)
 		DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
 			  pipe_name(pipe), start_vbl_count, end_vbl_count);
 }
@@ -182,7 +179,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
 	const int plane = intel_plane->plane + 1;
 	u32 plane_ctl, stride_div, stride;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(drm_plane->state)->ckey;
 	unsigned long surf_addr;
 	u32 tile_height, plane_offset, plane_size;
 	unsigned int rotation;
@@ -272,7 +270,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
 }
 
 static void
-skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = dplane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -344,7 +342,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 	u32 sprctl;
 	unsigned long sprsurf_offset, linear_offset;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(dplane->state)->ckey;
 
 	sprctl = SP_ENABLE;
 
@@ -400,10 +399,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SP_TILED;
 
-	intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
-				       pixel_size, true,
-				       src_w != crtc_w || src_h != crtc_h);
-
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
@@ -411,7 +406,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 	crtc_h--;
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
-	sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+	sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
+							&x, &y,
 							obj->tiling_mode,
 							pixel_size,
 							fb->pitches[0]);
@@ -455,7 +451,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
 }
 
 static void
-vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = dplane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -467,8 +463,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
 
 	I915_WRITE(SPSURF(pipe, plane), 0);
 	POSTING_READ(SPSURF(pipe, plane));
-
-	intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
 }
 
 static void
@@ -487,7 +481,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	u32 sprctl, sprscale = 0;
 	unsigned long sprsurf_offset, linear_offset;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(plane->state)->ckey;
 
 	sprctl = SPRITE_ENABLE;
 
@@ -546,7 +541,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
 	sprsurf_offset =
-		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
 					       pixel_size, fb->pitches[0]);
 	linear_offset -= sprsurf_offset;
 
@@ -595,7 +591,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 }
 
 static void
-ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -627,7 +623,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	unsigned long dvssurf_offset, linear_offset;
 	u32 dvscntr, dvsscale;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(plane->state)->ckey;
 
 	dvscntr = DVS_ENABLE;
 
@@ -682,7 +679,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
 	dvssurf_offset =
-		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
 					       pixel_size, fb->pitches[0]);
 	linear_offset -= dvssurf_offset;
 
@@ -722,7 +720,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 }
 
 static void
-ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -739,11 +737,12 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
 
 static int
 intel_check_sprite_plane(struct drm_plane *plane,
+			 struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *state)
 {
 	struct drm_device *dev = plane->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
-	struct intel_crtc_state *crtc_state;
+	struct drm_crtc *crtc = state->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *fb = state->base.fb;
 	int crtc_x, crtc_y;
@@ -756,15 +755,10 @@ intel_check_sprite_plane(struct drm_plane *plane,
 	int max_scale, min_scale;
 	bool can_scale;
 	int pixel_size;
-	int ret;
-
-	intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
-	crtc_state = state->base.state ?
-		intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
 
 	if (!fb) {
 		state->visible = false;
-		goto finish;
+		return 0;
 	}
 
 	/* Don't modify another pipe's plane */
@@ -782,7 +776,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
 	/* setup can_scale, min_scale, max_scale */
 	if (INTEL_INFO(dev)->gen >= 9) {
 		/* use scaler when colorkey is not required */
-		if (intel_plane->ckey.flags == I915_SET_COLORKEY_NONE) {
+		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
 			can_scale = 1;
 			min_scale = 1;
 			max_scale = skl_max_scale(intel_crtc, crtc_state);
@@ -802,7 +796,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
 	 * coordinates and sizes. We probably need some way to decide whether
 	 * more strict checking should be done instead.
 	 */
-
 	drm_rect_rotate(src, fb->width << 16, fb->height << 16,
 			state->base.rotation);
 
@@ -812,7 +805,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
 	vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
 	BUG_ON(vscale < 0);
 
-	state->visible =  drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+	state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
 
 	crtc_x = dst->x1;
 	crtc_y = dst->y1;
@@ -917,36 +910,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
 	dst->y1 = crtc_y;
 	dst->y2 = crtc_y + crtc_h;
 
-finish:
-	/*
-	 * If the sprite is completely covering the primary plane,
-	 * we can disable the primary and save power.
-	 */
-	if (intel_crtc->active) {
-		intel_crtc->atomic.fb_bits |=
-			INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
-
-		if (intel_wm_need_update(plane, &state->base))
-			intel_crtc->atomic.update_wm = true;
-
-		if (!state->visible) {
-			/*
-			 * Avoid underruns when disabling the sprite.
-			 * FIXME remove once watermark updates are done properly.
-			 */
-			intel_crtc->atomic.wait_vblank = true;
-			intel_crtc->atomic.update_sprite_watermarks |=
-				(1 << drm_plane_index(plane));
-		}
-	}
-
-	if (INTEL_INFO(dev)->gen >= 9) {
-		ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane,
-			state, 0);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }
 
@@ -955,34 +918,27 @@ intel_commit_sprite_plane(struct drm_plane *plane,
 			  struct intel_plane_state *state)
 {
 	struct drm_crtc *crtc = state->base.crtc;
-	struct intel_crtc *intel_crtc;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *fb = state->base.fb;
-	int crtc_x, crtc_y;
-	unsigned int crtc_w, crtc_h;
-	uint32_t src_x, src_y, src_w, src_h;
 
 	crtc = crtc ? crtc : plane->crtc;
-	intel_crtc = to_intel_crtc(crtc);
 
 	plane->fb = fb;
 
-	if (intel_crtc->active) {
-		if (state->visible) {
-			crtc_x = state->dst.x1;
-			crtc_y = state->dst.y1;
-			crtc_w = drm_rect_width(&state->dst);
-			crtc_h = drm_rect_height(&state->dst);
-			src_x = state->src.x1 >> 16;
-			src_y = state->src.y1 >> 16;
-			src_w = drm_rect_width(&state->src) >> 16;
-			src_h = drm_rect_height(&state->src) >> 16;
-			intel_plane->update_plane(plane, crtc, fb,
-						  crtc_x, crtc_y, crtc_w, crtc_h,
-						  src_x, src_y, src_w, src_h);
-		} else {
-			intel_plane->disable_plane(plane, crtc, false);
-		}
+	if (!crtc->state->active)
+		return;
+
+	if (state->visible) {
+		intel_plane->update_plane(plane, crtc, fb,
+					  state->dst.x1, state->dst.y1,
+					  drm_rect_width(&state->dst),
+					  drm_rect_height(&state->dst),
+					  state->src.x1 >> 16,
+					  state->src.y1 >> 16,
+					  drm_rect_width(&state->src) >> 16,
+					  drm_rect_height(&state->src) >> 16);
+	} else {
+		intel_plane->disable_plane(plane, crtc);
 	}
 }
 
@@ -991,7 +947,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 {
 	struct drm_intel_sprite_colorkey *set = data;
 	struct drm_plane *plane;
-	struct intel_plane *intel_plane;
+	struct drm_plane_state *plane_state;
+	struct drm_atomic_state *state;
+	struct drm_modeset_acquire_ctx ctx;
 	int ret = 0;
 
 	/* Make sure we don't try to enable both src & dest simultaneously */
@@ -1002,50 +960,41 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 	    set->flags & I915_SET_COLORKEY_DESTINATION)
 		return -EINVAL;
 
-	drm_modeset_lock_all(dev);
-
 	plane = drm_plane_find(dev, set->plane_id);
-	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+		return -ENOENT;
 
-	intel_plane = to_intel_plane(plane);
+	drm_modeset_acquire_init(&ctx, 0);
 
-	if (INTEL_INFO(dev)->gen >= 9) {
-		/* plane scaling and colorkey are mutually exclusive */
-		if (to_intel_plane_state(plane->state)->scaler_id >= 0) {
-			DRM_ERROR("colorkey not allowed with scaler\n");
-			ret = -EINVAL;
-			goto out_unlock;
-		}
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out;
 	}
+	state->acquire_ctx = &ctx;
+
+	while (1) {
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		ret = PTR_ERR_OR_ZERO(plane_state);
+		if (!ret) {
+			to_intel_plane_state(plane_state)->ckey = *set;
+			ret = drm_atomic_commit(state);
+		}
 
-	intel_plane->ckey = *set;
-
-	/*
-	 * The only way this could fail would be due to
-	 * the current plane state being unsupportable already,
-	 * and we dont't consider that an error for the
-	 * colorkey ioctl. So just ignore any error.
-	 */
-	intel_plane_restore(plane);
+		if (ret != -EDEADLK)
+			break;
 
-out_unlock:
-	drm_modeset_unlock_all(dev);
-	return ret;
-}
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
+	}
 
-int intel_plane_restore(struct drm_plane *plane)
-{
-	if (!plane->crtc || !plane->state->fb)
-		return 0;
+	if (ret)
+		drm_atomic_state_free(state);
 
-	return drm_plane_helper_update(plane, plane->crtc, plane->state->fb,
-				       plane->state->crtc_x, plane->state->crtc_y,
-				       plane->state->crtc_w, plane->state->crtc_h,
-				       plane->state->src_x, plane->state->src_y,
-				       plane->state->src_w, plane->state->src_h);
+out:
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+	return ret;
 }
 
 static const uint32_t ilk_plane_formats[] = {
@@ -1172,9 +1121,9 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 
 	intel_plane->pipe = pipe;
 	intel_plane->plane = plane;
+	intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe);
 	intel_plane->check_plane = intel_check_sprite_plane;
 	intel_plane->commit_plane = intel_commit_sprite_plane;
-	intel_plane->ckey.flags = I915_SET_COLORKEY_NONE;
 	possible_crtcs = (1 << pipe);
 	ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
 				       &intel_plane_funcs,
@@ -1189,6 +1138,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 
 	drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
 
- out:
+out:
 	return ret;
 }
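
/*
 * Illustrative sketch (not part of the patch): the colorkey ioctl above is
 * the canonical atomic retry loop -- build state, try to commit, and on
 * -EDEADLK drop every lock via the acquire context and start over. A
 * stand-alone model of the control flow, with hypothetical names; the
 * simulated commit deadlocks once before succeeding.
 */
#include <assert.h>
#include <errno.h>
#include <stdio.h>

static int commits_attempted;

static int try_commit(void)
{
	/* Pretend another thread held one of our locks the first time. */
	return ++commits_attempted == 1 ? -EDEADLK : 0;
}

static int set_colorkey(void)
{
	int ret;

	/* drm_modeset_acquire_init(&ctx, 0) */
	while (1) {
		ret = try_commit();	/* get plane state + drm_atomic_commit */
		if (ret != -EDEADLK)
			break;
		/*
		 * drm_atomic_state_clear(state);
		 * drm_modeset_backoff(&ctx); -- drop all locks, then block
		 * on the contended one so the retry cannot livelock.
		 */
	}
	/* drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx) */
	return ret;
}

int main(void)
{
	assert(set_colorkey() == 0);
	assert(commits_attempted == 2);	/* one deadlock, one success */
	printf("committed after %d attempts\n", commits_attempted);
	return 0;
}
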
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 8b9d325bda3c..0568ae6ec9dd 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1509,7 +1509,7 @@ out:
 }
 
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_tv_detect,
 	.destroy = intel_tv_destroy,
 	.set_property = intel_tv_set_property,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 260389acfb77..9d3c2e420d2b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1467,20 +1467,80 @@ static int gen6_do_reset(struct drm_device *dev)
 	return ret;
 }
 
-int intel_gpu_reset(struct drm_device *dev)
+static int wait_for_register(struct drm_i915_private *dev_priv,
+			     const u32 reg,
+			     const u32 mask,
+			     const u32 value,
+			     const unsigned long timeout_ms)
+{
+	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+}
+
+static int gen8_do_reset(struct drm_device *dev)
 {
-	if (INTEL_INFO(dev)->gen >= 6)
-		return gen6_do_reset(dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *engine;
+	int i;
+
+	for_each_ring(engine, dev_priv, i) {
+		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+		if (wait_for_register(dev_priv,
+				      RING_RESET_CTL(engine->mmio_base),
+				      RESET_CTL_READY_TO_RESET,
+				      RESET_CTL_READY_TO_RESET,
+				      700)) {
+			DRM_ERROR("%s: reset request timeout\n", engine->name);
+			goto not_ready;
+		}
+	}
+
+	return gen6_do_reset(dev);
+
+not_ready:
+	for_each_ring(engine, dev_priv, i)
+		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+
+	return -EIO;
+}
+
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
+{
+	if (!i915.reset)
+		return NULL;
+
+	if (INTEL_INFO(dev)->gen >= 8)
+		return gen8_do_reset;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		return gen6_do_reset;
 	else if (IS_GEN5(dev))
-		return ironlake_do_reset(dev);
+		return ironlake_do_reset;
 	else if (IS_G4X(dev))
-		return g4x_do_reset(dev);
+		return g4x_do_reset;
 	else if (IS_G33(dev))
-		return g33_do_reset(dev);
+		return g33_do_reset;
 	else if (INTEL_INFO(dev)->gen >= 3)
-		return i915_do_reset(dev);
+		return i915_do_reset;
 	else
+		return NULL;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+	int (*reset)(struct drm_device *);
+
+	reset = intel_get_gpu_reset(dev);
+	if (reset == NULL)
 		return -ENODEV;
+
+	return reset(dev);
+}
+
+bool intel_has_gpu_reset(struct drm_device *dev)
+{
+	return intel_get_gpu_reset(dev) != NULL;
 }
 
 void intel_uncore_check_errors(struct drm_device *dev)
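
/*
 * Illustrative sketch (not part of the patch): intel_get_gpu_reset() above
 * turns the old if/else reset ladder into a capability query -- the chosen
 * function pointer both performs the reset and, by being non-NULL, answers
 * intel_has_gpu_reset(). A stand-alone model, with hypothetical names:
 */
#include <assert.h>
#include <errno.h>
#include <stddef.h>

typedef int (*reset_fn)(void);

static int gen8_reset(void) { return 0; }
static int gen6_reset(void) { return 0; }

/* Pick the reset routine for a device generation; NULL = unsupported. */
static reset_fn get_gpu_reset(int gen)
{
	if (gen >= 8)
		return gen8_reset;
	if (gen >= 6)
		return gen6_reset;
	return NULL;
}

static int gpu_reset(int gen)
{
	reset_fn reset = get_gpu_reset(gen);

	if (!reset)
		return -ENODEV;
	return reset();
}

static int has_gpu_reset(int gen)
{
	return get_gpu_reset(gen) != NULL;
}

int main(void)
{
	assert(has_gpu_reset(9));
	assert(gpu_reset(9) == 0);
	assert(!has_gpu_reset(2));	/* this model has no pre-gen6 path */
	assert(gpu_reset(2) == -ENODEV);
	return 0;
}
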
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index a3ecf1069b76..644edf65dbe0 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -75,6 +75,11 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
 	},
 };
 
+/*
+ * Resistance term: 133 Ohm cfg
+ * PREEMP config: 0.00
+ * TX/CK level: 10
+ */
 static const struct dw_hdmi_phy_config imx_phy_config[] = {
 	/*pixelclk   symbol   term   vlev */
 	{ 148500000, 0x800d, 0x0005, 0x01ad},
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 9f9780b7ddf0..4f2068fe5d88 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,18 +70,22 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
 	BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
 	BUG_ON(pixels_current == pixels_prev);
 
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj)
+		return -ENOENT;
+
 	ret = mgag200_bo_reserve(pixels_1, true);
 	if (ret) {
 		WREG8(MGA_CURPOSXL, 0);
 		WREG8(MGA_CURPOSXH, 0);
-		return ret;
+		goto out_unref;
 	}
 	ret = mgag200_bo_reserve(pixels_2, true);
 	if (ret) {
 		WREG8(MGA_CURPOSXL, 0);
 		WREG8(MGA_CURPOSXH, 0);
 		mgag200_bo_unreserve(pixels_1);
-		return ret;
+		goto out_unreserve1;
 	}
 
 	if (!handle) {
@@ -106,16 +110,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
 		}
 	}
 
-	mutex_lock(&dev->struct_mutex);
-	obj = drm_gem_object_lookup(dev, file_priv, handle);
-	if (!obj) {
-		mutex_unlock(&dev->struct_mutex);
-		ret = -ENOENT;
-		goto out1;
-	}
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-
 	bo = gem_to_mga_bo(obj);
 	ret = mgag200_bo_reserve(bo, true);
 	if (ret) {
@@ -252,7 +246,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
 	if (ret)
 		mga_hide_cursor(mdev);
 	mgag200_bo_unreserve(pixels_1);
+out_unreserve1:
 	mgag200_bo_unreserve(pixels_2);
+out_unref:
+	drm_gem_object_unreference_unlocked(obj);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 97745991544d..b0af77454d52 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -35,6 +35,7 @@ static const struct pci_device_id pciidlist[] = {
 	{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
 	{ PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
 	{ PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+	{ PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 },
 	{0,}
 };
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index e9eea1d4e7c3..912151c36d59 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -180,6 +180,7 @@ enum mga_type {
 	G200_EV,
 	G200_EH,
 	G200_ER,
+	G200_EW3,
 };
 
 #define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index c36b8304042b..87de15ea1f93 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -101,7 +101,7 @@ static void mga_fillrect(struct fb_info *info,
 			 const struct fb_fillrect *rect)
 {
 	struct mga_fbdev *mfbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -110,7 +110,7 @@ static void mga_copyarea(struct fb_info *info,
 			 const struct fb_copyarea *area)
 {
 	struct mga_fbdev *mfbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -119,7 +119,7 @@ static void mga_imageblit(struct fb_info *info,
 			  const struct fb_image *image)
 {
 	struct mga_fbdev *mfbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -166,8 +166,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_gem_object *gobj = NULL;
-	struct device *device = &dev->pdev->dev;
-	struct mgag200_bo *bo;
 	int ret;
 	void *sysram;
 	int size;
@@ -185,15 +183,14 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
 		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
 		return ret;
 	}
-	bo = gem_to_mga_bo(gobj);
 
 	sysram = vmalloc(size);
 	if (!sysram)
 		return -ENOMEM;
 
-	info = framebuffer_alloc(0, device);
-	if (info == NULL)
-		return -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
 
 	info->par = mfbdev;
 
@@ -208,14 +205,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
 
 	/* setup helper */
 	mfbdev->helper.fb = fb;
-	mfbdev->helper.fbdev = info;
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
-		ret = -ENOMEM;
-		goto out;
-	}
 
 	strcpy(info->fix.id, "mgadrmfb");
 
@@ -223,11 +212,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
 	info->fbops = &mgag200fb_ops;
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out;
-	}
 	info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = mdev->mc.vram_size;
 
@@ -242,24 +226,15 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
 	DRM_DEBUG_KMS("allocated %dx%d\n",
 		      fb->width, fb->height);
 	return 0;
-out:
-	return ret;
 }
 
 static int mga_fbdev_destroy(struct drm_device *dev,
 				struct mga_fbdev *mfbdev)
 {
-	struct fb_info *info;
 	struct mga_framebuffer *mfb = &mfbdev->mfb;
 
-	if (mfbdev->helper.fbdev) {
-		info = mfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&mfbdev->helper);
+	drm_fb_helper_release_fbi(&mfbdev->helper);
 
 	if (mfb->obj) {
 		drm_gem_object_unreference_unlocked(mfb->obj);
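
/*
 * Illustrative sketch (not part of the patch): the fbdev conversion above
 * swaps the raw sys_fillrect/copyarea/imageblit calls for the
 * drm_fb_helper_sys_* wrappers but keeps the driver's decorator shape:
 * run the generic drawing op, then push the touched rectangle through
 * mga_dirty_update(). A stand-alone model of that pattern, with
 * hypothetical names:
 */
#include <assert.h>

struct rect { int x, y, w, h; };

static int pixels_drawn;
static struct rect last_dirty;

/* Stand-in for drm_fb_helper_sys_fillrect(): draw into the shadow buffer. */
static void sys_draw(const struct rect *r)
{
	pixels_drawn += r->w * r->h;
}

/* Stand-in for mga_dirty_update(): schedule a copy to VRAM. */
static void dirty_update(const struct rect *r)
{
	last_dirty = *r;
}

/* The driver's op: generic draw first, then flag the region dirty. */
static void fillrect(const struct rect *r)
{
	sys_draw(r);
	dirty_update(r);
}

int main(void)
{
	struct rect r = { 10, 20, 64, 32 };

	fillrect(&r);
	assert(pixels_drawn == 64 * 32);
	assert(last_dirty.w == 64 && last_dirty.h == 32);
	return 0;
}
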
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index d3dcf54e6233..10535e3b75f2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -101,6 +101,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
 	case G200_SE_B:
 	case G200_EV:
 	case G200_WB:
+	case G200_EW3:
 		data = 1;
 		clock = 2;
 		break;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index f6b283b8375e..de06388069e7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -82,12 +82,19 @@ static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
 	int orig;
 	int test1, test2;
 	int orig1, orig2;
+	unsigned int vram_size;
 
 	/* Probe */
 	orig = ioread16(mem);
 	iowrite16(0, mem);
 
-	for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+	vram_size = mdev->mc.vram_window;
+
+	if (mdev->type == G200_EW3 && vram_size >= 0x1000000)
+		vram_size -= 0x400000;
+
+	for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
 		orig1 = ioread8(mem + offset);
 		orig2 = ioread8(mem + offset + 0x100);
 
@@ -345,23 +352,15 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
 		     uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct mgag200_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_mga_bo(obj);
 	*offset = mgag200_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-
+	drm_gem_object_unreference_unlocked(obj);
+	return 0;
 }
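
/*
 * Illustrative sketch (not part of the patch): mga_probe_vram() above sizes
 * the aperture by writing test patterns at increasing offsets and reading
 * them back; the G200_EW3 change simply stops 4MB short of a >=16MB window.
 * A stand-alone model over an in-memory "aperture", with hypothetical names:
 */
#include <assert.h>
#include <stdint.h>

#define APERTURE	(16u << 20)	/* 16MB window */
#define REAL_VRAM	(8u << 20)	/* only 8MB actually responds */
#define STEP		0x4000u

static uint8_t mem[APERTURE];

/* Writes past the real VRAM are dropped, reads return 0 (open bus). */
static void mmio_write8(uint32_t off, uint8_t v)
{
	if (off < REAL_VRAM)
		mem[off] = v;
}

static uint8_t mmio_read8(uint32_t off)
{
	return off < REAL_VRAM ? mem[off] : 0;
}

static uint32_t probe_vram(uint32_t window)
{
	uint32_t offset;

	for (offset = 0x100000; offset < window; offset += STEP) {
		mmio_write8(offset, 0x55);
		mmio_write8(offset + 0x100, 0xaa);
		if (mmio_read8(offset) != 0x55 ||
		    mmio_read8(offset + 0x100) != 0xaa)
			break;	/* pattern lost: ran off the end of VRAM */
	}
	return offset;
}

int main(void)
{
	assert(probe_vram(APERTURE) == REAL_VRAM);
	return 0;
}
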
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ad4b9010dfb0..c99d3fe12881 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -104,6 +104,8 @@ static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
+#define P_ARRAY_SIZE 9
+
 static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
@@ -111,37 +113,97 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 	unsigned int testp, testm, testn;
 	unsigned int p, m, n;
 	unsigned int computed;
+	unsigned int pvalues_e4[P_ARRAY_SIZE] = {16, 14, 12, 10, 8, 6, 4, 2, 1};
+	unsigned int fvv;
+	unsigned int i;
+
+	if (mdev->unique_rev_id <= 0x03) {
+
+		m = n = p = 0;
+		vcomax = 320000;
+		vcomin = 160000;
+		pllreffreq = 25000;
+
+		delta = 0xffffffff;
+		permitteddelta = clock * 5 / 1000;
+
+		for (testp = 8; testp > 0; testp /= 2) {
+			if (clock * testp > vcomax)
+				continue;
+			if (clock * testp < vcomin)
+				continue;
+
+			for (testn = 17; testn < 256; testn++) {
+				for (testm = 1; testm < 32; testm++) {
+					computed = (pllreffreq * testn) /
+						(testm * testp);
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						m = testm - 1;
+						n = testn - 1;
+						p = testp - 1;
+					}
+				}
+			}
+		}
+	} else {
 
-	m = n = p = 0;
-	vcomax = 320000;
-	vcomin = 160000;
-	pllreffreq = 25000;
 
-	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
+		m = n = p = 0;
+		vcomax = 1600000;
+		vcomin = 800000;
+		pllreffreq = 25000;
 
-	for (testp = 8; testp > 0; testp /= 2) {
-		if (clock * testp > vcomax)
-			continue;
-		if (clock * testp < vcomin)
-			continue;
+		if (clock < 25000)
+			clock = 25000;
 
-		for (testn = 17; testn < 256; testn++) {
-			for (testm = 1; testm < 32; testm++) {
-				computed = (pllreffreq * testn) /
-					(testm * testp);
-				if (computed > clock)
-					tmpdelta = computed - clock;
-				else
-					tmpdelta = clock - computed;
-				if (tmpdelta < delta) {
-					delta = tmpdelta;
-					m = testm - 1;
-					n = testn - 1;
-					p = testp - 1;
+		clock = clock * 2;
+
+		delta = 0xffffffff;
+		/* Permitted delta is 0.5%, per the VESA specification */
+		permitteddelta = clock * 5 / 1000;
+
+		for (i = 0; i < P_ARRAY_SIZE; i++) {
+			testp = pvalues_e4[i];
+
+			if ((clock * testp) > vcomax)
+				continue;
+			if ((clock * testp) < vcomin)
+				continue;
+
+			for (testn = 50; testn <= 256; testn++) {
+				for (testm = 1; testm <= 32; testm++) {
+					computed = (pllreffreq * testn) /
+						(testm * testp);
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						m = testm - 1;
+						n = testn - 1;
+						p = testp - 1;
+					}
 				}
 			}
 		}
+
+		fvv = pllreffreq * testn / testm;
+		fvv = (fvv - 800000) / 50000;
+
+		if (fvv > 15)
+			fvv = 15;
+
+		p |= (fvv << 4);
+		m |= 0x80;
+
+		clock = clock / 2;
 	}
 
 	if (delta > permitteddelta) {
@@ -158,8 +220,8 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
-	unsigned int delta, tmpdelta, permitteddelta;
-	unsigned int testp, testm, testn;
+	unsigned int delta, tmpdelta;
+	unsigned int testp, testm, testn, testp2;
 	unsigned int p, m, n;
 	unsigned int computed;
 	int i, j, tmpcount, vcount;
@@ -167,32 +229,71 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	u8 tmp;
 
 	m = n = p = 0;
-	vcomax = 550000;
-	vcomin = 150000;
-	pllreffreq = 48000;
 
 	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
 
-	for (testp = 1; testp < 9; testp++) {
-		if (clock * testp > vcomax)
-			continue;
-		if (clock * testp < vcomin)
-			continue;
+	if (mdev->type == G200_EW3) {
+
+		vcomax = 800000;
+		vcomin = 400000;
+		pllreffreq = 25000;
+
+		for (testp = 1; testp < 8; testp++) {
+			for (testp2 = 1; testp2 < 8; testp2++) {
+				if (testp < testp2)
+					continue;
+				if ((clock * testp * testp2) > vcomax)
+					continue;
+				if ((clock * testp * testp2) < vcomin)
+					continue;
+				for (testm = 1; testm < 26; testm++) {
+					for (testn = 32; testn < 2048 ; testn++) {
+						computed = (pllreffreq * testn) /
+							(testm * testp * testp2);
+						if (computed > clock)
+							tmpdelta = computed - clock;
+						else
+							tmpdelta = clock - computed;
+						if (tmpdelta < delta) {
+							delta = tmpdelta;
+							m = ((testn & 0x100) >> 1) |
+								(testm);
+							n = (testn & 0xFF);
+							p = ((testn & 0x600) >> 3) |
+								(testp2 << 3) |
+								(testp);
+						}
+					}
+				}
+			}
+		}
+	} else {
 
-		for (testm = 1; testm < 17; testm++) {
-			for (testn = 1; testn < 151; testn++) {
-				computed = (pllreffreq * testn) /
-					(testm * testp);
-				if (computed > clock)
-					tmpdelta = computed - clock;
-				else
-					tmpdelta = clock - computed;
-				if (tmpdelta < delta) {
-					delta = tmpdelta;
-					n = testn - 1;
-					m = (testm - 1) | ((n >> 1) & 0x80);
-					p = testp - 1;
+		vcomax = 550000;
+		vcomin = 150000;
+		pllreffreq = 48000;
+
+		for (testp = 1; testp < 9; testp++) {
+			if (clock * testp > vcomax)
+				continue;
+			if (clock * testp < vcomin)
+				continue;
+
+			for (testm = 1; testm < 17; testm++) {
+				for (testn = 1; testn < 151; testn++) {
+					computed = (pllreffreq * testn) /
+						(testm * testp);
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						n = testn - 1;
+						m = (testm - 1) |
+							((n >> 1) & 0x80);
+						p = testp - 1;
+					}
 				}
 			}
 		}
@@ -298,7 +399,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
-	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int delta, tmpdelta;
 	unsigned int testp, testm, testn;
 	unsigned int p, m, n;
 	unsigned int computed;
@@ -310,7 +411,6 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 	pllreffreq = 50000;
 
 	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
 
 	for (testp = 16; testp > 0; testp--) {
 		if (clock * testp > vcomax)
@@ -392,7 +492,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
-	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int delta, tmpdelta;
 	unsigned int testp, testm, testn;
 	unsigned int p, m, n;
 	unsigned int computed;
@@ -406,7 +506,6 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 	pllreffreq = 33333;
 
 	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
 
 	for (testp = 16; testp > 0; testp >>= 1) {
 		if (clock * testp > vcomax)
@@ -572,6 +671,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
 		return mga_g200se_set_plls(mdev, clock);
 		break;
 	case G200_WB:
+	case G200_EW3:
 		return mga_g200wb_set_plls(mdev, clock);
 		break;
 	case G200_EV:
@@ -823,6 +923,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 		option2 = 0x00008000;
 		break;
 	case G200_WB:
+	case G200_EW3:
 		dacvalue[MGA1064_VREF_CTL] = 0x07;
 		option = 0x41049120;
 		option2 = 0x0000b000;
@@ -878,7 +979,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 		if (IS_G200_SE(mdev) &&
 		    ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
 			continue;
-		if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+		if ((mdev->type == G200_EV ||
+		    mdev->type == G200_WB ||
+		    mdev->type == G200_EH ||
+		    mdev->type == G200_EW3) &&
 		    (i >= 0x44) && (i <= 0x4e))
 			continue;
 
@@ -980,7 +1084,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
 	ext_vga[4] = 0;
-	if (mdev->type == G200_WB)
+	if (mdev->type == G200_WB || mdev->type == G200_EW3)
 		ext_vga[1] |= 0x88;
 
 	/* Set pixel clocks */
@@ -996,6 +1100,9 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 	if (mdev->type == G200_ER)
 		WREG_ECRT(0x24, 0x5);
 
+	if (mdev->type == G200_EW3)
+		WREG_ECRT(0x34, 0x5);
+
 	if (mdev->type == G200_EV) {
 		WREG_ECRT(6, 0);
 	}
@@ -1208,7 +1315,7 @@ static void mga_crtc_prepare(struct drm_crtc *crtc)
 		WREG_SEQ(1, tmp | 0x20);
 	}
 
-	if (mdev->type == G200_WB)
+	if (mdev->type == G200_WB || mdev->type == G200_EW3)
 		mga_g200wb_prepare(crtc);
 
 	WREG_CRT(17, 0);
@@ -1225,7 +1332,7 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	u8 tmp;
 
-	if (mdev->type == G200_WB)
+	if (mdev->type == G200_WB || mdev->type == G200_EW3)
 		mga_g200wb_commit(crtc);
 
 	if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
@@ -1495,7 +1602,7 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
 				> (24400 * 1024))
 				return MODE_BANDWIDTH;
-		} else if (mdev->unique_rev_id >= 0x02) {
+		} else if (mdev->unique_rev_id == 0x02) {
 			if (mode->hdisplay > 1920)
 				return MODE_VIRTUAL_X;
 			if (mode->vdisplay > 1200)
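
/*
 * Illustrative sketch (not part of the patch): every mga_g200*_set_plls()
 * variant above is the same brute-force search -- try every (N, M, P)
 * divider combination whose VCO frequency (pixel clock x P) stays inside
 * the VCO range, and keep the one whose output f_ref * N / (M * P) lands
 * closest to the requested pixel clock, rejecting any result outside the
 * 0.5% VESA tolerance. A stand-alone version with hypothetical limits:
 */
#include <assert.h>
#include <stdio.h>

#define PLLREFFREQ	25000u		/* kHz */
#define VCOMIN		160000u
#define VCOMAX		320000u

static int pll_search(unsigned int clock, unsigned int *np,
		      unsigned int *mp, unsigned int *pp)
{
	unsigned int delta = 0xffffffff;
	unsigned int permitteddelta = clock * 5 / 1000;	/* 0.5% */
	unsigned int testp, testn, testm;

	for (testp = 8; testp > 0; testp /= 2) {
		if (clock * testp > VCOMAX || clock * testp < VCOMIN)
			continue;	/* VCO would run out of range */

		for (testn = 17; testn < 256; testn++) {
			for (testm = 1; testm < 32; testm++) {
				unsigned int computed =
					PLLREFFREQ * testn / (testm * testp);
				unsigned int tmpdelta = computed > clock ?
					computed - clock : clock - computed;

				if (tmpdelta < delta) {
					delta = tmpdelta;
					*np = testn;
					*mp = testm;
					*pp = testp;
				}
			}
		}
	}
	return delta <= permitteddelta ? 0 : -1;
}

int main(void)
{
	unsigned int n = 0, m = 0, p = 0;

	assert(pll_search(65000, &n, &m, &p) == 0);	/* 65MHz pixel clock */
	printf("N=%u M=%u P=%u -> %u kHz\n",
	       n, m, p, PLLREFFREQ * n / (m * p));
	return 0;
}
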
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index d16964ea0ed4..05108b505fbf 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -378,7 +378,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int mgag200_bo_unpin(struct mgag200_bo *bo)
 {
-	int i, ret;
+	int i;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
 		return 0;
@@ -389,11 +389,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
-	if (ret)
-		return ret;
-
-	return 0;
+	return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 }
 
 int mgag200_bo_push_sysram(struct mgag200_bo *bo)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 08ba8d0d93f5..8e6c7c638e24 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -9,6 +9,7 @@ config DRM_MSM
 	select DRM_PANEL
 	select SHMEM
 	select TMPFS
+	select QCOM_SCM
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
@@ -53,3 +54,17 @@ config DRM_MSM_DSI_PLL
 	help
 	  Choose this option to enable DSI PLL driver which provides DSI
 	  source clocks under common clock framework.
+
+config DRM_MSM_DSI_28NM_PHY
+	bool "Enable DSI 28nm PHY driver in MSM DRM"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the 28nm DSI PHY is used on the platform.
+
+config DRM_MSM_DSI_20NM_PHY
+	bool "Enable DSI 20nm PHY driver in MSM DRM"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the 20nm DSI PHY is used on the platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 16a81b94d6f0..0a543eb5e5d7 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,5 @@
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
-ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
+ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 
 msm-y := \
 	adreno/adreno_device.o \
@@ -10,6 +10,7 @@ msm-y := \
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
 	hdmi/hdmi_connector.o \
+	hdmi/hdmi_hdcp.o \
 	hdmi/hdmi_i2c.o \
 	hdmi/hdmi_phy_8960.o \
 	hdmi/hdmi_phy_8x60.o \
@@ -53,12 +54,18 @@ msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
 
 msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+			dsi/dsi_cfg.o \
 			dsi/dsi_host.o \
 			dsi/dsi_manager.o \
-			dsi/dsi_phy.o \
+			dsi/phy/dsi_phy.o \
 			mdp/mdp5/mdp5_cmd_encoder.o
 
-msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
-				dsi/pll/dsi_pll_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+
+ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
+msm-y += dsi/pll/dsi_pll.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
+endif
 
 obj-$(CONFIG_DRM_MSM)	+= msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 23176e402796..0261f0d31612 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,15 +8,15 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
-
-Copyright (C) 2013-2014 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
+
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 1c599e5cf318..48d133711487 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -326,6 +326,13 @@ enum a3xx_tex_type {
 	A3XX_TEX_3D = 3,
 };
 
+enum a3xx_tex_msaa {
+	A3XX_TPL1_MSAA1X = 0,
+	A3XX_TPL1_MSAA2X = 1,
+	A3XX_TPL1_MSAA4X = 2,
+	A3XX_TPL1_MSAA8X = 3,
+};
+
 #define A3XX_INT0_RBBM_GPU_IDLE					0x00000001
 #define A3XX_INT0_RBBM_AHB_ERROR				0x00000002
 #define A3XX_INT0_RBBM_REG_TIMEOUT				0x00000004
@@ -2652,6 +2659,7 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
 #define REG_A3XX_VGT_IMMED_DATA					0x000021fd
 
 #define REG_A3XX_TEX_SAMP_0					0x00000000
+#define A3XX_TEX_SAMP_0_CLAMPENABLE				0x00000001
 #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR			0x00000002
 #define A3XX_TEX_SAMP_0_XY_MAG__MASK				0x0000000c
 #define A3XX_TEX_SAMP_0_XY_MAG__SHIFT				2
@@ -2695,6 +2703,7 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val
 {
 	return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
 }
+#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF			0x01000000
 #define A3XX_TEX_SAMP_0_UNNORM_COORDS				0x80000000
 
 #define REG_A3XX_TEX_SAMP_1					0x00000001
@@ -2750,6 +2759,12 @@ static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
 {
 	return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
 }
+#define A3XX_TEX_CONST_0_MSAATEX__MASK				0x00300000
+#define A3XX_TEX_CONST_0_MSAATEX__SHIFT				20
+static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val)
+{
+	return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK;
+}
 #define A3XX_TEX_CONST_0_FMT__MASK				0x1fc00000
 #define A3XX_TEX_CONST_0_FMT__SHIFT				22
 static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
@@ -2785,7 +2800,7 @@ static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
 }
 
 #define REG_A3XX_TEX_CONST_2					0x00000002
-#define A3XX_TEX_CONST_2_INDX__MASK				0x000000ff
+#define A3XX_TEX_CONST_2_INDX__MASK				0x000001ff
 #define A3XX_TEX_CONST_2_INDX__SHIFT				0
 static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
 {
@@ -2805,7 +2820,7 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
 }
 
 #define REG_A3XX_TEX_CONST_3					0x00000003
-#define A3XX_TEX_CONST_3_LAYERSZ1__MASK				0x00007fff
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK				0x0001ffff
 #define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT			0
 static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
 {
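The generated headers follow the rules-ng-ng convention: each field gets a __MASK/__SHIFT pair plus an inline packer that shifts the value and masks it into place. A minimal sketch of packing the new MSAA field together with the existing format field (the helper and enum names come from the hunk above; the wrapper function itself is hypothetical):

	/* hypothetical helper: each packer masks its own bits, so the
	 * packed fields can simply be ORed together */
	static uint32_t pack_tex_const_0(enum a3xx_tex_fmt fmt)
	{
		return A3XX_TEX_CONST_0_FMT(fmt) |
		       A3XX_TEX_CONST_0_MSAATEX(A3XX_TPL1_MSAA4X);
	}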
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 3f06ecf62583..ac55066db3b0 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -227,6 +227,7 @@ enum a4xx_depth_format {
 	DEPTH4_NONE = 0,
 	DEPTH4_16 = 1,
 	DEPTH4_24_8 = 2,
+	DEPTH4_32 = 3,
 };
 
 enum a4xx_tess_spacing {
@@ -429,7 +430,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
 	return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
 }
 #define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB				0x00002000
-#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK		0x007fc000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK		0xffffc000
 #define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT		14
 static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
 {
@@ -439,7 +440,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
 static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
 
 static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
-#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK			0x0001fff8
+#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK			0x03fffff8
 #define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT			3
 static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
 {
@@ -570,6 +571,15 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
 	return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
 }
 
+#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL			0x000020fa
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY			0x00000002
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK			0xfffffffc
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT		2
+static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val)
+{
+	return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
+}
+
 #define REG_A4XX_RB_RENDER_COMPONENTS				0x000020fb
 #define A4XX_RB_RENDER_COMPONENTS_RT0__MASK			0x0000000f
 #define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT			0
@@ -811,6 +821,23 @@ static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
 #define REG_A4XX_RB_STENCIL_CONTROL2				0x00002107
 #define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER			0x00000001
 
+#define REG_A4XX_RB_STENCIL_INFO				0x00002108
+#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL			0x00000001
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK			0xfffff000
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT		12
+static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+	return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_PITCH				0x00002109
+#define A4XX_RB_STENCIL_PITCH__MASK				0xffffffff
+#define A4XX_RB_STENCIL_PITCH__SHIFT				0
+static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val)
+{
+	return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
+}
+
 #define REG_A4XX_RB_STENCILREFMASK				0x0000210b
 #define A4XX_RB_STENCILREFMASK_STENCILREF__MASK			0x000000ff
 #define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT		0
@@ -1433,6 +1460,7 @@ static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
 {
 	return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
 }
+#define A4XX_SP_FS_MRT_REG_COLOR_SRGB				0x00040000
 
 #define REG_A4XX_SP_CS_CTRL_REG0				0x00002300
 
@@ -1470,6 +1498,76 @@ static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
 
 #define REG_A4XX_SP_HS_LENGTH_REG				0x00002312
 
+#define REG_A4XX_SP_DS_PARAM_REG				0x0000231a
+#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK			0x000000ff
+#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT			0
+static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK		0xfff00000
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT		20
+static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+#define A4XX_SP_DS_OUT_REG_A_REGID__MASK			0x000001ff
+#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT			0
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK			0x00001e00
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT			9
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_REGID__MASK			0x01ff0000
+#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT			16
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK			0x1e000000
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT			25
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK			0x000000ff
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT			0
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK			0x0000ff00
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT			8
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK			0x00ff0000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT			16
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK			0xff000000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT			24
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
 #define REG_A4XX_SP_DS_OBJ_OFFSET_REG				0x00002334
 #define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK	0x01ff0000
 #define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT	16
@@ -1492,6 +1590,82 @@ static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
 
 #define REG_A4XX_SP_DS_LENGTH_REG				0x00002339
 
+#define REG_A4XX_SP_GS_PARAM_REG				0x00002341
+#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK			0x000000ff
+#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT			0
+static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK			0x0000ff00
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT			8
+static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK		0xfff00000
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT		20
+static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+#define A4XX_SP_GS_OUT_REG_A_REGID__MASK			0x000001ff
+#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT			0
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK			0x00001e00
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT			9
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_REGID__MASK			0x01ff0000
+#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT			16
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK			0x1e000000
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT			25
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK			0x000000ff
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT			0
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK			0x0000ff00
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT			8
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK			0x00ff0000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT			16
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK			0xff000000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT			24
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
 #define REG_A4XX_SP_GS_OBJ_OFFSET_REG				0x0000235b
 #define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK	0x01ff0000
 #define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT	16
@@ -1693,6 +1867,18 @@ static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val)
 {
 	return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
 }
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK			0x00ff0000
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT			16
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+	return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK			0xff000000
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT			24
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+	return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
 
 #define REG_A4XX_VFD_CONTROL_4					0x00002204
 
@@ -2489,6 +2675,8 @@ static inline uint32_t A4XX_UNKNOWN_20F7(float val)
 
 #define REG_A4XX_UNKNOWN_22D7					0x000022d7
 
+#define REG_A4XX_UNKNOWN_2352					0x00002352
+
 #define REG_A4XX_TEX_SAMP_0					0x00000000
 #define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR			0x00000001
 #define A4XX_TEX_SAMP_0_XY_MAG__MASK				0x00000006
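Note that some of the new a4xx packers encode addresses rather than plain values: A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR() drops the low two bits (val >> 2) and A4XX_RB_STENCIL_INFO_STENCIL_BASE() drops twelve, so callers must pass suitably aligned GPU addresses. A hedged sketch, where iova is a hypothetical dword-aligned GPU address:

	/* sketch only: the low two bits of iova are discarded by the
	 * packer, so iova must be 4-byte aligned */
	uint32_t ctrl = A4XX_RB_SAMPLE_COUNT_CONTROL_COPY |
			A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(iova);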
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 9562a1fa552b..399a9e528139 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,15 +8,15 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
-
-Copyright (C) 2013-2014 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
+
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index bd5b23bf9041..41904fed1350 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,13 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -67,7 +67,7 @@ enum vgt_event_type {
 
 enum pc_di_primtype {
 	DI_PT_NONE = 0,
-	DI_PT_POINTLIST_A2XX = 1,
+	DI_PT_POINTLIST_PSIZE = 1,
 	DI_PT_LINELIST = 2,
 	DI_PT_LINESTRIP = 3,
 	DI_PT_TRILIST = 4,
@@ -75,7 +75,7 @@ enum pc_di_primtype {
 	DI_PT_TRISTRIP = 6,
 	DI_PT_LINELOOP = 7,
 	DI_PT_RECTLIST = 8,
-	DI_PT_POINTLIST_A3XX = 9,
+	DI_PT_POINTLIST = 9,
 	DI_PT_LINE_ADJ = 10,
 	DI_PT_LINESTRIP_ADJ = 11,
 	DI_PT_TRI_ADJ = 12,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 1f2561e2ff71..6edcd6f57e70 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -15,10 +15,10 @@
 
 struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
 {
-	if (!msm_dsi || !msm_dsi->panel)
+	if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
 		return NULL;
 
-	return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
+	return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
 		msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
 		msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
 }
@@ -74,19 +74,15 @@ static void dsi_destroy(struct msm_dsi *msm_dsi)
 
 static struct msm_dsi *dsi_init(struct platform_device *pdev)
 {
-	struct msm_dsi *msm_dsi = NULL;
+	struct msm_dsi *msm_dsi;
 	int ret;
 
-	if (!pdev) {
-		ret = -ENXIO;
-		goto fail;
-	}
+	if (!pdev)
+		return ERR_PTR(-ENXIO);
 
 	msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
-	if (!msm_dsi) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	if (!msm_dsi)
+		return ERR_PTR(-ENOMEM);
 	DBG("dsi probed=%p", msm_dsi);
 
 	msm_dsi->pdev = pdev;
@@ -95,24 +91,22 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev)
 	/* Init dsi host */
 	ret = msm_dsi_host_init(msm_dsi);
 	if (ret)
-		goto fail;
+		goto destroy_dsi;
 
 	/* GET dsi PHY */
 	ret = dsi_get_phy(msm_dsi);
 	if (ret)
-		goto fail;
+		goto destroy_dsi;
 
 	/* Register to dsi manager */
 	ret = msm_dsi_manager_register(msm_dsi);
 	if (ret)
-		goto fail;
+		goto destroy_dsi;
 
 	return msm_dsi;
 
-fail:
-	if (msm_dsi)
-		dsi_destroy(msm_dsi);
-
+destroy_dsi:
+	dsi_destroy(msm_dsi);
 	return ERR_PTR(ret);
 }
 
@@ -196,6 +190,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_bridge *ext_bridge;
 	int ret, i;
 
 	if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
@@ -223,10 +218,25 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 		msm_dsi->encoders[i] = encoders[i];
 	}
 
-	msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
+	/*
+	 * check if the dsi encoder output is connected to a panel or an
+	 * external bridge. We create a connector only if we're connected to a
+	 * drm_panel device. When we're connected to an external bridge, we
+	 * assume that the drm_bridge driver will create the connector itself.
+	 */
+	ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
+
+	if (ext_bridge)
+		msm_dsi->connector =
+			msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+	else
+		msm_dsi->connector =
+			msm_dsi_manager_connector_init(msm_dsi->id);
+
 	if (IS_ERR(msm_dsi->connector)) {
 		ret = PTR_ERR(msm_dsi->connector);
-		dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
+		dev_err(dev->dev,
+			"failed to create dsi connector: %d\n", ret);
 		msm_dsi->connector = NULL;
 		goto fail;
 	}
@@ -242,10 +252,12 @@ fail:
 			msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
 			msm_dsi->bridge = NULL;
 		}
-		if (msm_dsi->connector) {
+
+		/* don't destroy connector if we didn't make it */
+		if (msm_dsi->connector && !msm_dsi->external_bridge)
 			msm_dsi->connector->funcs->destroy(msm_dsi->connector);
-			msm_dsi->connector = NULL;
-		}
+
+		msm_dsi->connector = NULL;
 	}
 
 	return ret;
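The connector handling added above follows one ownership rule: a connector is created here only when a drm_panel is attached, while an external drm_bridge is expected to register its own connector, which is also why the error path only destroys a connector it created itself. A condensed restatement of the branch (all names are from the patch):

	/* condensed restatement, not new code: only the panel path
	 * creates -- and on failure destroys -- a connector here */
	ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
	msm_dsi->connector = ext_bridge ?
			msm_dsi_manager_ext_bridge_init(msm_dsi->id) :
			msm_dsi_manager_connector_init(msm_dsi->id);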
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 92d697de4858..5f5a3732cdf6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -27,21 +27,10 @@
 #define DSI_1	1
 #define DSI_MAX	2
 
-#define DSI_CLOCK_MASTER	DSI_0
-#define DSI_CLOCK_SLAVE		DSI_1
-
-#define DSI_LEFT		DSI_0
-#define DSI_RIGHT		DSI_1
-
-/* According to the current drm framework sequence, take the encoder of
- * DSI_1 as master encoder
- */
-#define DSI_ENCODER_MASTER	DSI_1
-#define DSI_ENCODER_SLAVE	DSI_0
-
 enum msm_dsi_phy_type {
 	MSM_DSI_PHY_28NM_HPM,
 	MSM_DSI_PHY_28NM_LP,
+	MSM_DSI_PHY_20NM,
 	MSM_DSI_PHY_MAX
 };
 
@@ -65,13 +54,21 @@ struct msm_dsi {
 	struct drm_device *dev;
 	struct platform_device *pdev;
 
+	/* connector managed by us when we're connected to a drm_panel */
 	struct drm_connector *connector;
+	/* internal dsi bridge attached to MDP interface */
 	struct drm_bridge *bridge;
 
 	struct mipi_dsi_host *host;
 	struct msm_dsi_phy *phy;
+
+	/*
+	 * panel/external_bridge connected to dsi bridge output, only one of the
+	 * two can be valid at a time
+	 */
 	struct drm_panel *panel;
-	unsigned long panel_flags;
+	struct drm_bridge *external_bridge;
+	unsigned long device_flags;
 
 	struct device *phy_dev;
 	bool phy_enabled;
@@ -86,6 +83,7 @@ struct msm_dsi {
 struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
 void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
 struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
 int msm_dsi_manager_phy_enable(int id,
 		const unsigned long bit_rate, const unsigned long esc_rate,
 		u32 *clk_pre, u32 *clk_post);
@@ -96,6 +94,11 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
 void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
 
 /* msm dsi */
+static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
+{
+	return msm_dsi->panel || msm_dsi->external_bridge;
+}
+
 struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
 
 /* dsi pll */
@@ -106,6 +109,8 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 void msm_dsi_pll_destroy(struct msm_dsi_pll *pll);
 int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
 	struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
 #else
 static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 			 enum msm_dsi_phy_type type, int id) {
@@ -119,6 +124,13 @@ static inline int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
 {
 	return -ENODEV;
 }
+static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+}
+static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+	return 0;
+}
 #endif
 
 /* dsi host */
@@ -140,6 +152,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 					struct drm_display_mode *mode);
 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
 					unsigned long *panel_flags);
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
 void msm_dsi_host_unregister(struct mipi_dsi_host *host);
 int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
@@ -153,9 +166,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
 struct msm_dsi_phy;
 void msm_dsi_phy_driver_register(void);
 void msm_dsi_phy_driver_unregister(void);
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	const unsigned long bit_rate, const unsigned long esc_rate);
-int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
 void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
 					u32 *clk_pre, u32 *clk_post);
 struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
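The new msm_dsi_pll_save_state()/msm_dsi_pll_restore_state() pair uses the usual compile-out pattern: when the PLL code is not built, the header supplies no-op static inlines so callers compile unchanged. A hypothetical caller, sketched under that assumption:

	/* hypothetical caller: safe whether or not PLL support is
	 * built in, because the stubs above are no-ops */
	static void example_phy_suspend(struct msm_dsi *msm_dsi)
	{
		struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);

		if (pll)
			msm_dsi_pll_save_state(pll);
	}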
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 9791ea04bcbc..1d2e32f0817b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -382,6 +382,11 @@ static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
 #define REG_DSI_TRIG_DMA					0x0000008c
 
 #define REG_DSI_DLN0_PHY_ERR					0x000000b0
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC				0x00000001
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC			0x00000010
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL			0x00000100
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0		0x00001000
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1		0x00010000
 
 #define REG_DSI_TIMEOUT_STATUS					0x000000bc
 
@@ -435,6 +440,9 @@ static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
 #define REG_DSI_PHY_RESET					0x00000128
 #define DSI_PHY_RESET_RESET					0x00000001
 
+#define REG_DSI_T_CLK_PRE_EXTEND				0x0000017c
+#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK			0x00000001
+
 #define REG_DSI_RDBK_DATA_CTRL					0x000001d0
 #define DSI_RDBK_DATA_CTRL_COUNT__MASK				0x00ff0000
 #define DSI_RDBK_DATA_CTRL_COUNT__SHIFT				16
@@ -830,6 +838,7 @@ static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
 #define REG_DSI_28nm_PHY_BIST_CTRL_5				0x000001c8
 
 #define REG_DSI_28nm_PHY_GLBL_TEST_CTRL				0x000001d4
+#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000001
 
 #define REG_DSI_28nm_PHY_LDO_CNTRL				0x000001dc
 
@@ -994,5 +1003,185 @@ static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val)
 
 #define REG_DSI_28nm_PHY_PLL_CTRL_54				0x000000d4
 
+static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_0				0x00000100
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_1				0x00000104
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_2				0x00000108
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_3				0x0000010c
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_4				0x00000110
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH			0x00000114
+
+#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL				0x00000118
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR0				0x0000011c
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR1				0x00000120
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_0				0x00000140
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_1				0x00000144
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_2				0x00000148
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_3				0x0000014c
+#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8			0x00000001
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_4				0x00000150
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_5				0x00000154
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_6				0x00000158
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_7				0x0000015c
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_8				0x00000160
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_9				0x00000164
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK			0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT			0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK		0x00000070
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT		4
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_10				0x00000168
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK		0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_11				0x0000016c
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_20nm_PHY_CTRL_0					0x00000170
+
+#define REG_DSI_20nm_PHY_CTRL_1					0x00000174
+
+#define REG_DSI_20nm_PHY_CTRL_2					0x00000178
+
+#define REG_DSI_20nm_PHY_CTRL_3					0x0000017c
+
+#define REG_DSI_20nm_PHY_CTRL_4					0x00000180
+
+#define REG_DSI_20nm_PHY_STRENGTH_0				0x00000184
+
+#define REG_DSI_20nm_PHY_STRENGTH_1				0x00000188
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_0				0x000001b4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_1				0x000001b8
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_2				0x000001bc
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_3				0x000001c0
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_4				0x000001c4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_5				0x000001c8
+
+#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL				0x000001d4
+#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000001
+
+#define REG_DSI_20nm_PHY_LDO_CNTRL				0x000001dc
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0			0x00000000
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1			0x00000004
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2			0x00000008
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3			0x0000000c
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4			0x00000010
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5			0x00000014
+
+#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG			0x00000018
+
 
 #endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
new file mode 100644
index 000000000000..5872d5e5934f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_cfg.h"
+
+/* DSI v2 is not supported yet */
+static const struct msm_dsi_config dsi_v2_cfg = {
+	.io_offset = 0,
+};
+
+static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 4,
+		.regs = {
+			{"gdsc", -1, -1, -1, -1},
+			{"vdd", 3000000, 3000000, 150000, 100},
+			{"vdda", 1200000, 1200000, 100000, 100},
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+};
+
+static const struct msm_dsi_config msm8916_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 4,
+		.regs = {
+			{"gdsc", -1, -1, -1, -1},
+			{"vdd", 2850000, 2850000, 100000, 100},
+			{"vdda", 1200000, 1200000, 100000, 100},
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+};
+
+static const struct msm_dsi_config msm8994_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 7,
+		.regs = {
+			{"gdsc", -1, -1, -1, -1},
+			{"vdda", 1250000, 1250000, 100000, 100},
+			{"vddio", 1800000, 1800000, 100000, 100},
+			{"vcca", 1000000, 1000000, 10000, 100},
+			{"vdd", 1800000, 1800000, 100000, 100},
+			{"lab_reg", -1, -1, -1, -1},
+			{"ibb_reg", -1, -1, -1, -1},
+		},
+	}
+};
+
+static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
+	{MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
+{
+	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
+	int i;
+
+	for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) {
+		if ((dsi_cfg_handlers[i].major == major) &&
+			(dsi_cfg_handlers[i].minor == minor)) {
+			cfg_hnd = &dsi_cfg_handlers[i];
+			break;
+		}
+	}
+
+	return cfg_hnd;
+}
+
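msm_dsi_cfg_get() is a pure exact-match lookup keyed on the controller's version register, so an unknown major/minor pair yields NULL rather than a best-effort fallback. A short usage sketch, mirroring dsi_get_config() later in this patch:

	/* major/minor come from dsi_get_version() reading the hardware */
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_dsi_cfg_get(major, minor);
	if (!cfg_hnd)
		pr_err("unsupported DSI version %x:%x\n", major, minor);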
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
new file mode 100644
index 000000000000..4cf887240177
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_DSI_CFG_H__
+#define __MSM_DSI_CFG_H__
+
+#include "dsi.h"
+
+#define MSM_DSI_VER_MAJOR_V2	0x02
+#define MSM_DSI_VER_MAJOR_6G	0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0	0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1	0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1	0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3	0x10030000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
+
+#define DSI_6G_REG_SHIFT	4
+
+struct msm_dsi_config {
+	u32 io_offset;
+	struct dsi_reg_config reg_cfg;
+};
+
+struct msm_dsi_cfg_handler {
+	u32 major;
+	u32 minor;
+	const struct msm_dsi_config *cfg;
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
+
+#endif /* __MSM_DSI_CFG_H__ */
+
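The 6G minor codes pack the version digits most-significant-first (0x10030001 corresponds to v1.3.1), so numeric order matches release order and dsi_host.c can gate features with plain >= comparisons, as in the hunk later in this patch:

	/* valid because V1_2 (0x10020000) < V1_3 (0x10030000) < V1_3_1 */
	if (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2)
		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;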
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index de0400923303..8d82973fe9db 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -20,103 +20,15 @@
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_graph.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spinlock.h>
 #include <video/mipi_display.h>
 
 #include "dsi.h"
 #include "dsi.xml.h"
-
-#define MSM_DSI_VER_MAJOR_V2	0x02
-#define MSM_DSI_VER_MAJOR_6G	0x03
-#define MSM_DSI_6G_VER_MINOR_V1_0	0x10000000
-#define MSM_DSI_6G_VER_MINOR_V1_1	0x10010000
-#define MSM_DSI_6G_VER_MINOR_V1_1_1	0x10010001
-#define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
-#define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
-
-#define DSI_6G_REG_SHIFT	4
-
-struct dsi_config {
-	u32 major;
-	u32 minor;
-	u32 io_offset;
-	struct dsi_reg_config reg_cfg;
-};
-
-static const struct dsi_config dsi_cfgs[] = {
-	{MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },
-	{ /* 8974 v1 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_0,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8974 v2 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_1,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8974 v3 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8084 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_2,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8916 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 2850000, 2850000, 100000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-};
+#include "dsi_cfg.h"
 
 static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
 {
@@ -194,7 +106,7 @@ struct msm_dsi_host {
 	struct gpio_desc *disp_en_gpio;
 	struct gpio_desc *te_gpio;
 
-	const struct dsi_config *cfg;
+	const struct msm_dsi_cfg_handler *cfg_hnd;
 
 	struct completion dma_comp;
 	struct completion video_comp;
@@ -212,8 +124,8 @@ struct msm_dsi_host {
 
 	struct drm_display_mode *mode;
 
-	/* Panel info */
-	struct device_node *panel_node;
+	/* connected device info */
+	struct device_node *device_node;
 	unsigned int channel;
 	unsigned int lanes;
 	enum mipi_dsi_pixel_format format;
@@ -239,61 +151,58 @@ static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
 
 static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
 {
-	return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+	return msm_readl(msm_host->ctrl_base + reg);
 }
 static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
 {
-	msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+	msm_writel(data, msm_host->ctrl_base + reg);
 }
 
 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
 
-static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
+static const struct msm_dsi_cfg_handler *dsi_get_config(
+						struct msm_dsi_host *msm_host)
 {
-	const struct dsi_config *cfg;
+	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
 	struct regulator *gdsc_reg;
-	int i, ret;
+	int ret;
 	u32 major = 0, minor = 0;
 
 	gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
 	if (IS_ERR(gdsc_reg)) {
 		pr_err("%s: cannot get gdsc\n", __func__);
-		goto fail;
+		goto exit;
 	}
 	ret = regulator_enable(gdsc_reg);
 	if (ret) {
 		pr_err("%s: unable to enable gdsc\n", __func__);
-		regulator_put(gdsc_reg);
-		goto fail;
+		goto put_gdsc;
 	}
 	ret = clk_prepare_enable(msm_host->ahb_clk);
 	if (ret) {
 		pr_err("%s: unable to enable ahb_clk\n", __func__);
-		regulator_disable(gdsc_reg);
-		regulator_put(gdsc_reg);
-		goto fail;
+		goto disable_gdsc;
 	}
 
 	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
-
-	clk_disable_unprepare(msm_host->ahb_clk);
-	regulator_disable(gdsc_reg);
-	regulator_put(gdsc_reg);
 	if (ret) {
 		pr_err("%s: Invalid version\n", __func__);
-		goto fail;
+		goto disable_clks;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
-		cfg = dsi_cfgs + i;
-		if ((cfg->major == major) && (cfg->minor == minor))
-			return cfg;
-	}
-	pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
+	cfg_hnd = msm_dsi_cfg_get(major, minor);
 
-fail:
-	return NULL;
+	DBG("%s: Version %x:%x", __func__, major, minor);
+
+disable_clks:
+	clk_disable_unprepare(msm_host->ahb_clk);
+disable_gdsc:
+	regulator_disable(gdsc_reg);
+put_gdsc:
+	regulator_put(gdsc_reg);
+exit:
+	return cfg_hnd;
 }
 
 static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
@@ -304,8 +213,8 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
 {
 	struct regulator_bulk_data *s = msm_host->supplies;
-	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
-	int num = msm_host->cfg->reg_cfg.num;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 	int i;
 
 	DBG("");
@@ -320,8 +229,8 @@ static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
 {
 	struct regulator_bulk_data *s = msm_host->supplies;
-	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
-	int num = msm_host->cfg->reg_cfg.num;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 	int ret, i;
 
 	DBG("");
@@ -354,8 +263,8 @@ fail:
 static int dsi_regulator_init(struct msm_dsi_host *msm_host)
 {
 	struct regulator_bulk_data *s = msm_host->supplies;
-	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
-	int num = msm_host->cfg->reg_cfg.num;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 	int i, ret;
 
 	for (i = 0; i < num; i++)
@@ -697,6 +606,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
 {
 	u32 flags = msm_host->mode_flags;
 	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 	u32 data = 0;
 
 	if (!enable) {
@@ -750,8 +660,8 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
 	data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
 	data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
 	data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
-	if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
-		(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+		(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
 		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
 	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
 
@@ -1257,7 +1167,11 @@ static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
 
 	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
 
-	if (status) {
+	if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
 		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
 		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
 	}
@@ -1359,7 +1273,8 @@ static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
 		return PTR_ERR(msm_host->disp_en_gpio);
 	}
 
-	msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN);
+	msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
+								GPIOD_IN);
 	if (IS_ERR(msm_host->te_gpio)) {
 		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
 		return PTR_ERR(msm_host->te_gpio);
@@ -1379,7 +1294,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
 	msm_host->format = dsi->format;
 	msm_host->mode_flags = dsi->mode_flags;
 
-	msm_host->panel_node = dsi->dev.of_node;
+	WARN_ON(dsi->dev.of_node != msm_host->device_node);
 
 	/* Some gpios defined in panel DT need to be controlled by host */
 	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
@@ -1398,7 +1313,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 
-	msm_host->panel_node = NULL;
+	msm_host->device_node = NULL;
 
 	DBG("id=%d", msm_host->id);
 	if (msm_host->dev)
@@ -1429,6 +1344,48 @@ static struct mipi_dsi_host_ops dsi_host_ops = {
 	.transfer = dsi_host_transfer,
 };
 
+static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+{
+	struct device *dev = &msm_host->pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *endpoint, *device_node;
+	int ret;
+
+	ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
+	if (ret) {
+		dev_err(dev, "%s: host index not specified, ret=%d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/*
+	 * Get the first endpoint node. In our case, dsi has one output port
+	 * to which the panel is connected. Don't return an error if a port
+	 * isn't defined. It's possible that there is nothing connected to
+	 * the dsi output.
+	 */
+	endpoint = of_graph_get_next_endpoint(np, NULL);
+	if (!endpoint) {
+		dev_dbg(dev, "%s: no endpoint\n", __func__);
+		return 0;
+	}
+
+	/* Get panel node from the output port's endpoint data */
+	device_node = of_graph_get_remote_port_parent(endpoint);
+	if (!device_node) {
+		dev_err(dev, "%s: no valid device\n", __func__);
+		of_node_put(endpoint);
+		return -ENODEV;
+	}
+
+	of_node_put(endpoint);
+	of_node_put(device_node);
+
+	msm_host->device_node = device_node;
+
+	return 0;
+}
+
 int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 {
 	struct msm_dsi_host *msm_host = NULL;
@@ -1443,15 +1400,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 		goto fail;
 	}
 
-	ret = of_property_read_u32(pdev->dev.of_node,
-				"qcom,dsi-host-index", &msm_host->id);
+	msm_host->pdev = pdev;
+
+	ret = dsi_host_parse_dt(msm_host);
 	if (ret) {
-		dev_err(&pdev->dev,
-			"%s: host index not specified, ret=%d\n",
-			__func__, ret);
+		pr_err("%s: failed to parse dt\n", __func__);
 		goto fail;
 	}
-	msm_host->pdev = pdev;
 
 	ret = dsi_clk_init(msm_host);
 	if (ret) {
@@ -1466,13 +1421,16 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 		goto fail;
 	}
 
-	msm_host->cfg = dsi_get_config(msm_host);
-	if (!msm_host->cfg) {
+	msm_host->cfg_hnd = dsi_get_config(msm_host);
+	if (!msm_host->cfg_hnd) {
 		ret = -EINVAL;
 		pr_err("%s: get config failed\n", __func__);
 		goto fail;
 	}
 
+	/* fixup base address by io offset */
+	msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
+
 	ret = dsi_regulator_init(msm_host);
 	if (ret) {
 		pr_err("%s: regulator init failed\n", __func__);
@@ -1559,7 +1517,6 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-	struct device_node *node;
 	int ret;
 
 	/* Register mipi dsi host */
@@ -1577,14 +1534,13 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
 		 * It makes sure panel is connected when fbcon detects
 		 * connector status and gets the proper display mode to
 		 * create framebuffer.
+		 * Don't try to defer if there is nothing connected to
+		 * the DSI output.
 		 */
-		if (check_defer) {
-			node = of_get_child_by_name(msm_host->pdev->dev.of_node,
-							"panel");
-			if (node) {
-				if (!of_drm_find_panel(node))
+		if (check_defer && msm_host->device_node) {
+			if (!of_drm_find_panel(msm_host->device_node))
+				if (!of_drm_find_bridge(msm_host->device_node))
 					return -EPROBE_DEFER;
-			}
 		}
 	}
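
The reworked check above only defers when deferral can help: a NULL device_node means the OF graph described no downstream device, so there is nothing to wait for, while a described device that has registered neither a drm_panel nor a drm_bridge yet yields -EPROBE_DEFER so the core retries the probe later. The decision in isolation (assuming this era's of_drm_find_panel()/of_drm_find_bridge(), which return NULL when nothing is registered):

	#include <drm/drm_panel.h>
	#include <drm/drm_crtc.h>

	/* Sketch: defer only while a described downstream device is missing. */
	static int example_check_downstream(struct device_node *device_node)
	{
		if (!device_node)
			return 0; /* no output described; nothing to wait for */

		if (!of_drm_find_panel(device_node) &&
		    !of_drm_find_bridge(device_node))
			return -EPROBE_DEFER; /* described but not registered yet */

		return 0;
	}
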
 
@@ -1663,6 +1619,7 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
 				const struct mipi_dsi_msg *msg)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 	int data_byte, rx_byte, dlen, end;
 	int short_response, diff, pkt_size, ret = 0;
 	char cmd;
@@ -1704,8 +1661,8 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
 			return -EINVAL;
 		}
 
-		if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
-			(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+		if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+			(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
 			/* Clear the RDBK_DATA registers */
 			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
 					DSI_RDBK_DATA_CTRL_CLR);
@@ -1919,6 +1876,13 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
 		goto fail_disable_reg;
 	}
 
+	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
+	if (ret) {
+		pr_err("%s: failed to set pinctrl default state, %d\n",
+			__func__, ret);
+		goto fail_disable_clk;
+	}
+
 	dsi_timing_setup(msm_host);
 	dsi_sw_reset(msm_host);
 	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
@@ -1931,6 +1895,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
 
 	return 0;
 
+fail_disable_clk:
+	dsi_clk_ctrl(msm_host, 0);
 fail_disable_reg:
 	dsi_host_regulator_disable(msm_host);
 unlock_ret:
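
The power-on path now moves the pins to their pinctrl "default" state only after regulators and clocks are up, with the new fail_disable_clk label unwinding the clock enable if that last step fails; msm_dsi_host_power_off() below performs the matching transition to the "sleep" state. The skeleton of the pairing, with hypothetical helpers standing in for the driver's regulator and clock calls:

	#include <linux/pinctrl/consumer.h>

	/* Sketch: bring pins up last, unwind in reverse order on failure.
	 * example_enable_*/disable_* are hypothetical stand-ins. */
	static int example_power_on(struct device *dev)
	{
		int ret;

		ret = example_enable_regulators(dev);	/* hypothetical */
		if (ret)
			return ret;

		ret = example_enable_clocks(dev);	/* hypothetical */
		if (ret)
			goto fail_disable_reg;

		ret = pinctrl_pm_select_default_state(dev);
		if (ret)
			goto fail_disable_clk;

		return 0;

	fail_disable_clk:
		example_disable_clocks(dev);
	fail_disable_reg:
		example_disable_regulators(dev);
		return ret;
	}
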
@@ -1953,6 +1919,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
 	if (msm_host->disp_en_gpio)
 		gpiod_set_value(msm_host->disp_en_gpio, 0);
 
+	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
+
 	msm_dsi_manager_phy_disable(msm_host->id);
 
 	dsi_clk_ctrl(msm_host, 0);
@@ -1993,10 +1961,16 @@ struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 	struct drm_panel *panel;
 
-	panel = of_drm_find_panel(msm_host->panel_node);
+	panel = of_drm_find_panel(msm_host->device_node);
 	if (panel_flags)
 		*panel_flags = msm_host->mode_flags;
 
 	return panel;
 }
 
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	return of_drm_find_bridge(msm_host->device_node);
+}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 87ac6612b6f8..0455ff75074a 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -14,19 +14,31 @@
 #include "msm_kms.h"
 #include "dsi.h"
 
+#define DSI_CLOCK_MASTER	DSI_0
+#define DSI_CLOCK_SLAVE		DSI_1
+
+#define DSI_LEFT		DSI_0
+#define DSI_RIGHT		DSI_1
+
+/* According to the current DRM framework sequence, take the encoder of
+ * DSI_1 as the master encoder.
+ */
+#define DSI_ENCODER_MASTER	DSI_1
+#define DSI_ENCODER_SLAVE	DSI_0
+
 struct msm_dsi_manager {
 	struct msm_dsi *dsi[DSI_MAX];
 
-	bool is_dual_panel;
+	bool is_dual_dsi;
 	bool is_sync_needed;
-	int master_panel_id;
+	int master_dsi_link_id;
 };
 
 static struct msm_dsi_manager msm_dsim_glb;
 
-#define IS_DUAL_PANEL()		(msm_dsim_glb.is_dual_panel)
+#define IS_DUAL_DSI()		(msm_dsim_glb.is_dual_dsi)
 #define IS_SYNC_NEEDED()	(msm_dsim_glb.is_sync_needed)
-#define IS_MASTER_PANEL(id)	(msm_dsim_glb.master_panel_id == id)
+#define IS_MASTER_DSI_LINK(id)	(msm_dsim_glb.master_dsi_link_id == id)
 
 static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
 {
@@ -38,23 +50,23 @@ static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
 	return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
 }
 
-static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
+static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
 {
 	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
 
-	/* We assume 2 dsi nodes have the same information of dual-panel and
+	/* We assume the 2 dsi nodes have the same information about dual-dsi and
 	 * sync-mode, and only one node specifies master in case of dual mode.
 	 */
-	if (!msm_dsim->is_dual_panel)
-		msm_dsim->is_dual_panel = of_property_read_bool(
-						np, "qcom,dual-panel-mode");
+	if (!msm_dsim->is_dual_dsi)
+		msm_dsim->is_dual_dsi = of_property_read_bool(
+						np, "qcom,dual-dsi-mode");
 
-	if (msm_dsim->is_dual_panel) {
-		if (of_property_read_bool(np, "qcom,master-panel"))
-			msm_dsim->master_panel_id = id;
+	if (msm_dsim->is_dual_dsi) {
+		if (of_property_read_bool(np, "qcom,master-dsi"))
+			msm_dsim->master_dsi_link_id = id;
 		if (!msm_dsim->is_sync_needed)
 			msm_dsim->is_sync_needed = of_property_read_bool(
-					np, "qcom,sync-dual-panel");
+					np, "qcom,sync-dual-dsi");
 	}
 
 	return 0;
@@ -68,7 +80,7 @@ static int dsi_mgr_host_register(int id)
 	struct msm_dsi_pll *src_pll;
 	int ret;
 
-	if (!IS_DUAL_PANEL()) {
+	if (!IS_DUAL_DSI()) {
 		ret = msm_dsi_host_register(msm_dsi->host, true);
 		if (ret)
 			return ret;
@@ -78,9 +90,9 @@ static int dsi_mgr_host_register(int id)
 	} else if (!other_dsi) {
 		ret = 0;
 	} else {
-		struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+		struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ?
 					msm_dsi : other_dsi;
-		struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+		struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ?
 					other_dsi : msm_dsi;
 		/* Register slave host first, so that slave DSI device
 		 * has a chance to probe, and do not block the master
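
The comment above encodes the ordering constraint for dual DSI: register the slave host first, without the defer check, so its DSI device can probe and never stalls the master; the master then registers with the usual defer semantics. The tail of this branch, which the hunk cuts off, is roughly:

		ret = msm_dsi_host_register(sdsi->host, false);
		if (ret)
			return ret;
		ret = msm_dsi_host_register(mdsi->host, true);
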
@@ -144,28 +156,28 @@ static enum drm_connector_status dsi_mgr_connector_detect(
 	DBG("id=%d", id);
 	if (!msm_dsi->panel) {
 		msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
-						&msm_dsi->panel_flags);
+						&msm_dsi->device_flags);
 
 		/* There is only 1 panel in the global panel list
-		 * for dual panel mode. Therefore slave dsi should get
+		 * for dual DSI mode. Therefore slave dsi should get
 		 * the drm_panel instance from master dsi, and
 		 * keep using the panel flags obtained from the current DSI link.
 		 */
-		if (!msm_dsi->panel && IS_DUAL_PANEL() &&
-			!IS_MASTER_PANEL(id) && other_dsi)
+		if (!msm_dsi->panel && IS_DUAL_DSI() &&
+			!IS_MASTER_DSI_LINK(id) && other_dsi)
 			msm_dsi->panel = msm_dsi_host_get_panel(
 					other_dsi->host, NULL);
 
-		if (msm_dsi->panel && IS_DUAL_PANEL())
+		if (msm_dsi->panel && IS_DUAL_DSI())
 			drm_object_attach_property(&connector->base,
 				connector->dev->mode_config.tile_property, 0);
 
-		/* Set split display info to kms once dual panel is connected
-		 * to both hosts
+		/* Set split display info to kms once dual DSI panel is
+		 * connected to both hosts.
 		 */
-		if (msm_dsi->panel && IS_DUAL_PANEL() &&
+		if (msm_dsi->panel && IS_DUAL_DSI() &&
 			other_dsi && other_dsi->panel) {
-			bool cmd_mode = !(msm_dsi->panel_flags &
+			bool cmd_mode = !(msm_dsi->device_flags &
 						MIPI_DSI_MODE_VIDEO);
 			struct drm_encoder *encoder = msm_dsi_get_encoder(
 					dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
@@ -176,7 +188,7 @@ static enum drm_connector_status dsi_mgr_connector_detect(
 				kms->funcs->set_split_display(kms, encoder,
 							slave_enc, cmd_mode);
 			else
-				pr_err("mdp does not support dual panel\n");
+				pr_err("mdp does not support dual DSI\n");
 		}
 	}
 
@@ -273,7 +285,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
 	if (!num)
 		return 0;
 
-	if (IS_DUAL_PANEL()) {
+	if (IS_DUAL_DSI()) {
 		/* report half resolution to user */
 		dsi_dual_connector_fix_modes(connector);
 		ret = dsi_dual_connector_tile_init(connector, id);
@@ -328,11 +340,12 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
 	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
 	struct mipi_dsi_host *host = msm_dsi->host;
 	struct drm_panel *panel = msm_dsi->panel;
-	bool is_dual_panel = IS_DUAL_PANEL();
+	bool is_dual_dsi = IS_DUAL_DSI();
 	int ret;
 
 	DBG("id=%d", id);
-	if (!panel || (is_dual_panel && (DSI_1 == id)))
+	if (!msm_dsi_device_connected(msm_dsi) ||
+			(is_dual_dsi && (DSI_1 == id)))
 		return;
 
 	ret = msm_dsi_host_power_on(host);
@@ -341,7 +354,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
 		goto host_on_fail;
 	}
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_power_on(msm_dsi1->host);
 		if (ret) {
 			pr_err("%s: power on host1 failed, %d\n",
@@ -353,10 +366,13 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
 	/* Always call panel functions once, because even for dual panels,
 	 * there is only one drm_panel instance.
 	 */
-	ret = drm_panel_prepare(panel);
-	if (ret) {
-		pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
-		goto panel_prep_fail;
+	if (panel) {
+		ret = drm_panel_prepare(panel);
+		if (ret) {
+			pr_err("%s: prepare panel %d failed, %d\n", __func__,
+								id, ret);
+			goto panel_prep_fail;
+		}
 	}
 
 	ret = msm_dsi_host_enable(host);
@@ -365,7 +381,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
 		goto host_en_fail;
 	}
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_enable(msm_dsi1->host);
 		if (ret) {
 			pr_err("%s: enable host1 failed, %d\n", __func__, ret);
@@ -373,23 +389,27 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
 		}
 	}
 
-	ret = drm_panel_enable(panel);
-	if (ret) {
-		pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
-		goto panel_en_fail;
+	if (panel) {
+		ret = drm_panel_enable(panel);
+		if (ret) {
+			pr_err("%s: enable panel %d failed, %d\n", __func__, id,
+									ret);
+			goto panel_en_fail;
+		}
 	}
 
 	return;
 
 panel_en_fail:
-	if (is_dual_panel && msm_dsi1)
+	if (is_dual_dsi && msm_dsi1)
 		msm_dsi_host_disable(msm_dsi1->host);
 host1_en_fail:
 	msm_dsi_host_disable(host);
 host_en_fail:
-	drm_panel_unprepare(panel);
+	if (panel)
+		drm_panel_unprepare(panel);
 panel_prep_fail:
-	if (is_dual_panel && msm_dsi1)
+	if (is_dual_dsi && msm_dsi1)
 		msm_dsi_host_power_off(msm_dsi1->host);
 host1_on_fail:
 	msm_dsi_host_power_off(host);
@@ -414,37 +434,44 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
 	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
 	struct mipi_dsi_host *host = msm_dsi->host;
 	struct drm_panel *panel = msm_dsi->panel;
-	bool is_dual_panel = IS_DUAL_PANEL();
+	bool is_dual_dsi = IS_DUAL_DSI();
 	int ret;
 
 	DBG("id=%d", id);
 
-	if (!panel || (is_dual_panel && (DSI_1 == id)))
+	if (!msm_dsi_device_connected(msm_dsi) ||
+			(is_dual_dsi && (DSI_1 == id)))
 		return;
 
-	ret = drm_panel_disable(panel);
-	if (ret)
-		pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);
+	if (panel) {
+		ret = drm_panel_disable(panel);
+		if (ret)
+			pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
+									ret);
+	}
 
 	ret = msm_dsi_host_disable(host);
 	if (ret)
 		pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_disable(msm_dsi1->host);
 		if (ret)
 			pr_err("%s: host1 disable failed, %d\n", __func__, ret);
 	}
 
-	ret = drm_panel_unprepare(panel);
-	if (ret)
-		pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret);
+	if (panel) {
+		ret = drm_panel_unprepare(panel);
+		if (ret)
+			pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
+			pr_err("%s: Panel %d unprepare failed, %d\n", __func__,
+	}
 
 	ret = msm_dsi_host_power_off(host);
 	if (ret)
 		pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_power_off(msm_dsi1->host);
 		if (ret)
 			pr_err("%s: host1 power off failed, %d\n",
@@ -460,7 +487,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
 	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
 	struct mipi_dsi_host *host = msm_dsi->host;
-	bool is_dual_panel = IS_DUAL_PANEL();
+	bool is_dual_dsi = IS_DUAL_DSI();
 
 	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
 			mode->base.id, mode->name,
@@ -471,11 +498,11 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 
-	if (is_dual_panel && (DSI_1 == id))
+	if (is_dual_dsi && (DSI_1 == id))
 		return;
 
 	msm_dsi_host_set_display_mode(host, adjusted_mode);
-	if (is_dual_panel && other_dsi)
+	if (is_dual_dsi && other_dsi)
 		msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
 }
 
@@ -503,7 +530,7 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
 	.mode_set = dsi_mgr_bridge_mode_set,
 };
 
-/* initialize connector */
+/* initialize connector when we're connected to a drm_panel */
 struct drm_connector *msm_dsi_manager_connector_init(u8 id)
 {
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@@ -588,6 +615,53 @@ fail:
 	return ERR_PTR(ret);
 }
 
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_device *dev = msm_dsi->dev;
+	struct drm_encoder *encoder;
+	struct drm_bridge *int_bridge, *ext_bridge;
+	struct drm_connector *connector;
+	struct list_head *connector_list;
+
+	int_bridge = msm_dsi->bridge;
+	ext_bridge = msm_dsi->external_bridge =
+			msm_dsi_host_get_bridge(msm_dsi->host);
+
+	/*
+	 * HACK: we may not know the external DSI bridge device's mode
+	 * flags here. We only learn them when the device attaches to
+	 * the DSI host. For now, assume the bridge supports DSI video
+	 * mode.
+	 */
+	encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
+
+	/* link the internal dsi bridge to the external bridge */
+	int_bridge->next = ext_bridge;
+	/* set the external bridge's encoder as dsi's encoder */
+	ext_bridge->encoder = encoder;
+
+	drm_bridge_attach(dev, ext_bridge);
+
+	/*
+	 * We need the drm_connector created by the external bridge
+	 * driver (or someone else) so that we can feed it to our
+	 * driver's priv->connector[] list, mainly for msm_fbdev_init().
+	 */
+	connector_list = &dev->mode_config.connector_list;
+
+	list_for_each_entry(connector, connector_list, head) {
+		int i;
+
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (connector->encoder_ids[i] == encoder->base.id)
+				return connector;
+		}
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
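
msm_dsi_manager_ext_bridge_init() above has to recover the drm_connector that the external bridge driver created, and does so by scanning the device's connector list for one whose encoder_ids[] references the DSI encoder. Pulled out as a reusable helper, the scan looks like this (pre-atomic-helper era, where encoder_ids[] is a fixed-size array):

	#include <drm/drm_crtc.h>

	/* Sketch: find the connector attached to a given encoder. */
	static struct drm_connector *
	example_find_connector(struct drm_device *dev, struct drm_encoder *encoder)
	{
		struct drm_connector *connector;
		int i;

		list_for_each_entry(connector,
				    &dev->mode_config.connector_list, head)
			for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
				if (connector->encoder_ids[i] == encoder->base.id)
					return connector;

		return NULL;
	}
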
 void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
 {
 }
@@ -598,12 +672,29 @@ int msm_dsi_manager_phy_enable(int id,
 {
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
 	struct msm_dsi_phy *phy = msm_dsi->phy;
+	int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
+	struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
 	int ret;
 
-	ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
+	ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
 	if (ret)
 		return ret;
 
+	/*
+	 * Resetting the DSI PHY silently changes its PLL registers to their
+	 * reset state, which confuses the clock driver and results in a wrong
+	 * output rate for the link clocks. Restore the PLL state if this PLL
+	 * is being used as a clock source.
+	 */
+	if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) {
+		ret = msm_dsi_pll_restore_state(pll);
+		if (ret) {
+			pr_err("%s: failed to restore pll state\n", __func__);
+			msm_dsi_phy_disable(phy);
+			return ret;
+		}
+	}
+
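
This restore pairs with the save added to msm_dsi_manager_phy_disable() below, and both run only on the link whose PLL actually feeds the clock tree. The ordering, as a comment-only sketch:

	/*
	 * Sketch of the save/restore ordering for the clock-source link:
	 *
	 *   disable path: msm_dsi_pll_save_state(pll);
	 *                 msm_dsi_phy_disable(phy);       // reset clobbers PLL regs
	 *
	 *   enable path:  msm_dsi_phy_enable(phy, ...);   // PLL regs at reset values
	 *                 msm_dsi_pll_restore_state(pll); // replay the cached state
	 */
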
 	msm_dsi->phy_enabled = true;
 	msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
 
@@ -616,13 +707,18 @@ void msm_dsi_manager_phy_disable(int id)
 	struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
 	struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
 	struct msm_dsi_phy *phy = msm_dsi->phy;
+	struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+
+	/* Save the PLL state if it is being used as a clock source */
+	if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER))
+		msm_dsi_pll_save_state(pll);
 
 	/* disable DSI phy
 	 * In dual-dsi configuration, the phy should be disabled for the
 	 * first controller only when the second controller is disabled.
 	 */
 	msm_dsi->phy_enabled = false;
-	if (IS_DUAL_PANEL() && mdsi && sdsi) {
+	if (IS_DUAL_DSI() && mdsi && sdsi) {
 		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
 			msm_dsi_phy_disable(sdsi->phy);
 			msm_dsi_phy_disable(mdsi->phy);
@@ -713,9 +809,9 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
 
 	msm_dsim->dsi[id] = msm_dsi;
 
-	ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
+	ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id);
 	if (ret) {
-		pr_err("%s: failed to parse dual panel info\n", __func__);
+		pr_err("%s: failed to parse dual DSI info\n", __func__);
 		goto fail;
 	}
 
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 728152f3ef48..5de505e627be 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
-
-Copyright (C) 2013-2014 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
+
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 2d3b33ce1cc5..401ff58d6893 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -12,142 +12,8 @@
  */
 
 #include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
 
-#include "dsi.h"
-#include "dsi.xml.h"
-
-#define dsi_phy_read(offset) msm_readl((offset))
-#define dsi_phy_write(offset, data) msm_writel((data), (offset))
-
-struct dsi_phy_ops {
-	int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
-		const unsigned long bit_rate, const unsigned long esc_rate);
-	int (*disable)(struct msm_dsi_phy *phy);
-};
-
-struct dsi_phy_cfg {
-	enum msm_dsi_phy_type type;
-	struct dsi_reg_config reg_cfg;
-	struct dsi_phy_ops ops;
-};
-
-struct dsi_dphy_timing {
-	u32 clk_pre;
-	u32 clk_post;
-	u32 clk_zero;
-	u32 clk_trail;
-	u32 clk_prepare;
-	u32 hs_exit;
-	u32 hs_zero;
-	u32 hs_prepare;
-	u32 hs_trail;
-	u32 hs_rqst;
-	u32 ta_go;
-	u32 ta_sure;
-	u32 ta_get;
-};
-
-struct msm_dsi_phy {
-	struct platform_device *pdev;
-	void __iomem *base;
-	void __iomem *reg_base;
-	int id;
-
-	struct clk *ahb_clk;
-	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
-
-	struct dsi_dphy_timing timing;
-	const struct dsi_phy_cfg *cfg;
-
-	struct msm_dsi_pll *pll;
-};
-
-static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
-{
-	struct regulator_bulk_data *s = phy->supplies;
-	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
-	struct device *dev = &phy->pdev->dev;
-	int num = phy->cfg->reg_cfg.num;
-	int i, ret;
-
-	for (i = 0; i < num; i++)
-		s[i].supply = regs[i].name;
-
-	ret = devm_regulator_bulk_get(&phy->pdev->dev, num, s);
-	if (ret < 0) {
-		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
-						__func__, ret);
-		return ret;
-	}
-
-	for (i = 0; i < num; i++) {
-		if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
-			ret = regulator_set_voltage(s[i].consumer,
-				regs[i].min_voltage, regs[i].max_voltage);
-			if (ret < 0) {
-				dev_err(dev,
-					"regulator %d set voltage failed, %d\n",
-					i, ret);
-				return ret;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
-{
-	struct regulator_bulk_data *s = phy->supplies;
-	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
-	int num = phy->cfg->reg_cfg.num;
-	int i;
-
-	DBG("");
-	for (i = num - 1; i >= 0; i--)
-		if (regs[i].disable_load >= 0)
-			regulator_set_load(s[i].consumer,
-						regs[i].disable_load);
-
-	regulator_bulk_disable(num, s);
-}
-
-static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
-{
-	struct regulator_bulk_data *s = phy->supplies;
-	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
-	struct device *dev = &phy->pdev->dev;
-	int num = phy->cfg->reg_cfg.num;
-	int ret, i;
-
-	DBG("");
-	for (i = 0; i < num; i++) {
-		if (regs[i].enable_load >= 0) {
-			ret = regulator_set_load(s[i].consumer,
-							regs[i].enable_load);
-			if (ret < 0) {
-				dev_err(dev,
-					"regulator %d set op mode failed, %d\n",
-					i, ret);
-				goto fail;
-			}
-		}
-	}
-
-	ret = regulator_bulk_enable(num, s);
-	if (ret < 0) {
-		dev_err(dev, "regulator enable failed, %d\n", ret);
-		goto fail;
-	}
-
-	return 0;
-
-fail:
-	for (i--; i >= 0; i--)
-		regulator_set_load(s[i].consumer, regs[i].disable_load);
-	return ret;
-}
+#include "dsi_phy.h"
 
 #define S_DIV_ROUND_UP(n, d)	\
 	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
@@ -156,6 +22,7 @@ static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
 				s32 min_result, bool even)
 {
 	s32 v;
+
 	v = (tmax - tmin) * percent;
 	v = S_DIV_ROUND_UP(v, 100) + tmin;
 	if (even && (v & 0x1))
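
For reference, linear_inter() picks the value percent of the way from tmin toward tmax, rounding the signed division up, clamping from below at min_result, and (in the part of the function outside this hunk) stepping odd results down by one when even is requested. A standalone arithmetic check of the core computation:

	/* Sketch: userspace check of the interpolation helper's arithmetic. */
	#include <assert.h>

	#define S_DIV_ROUND_UP(n, d) \
		(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))

	static int linear_inter_example(int tmax, int tmin, int percent)
	{
		int v = (tmax - tmin) * percent;

		return S_DIV_ROUND_UP(v, 100) + tmin;
	}

	int main(void)
	{
		assert(linear_inter_example(30, 10, 25) == 15);  /* 25% of the span */
		assert(linear_inter_example(30, 10, 100) == 30); /* full span */
		return 0;
	}
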
@@ -164,7 +31,7 @@ static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
 		return max_t(s32, min_result, v);
 }
 
-static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
+static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
 					s32 ui, s32 coeff, s32 pcnt)
 {
 	s32 tmax, tmin, clk_z;
@@ -186,7 +53,7 @@ static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
 	timing->clk_zero = clk_z + 8 - temp;
 }
 
-static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
 	const unsigned long bit_rate, const unsigned long esc_rate)
 {
 	s32 ui, lpx;
@@ -256,9 +123,8 @@ static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
 	temp += 8 * ui + lpx;
 	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
 	if (tmin > tmax) {
-		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
+		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
 		timing->clk_pre = temp >> 1;
-		temp = (2 * tmax - tmin) * pcnt2;
 	} else {
 		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
 	}
@@ -276,130 +142,119 @@ static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
 	return 0;
 }
 
-static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+				u32 bit_mask)
 {
-	void __iomem *base = phy->reg_base;
+	int phy_id = phy->id;
+	u32 val;
 
-	if (!enable) {
-		dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+	if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
 		return;
-	}
 
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+	val = dsi_phy_read(phy->base + reg);
+
+	if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
+		dsi_phy_write(phy->base + reg, val | bit_mask);
+	else
+		dsi_phy_write(phy->base + reg, val & (~bit_mask));
 }
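
msm_dsi_phy_set_src_pll() reduces the per-generation src_pll_truthtable (declared in dsi_phy.h later in this patch) to one register bit: cell [phy_id][pll_id] says whether this PHY sets its GLBL_TEST_CTRL selection bit when sourcing bits from that PLL. With the 28nm HPM table { {true, true}, {false, true} }, for instance, PHY0 sets the bit for either PLL while PHY1 sets it only for PLL1. A userspace rendering of the lookup, assuming DSI_MAX == 2:

	#include <stdbool.h>
	#include <stdio.h>

	#define DSI_MAX 2

	/* 28nm HPM table as declared in the patch below. */
	static const bool src_pll_truthtable[DSI_MAX][DSI_MAX] = {
		{ true, true }, { false, true },
	};

	int main(void)
	{
		int phy_id, pll_id;

		for (phy_id = 0; phy_id < DSI_MAX; phy_id++)
			for (pll_id = 0; pll_id < DSI_MAX; pll_id++)
				printf("PHY%d from PLL%d -> %s HS_SEL bit\n",
				       phy_id, pll_id,
				       src_pll_truthtable[phy_id][pll_id] ?
				       "set" : "clear");
		return 0;
	}
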
 
-static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
-		const unsigned long bit_rate, const unsigned long esc_rate)
+static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
 {
-	struct dsi_dphy_timing *timing = &phy->timing;
-	int i;
-	void __iomem *base = phy->base;
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	struct device *dev = &phy->pdev->dev;
+	int num = phy->cfg->reg_cfg.num;
+	int i, ret;
 
-	DBG("");
+	for (i = 0; i < num; i++)
+		s[i].supply = regs[i].name;
 
-	if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
-		pr_err("%s: D-PHY timing calculation failed\n", __func__);
-		return -EINVAL;
+	ret = devm_regulator_bulk_get(dev, num, s);
+	if (ret < 0) {
+		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+						__func__, ret);
+		return ret;
 	}
 
-	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
-
-	dsi_28nm_phy_regulator_ctrl(phy, true);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
-		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
-		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
-		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
-	if (timing->clk_zero & BIT(8))
-		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
-			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
-		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
-		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
-		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
-		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
-		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
-		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
-		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
-		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
-		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
-
-	for (i = 0; i < 4; i++) {
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+	for (i = 0; i < num; i++) {
+		if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
+			ret = regulator_set_voltage(s[i].consumer,
+				regs[i].min_voltage, regs[i].max_voltage);
+			if (ret < 0) {
+				dev_err(dev,
+					"regulator %d set voltage failed, %d\n",
+					i, ret);
+				return ret;
+			}
+		}
 	}
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
 
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+	return 0;
+}
 
-	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
+{
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	int num = phy->cfg->reg_cfg.num;
+	int i;
 
-	if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
-		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
-	else
-		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);
+	DBG("");
+	for (i = num - 1; i >= 0; i--)
+		if (regs[i].disable_load >= 0)
+			regulator_set_load(s[i].consumer, regs[i].disable_load);
 
-	return 0;
+	regulator_bulk_disable(num, s);
 }
 
-static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
 {
-	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
-	dsi_28nm_phy_regulator_ctrl(phy, false);
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	struct device *dev = &phy->pdev->dev;
+	int num = phy->cfg->reg_cfg.num;
+	int ret, i;
 
-	/*
-	 * Wait for the registers writes to complete in order to
-	 * ensure that the phy is completely disabled
-	 */
-	wmb();
+	DBG("");
+	for (i = 0; i < num; i++) {
+		if (regs[i].enable_load >= 0) {
+			ret = regulator_set_load(s[i].consumer,
+							regs[i].enable_load);
+			if (ret < 0) {
+				dev_err(dev,
+					"regulator %d set op mode failed, %d\n",
+					i, ret);
+				goto fail;
+			}
+		}
+	}
+
+	ret = regulator_bulk_enable(num, s);
+	if (ret < 0) {
+		dev_err(dev, "regulator enable failed, %d\n", ret);
+		goto fail;
+	}
 
 	return 0;
+
+fail:
+	for (i--; i >= 0; i--)
+		regulator_set_load(s[i].consumer, regs[i].disable_load);
+	return ret;
 }
 
 static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
 {
+	struct device *dev = &phy->pdev->dev;
 	int ret;
 
-	pm_runtime_get_sync(&phy->pdev->dev);
+	pm_runtime_get_sync(dev);
 
 	ret = clk_prepare_enable(phy->ahb_clk);
 	if (ret) {
-		pr_err("%s: can't enable ahb clk, %d\n", __func__, ret);
-		pm_runtime_put_sync(&phy->pdev->dev);
+		dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+		pm_runtime_put_sync(dev);
 	}
 
 	return ret;
@@ -411,92 +266,74 @@ static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
 	pm_runtime_put_sync(&phy->pdev->dev);
 }
 
-static const struct dsi_phy_cfg dsi_phy_cfgs[MSM_DSI_PHY_MAX] = {
-	[MSM_DSI_PHY_28NM_HPM] = {
-		.type = MSM_DSI_PHY_28NM_HPM,
-		.reg_cfg = {
-			.num = 1,
-			.regs = {
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-		.ops = {
-			.enable = dsi_28nm_phy_enable,
-			.disable = dsi_28nm_phy_disable,
-		}
-	},
-	[MSM_DSI_PHY_28NM_LP] = {
-		.type = MSM_DSI_PHY_28NM_LP,
-		.reg_cfg = {
-			.num = 1,
-			.regs = {
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-		.ops = {
-			.enable = dsi_28nm_phy_enable,
-			.disable = dsi_28nm_phy_disable,
-		}
-	},
-};
-
 static const struct of_device_id dsi_phy_dt_match[] = {
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
 	{ .compatible = "qcom,dsi-phy-28nm-hpm",
-	  .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_HPM],},
+	  .data = &dsi_phy_28nm_hpm_cfgs },
 	{ .compatible = "qcom,dsi-phy-28nm-lp",
-	  .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_LP],},
+	  .data = &dsi_phy_28nm_lp_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
+	{ .compatible = "qcom,dsi-phy-20nm",
+	  .data = &dsi_phy_20nm_cfgs },
+#endif
 	{}
 };
 
 static int dsi_phy_driver_probe(struct platform_device *pdev)
 {
 	struct msm_dsi_phy *phy;
+	struct device *dev = &pdev->dev;
 	const struct of_device_id *match;
 	int ret;
 
-	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
 	if (!phy)
 		return -ENOMEM;
 
-	match = of_match_node(dsi_phy_dt_match, pdev->dev.of_node);
+	match = of_match_node(dsi_phy_dt_match, dev->of_node);
 	if (!match)
 		return -ENODEV;
 
 	phy->cfg = match->data;
 	phy->pdev = pdev;
 
-	ret = of_property_read_u32(pdev->dev.of_node,
+	ret = of_property_read_u32(dev->of_node,
 				"qcom,dsi-phy-index", &phy->id);
 	if (ret) {
-		dev_err(&pdev->dev,
-			"%s: PHY index not specified, ret=%d\n",
+		dev_err(dev, "%s: PHY index not specified, %d\n",
 			__func__, ret);
 		goto fail;
 	}
 
+	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
+				"qcom,dsi-phy-regulator-ldo-mode");
+
 	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
 	if (IS_ERR(phy->base)) {
-		dev_err(&pdev->dev, "%s: failed to map phy base\n", __func__);
+		dev_err(dev, "%s: failed to map phy base\n", __func__);
 		ret = -ENOMEM;
 		goto fail;
 	}
-	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
+
+	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
+				"DSI_PHY_REG");
 	if (IS_ERR(phy->reg_base)) {
-		dev_err(&pdev->dev,
-			"%s: failed to map phy regulator base\n", __func__);
+		dev_err(dev, "%s: failed to map phy regulator base\n",
+			__func__);
 		ret = -ENOMEM;
 		goto fail;
 	}
 
 	ret = dsi_phy_regulator_init(phy);
 	if (ret) {
-		dev_err(&pdev->dev, "%s: failed to init regulator\n", __func__);
+		dev_err(dev, "%s: failed to init regulator\n", __func__);
 		goto fail;
 	}
 
-	phy->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	phy->ahb_clk = devm_clk_get(dev, "iface_clk");
 	if (IS_ERR(phy->ahb_clk)) {
-		pr_err("%s: Unable to get ahb clk\n", __func__);
+		dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
 		ret = PTR_ERR(phy->ahb_clk);
 		goto fail;
 	}
@@ -510,7 +347,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 
 	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
 	if (!phy->pll)
-		dev_info(&pdev->dev,
+		dev_info(dev,
 			"%s: pll init failed, need separate pll clk driver\n",
 			__func__);
 
@@ -557,9 +394,10 @@ void __exit msm_dsi_phy_driver_unregister(void)
 	platform_driver_unregister(&dsi_phy_platform_driver);
 }
 
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	const unsigned long bit_rate, const unsigned long esc_rate)
 {
+	struct device *dev = &phy->pdev->dev;
 	int ret;
 
 	if (!phy || !phy->cfg->ops.enable)
@@ -567,30 +405,37 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
 
 	ret = dsi_phy_regulator_enable(phy);
 	if (ret) {
-		dev_err(&phy->pdev->dev, "%s: regulator enable failed, %d\n",
+		dev_err(dev, "%s: regulator enable failed, %d\n",
 			__func__, ret);
 		return ret;
 	}
 
-	return phy->cfg->ops.enable(phy, is_dual_panel, bit_rate, esc_rate);
+	ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
+	if (ret) {
+		dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+		dsi_phy_regulator_disable(phy);
+		return ret;
+	}
+
+	return 0;
 }
 
-int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
 {
 	if (!phy || !phy->cfg->ops.disable)
-		return -EINVAL;
+		return;
 
 	phy->cfg->ops.disable(phy);
-	dsi_phy_regulator_disable(phy);
 
-	return 0;
+	dsi_phy_regulator_disable(phy);
 }
 
 void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
-	u32 *clk_pre, u32 *clk_post)
+					u32 *clk_pre, u32 *clk_post)
 {
 	if (!phy)
 		return;
+
 	if (clk_pre)
 		*clk_pre = phy->timing.clk_pre;
 	if (clk_post)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
new file mode 100644
index 000000000000..0456b253239f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_PHY_H__
+#define __DSI_PHY_H__
+
+#include <linux/regulator/consumer.h>
+
+#include "dsi.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+
+struct msm_dsi_phy_ops {
+	int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
+		const unsigned long bit_rate, const unsigned long esc_rate);
+	void (*disable)(struct msm_dsi_phy *phy);
+};
+
+struct msm_dsi_phy_cfg {
+	enum msm_dsi_phy_type type;
+	struct dsi_reg_config reg_cfg;
+	struct msm_dsi_phy_ops ops;
+
+	/*
+	 * Each cell {phy_id, pll_id} of the truth table indicates
+	 * if the source PLL selection bit should be set for each PHY.
+	 * Fill default H/W values in illegal cells, e.g. cell {0, 1}.
+	 */
+	bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+};
+
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
+
+struct msm_dsi_dphy_timing {
+	u32 clk_pre;
+	u32 clk_post;
+	u32 clk_zero;
+	u32 clk_trail;
+	u32 clk_prepare;
+	u32 hs_exit;
+	u32 hs_zero;
+	u32 hs_prepare;
+	u32 hs_trail;
+	u32 hs_rqst;
+	u32 ta_go;
+	u32 ta_sure;
+	u32 ta_get;
+};
+
+struct msm_dsi_phy {
+	struct platform_device *pdev;
+	void __iomem *base;
+	void __iomem *reg_base;
+	int id;
+
+	struct clk *ahb_clk;
+	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+
+	struct msm_dsi_dphy_timing timing;
+	const struct msm_dsi_phy_cfg *cfg;
+
+	bool regulator_ldo_mode;
+
+	struct msm_dsi_pll *pll;
+};
+
+/*
+ * PHY internal functions
+ */
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+	const unsigned long bit_rate, const unsigned long esc_rate);
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+				u32 bit_mask);
+
+#endif /* __DSI_PHY_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
new file mode 100644
index 000000000000..2e9ba118d50a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
+		struct msm_dsi_dphy_timing *timing)
+{
+	void __iomem *base = phy->base;
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
+		DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
+		DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
+		DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+	if (timing->clk_zero & BIT(8))
+		dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
+			DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
+		DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
+		DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
+		DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
+		DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
+		DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
+		DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
+		DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
+		DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *base = phy->reg_base;
+
+	if (!enable) {
+		dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+		return;
+	}
+
+	if (phy->regulator_ldo_mode) {
+		dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
+		return;
+	}
+
+	/* non LDO mode */
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
+	dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
+}
+
+static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+		const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	int i;
+	void __iomem *base = phy->base;
+	u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+		dev_err(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_20nm_phy_regulator_ctrl(phy, true);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
+
+	msm_dsi_phy_set_src_pll(phy, src_pll_id,
+				REG_DSI_20nm_PHY_GLBL_TEST_CTRL,
+				DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+	for (i = 0; i < 4; i++) {
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
+							(i >> 1) * 0x40);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
+	}
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
+
+	dsi_20nm_dphy_set_timing(phy, timing);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
+
+	/* make sure everything is written before enable */
+	wmb();
+	dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
+
+	return 0;
+}
+
+static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
+	dsi_20nm_phy_regulator_ctrl(phy, false);
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+	.type = MSM_DSI_PHY_20NM,
+	.src_pll_truthtable = { {false, true}, {false, true} },
+	.reg_cfg = {
+		.num = 2,
+		.regs = {
+			{"vddio", 1800000, 1800000, 100000, 100},
+			{"vcca", 1000000, 1000000, 10000, 100},
+		},
+	},
+	.ops = {
+		.enable = dsi_20nm_phy_enable,
+		.disable = dsi_20nm_phy_disable,
+	}
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
new file mode 100644
index 000000000000..f1a7c7b46420
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+		struct msm_dsi_dphy_timing *timing)
+{
+	void __iomem *base = phy->base;
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+	if (timing->clk_zero & BIT(8))
+		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *base = phy->reg_base;
+
+	if (!enable) {
+		dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+		return;
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+		const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	int i;
+	void __iomem *base = phy->base;
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+		dev_err(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+	dsi_28nm_phy_regulator_ctrl(phy, true);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+
+	dsi_28nm_dphy_set_timing(phy, timing);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+	for (i = 0; i < 4; i++) {
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+	}
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+	msm_dsi_phy_set_src_pll(phy, src_pll_id,
+				REG_DSI_28nm_PHY_GLBL_TEST_CTRL,
+				DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+	return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+	dsi_28nm_phy_regulator_ctrl(phy, false);
+
+	/*
+	 * Wait for the registers writes to complete in order to
+	 * ensure that the phy is completely disabled
+	 */
+	wmb();
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
+	.type = MSM_DSI_PHY_28NM_HPM,
+	.src_pll_truthtable = { {true, true}, {false, true} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+	.ops = {
+		.enable = dsi_28nm_phy_enable,
+		.disable = dsi_28nm_phy_disable,
+	},
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
+	.type = MSM_DSI_PHY_28NM_LP,
+	.src_pll_truthtable = { {true, true}, {true, true} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+	.ops = {
+		.enable = dsi_28nm_phy_enable,
+		.disable = dsi_28nm_phy_disable,
+	},
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 509376fdd112..5104fc9f9a53 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -72,31 +72,14 @@ long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
 int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
 {
 	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	int ret;
-
-	/*
-	 * Certain PLLs need to update the same VCO rate and registers
-	 * after resume in suspend/resume scenario.
-	 */
-	if (pll->restore_state) {
-		ret = pll->restore_state(pll);
-		if (ret)
-			goto error;
-	}
 
-	ret = dsi_pll_enable(pll);
-
-error:
-	return ret;
+	return dsi_pll_enable(pll);
 }
 
 void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
 {
 	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
 
-	if (pll->save_state)
-		pll->save_state(pll);
-
 	dsi_pll_disable(pll);
 }
 
@@ -134,6 +117,29 @@ void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
 		pll->destroy(pll);
 }
 
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+	if (pll->save_state) {
+		pll->save_state(pll);
+		pll->state_saved = true;
+	}
+}
+
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+	int ret;
+
+	if (pll->restore_state && pll->state_saved) {
+		ret = pll->restore_state(pll);
+		if (ret)
+			return ret;
+
+		pll->state_saved = false;
+	}
+
+	return 0;
+}
+
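
The new entry points move save/restore policy out of the clock prepare/unprepare hooks: msm_dsi_pll_save_state() latches state_saved, and msm_dsi_pll_restore_state() replays the cached registers only while that latch is set, clearing it afterwards so a restore can never consume stale or never-saved state. The one-shot latch in isolation (field names mirror the patch; the struct itself is illustrative):

	#include <stdbool.h>

	/* Sketch: one-shot save/restore latch, as used by the PLL helpers. */
	struct cached {
		bool state_saved;
		unsigned long vco_rate; /* stand-in for the cached registers */
	};

	static void example_save(struct cached *c, unsigned long rate)
	{
		c->vco_rate = rate;
		c->state_saved = true;
	}

	static int example_restore(struct cached *c, unsigned long *rate)
	{
		if (!c->state_saved)
			return 0; /* nothing saved; nothing to replay */

		*rate = c->vco_rate;
		c->state_saved = false; /* one-shot: consume the snapshot */
		return 0;
	}
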
 struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 			enum msm_dsi_phy_type type, int id)
 {
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 5a3bb241c039..063caa2c5740 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -27,6 +27,7 @@ struct msm_dsi_pll {
 
 	struct clk_hw	clk_hw;
 	bool		pll_on;
+	bool		state_saved;
 
 	unsigned long	min_rate;
 	unsigned long	max_rate;
@@ -82,8 +83,16 @@ void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
 /*
  * Initialization for Each PLL Type
  */
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
 struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
 					enum msm_dsi_phy_type type, int id);
+#else
+static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
+	struct platform_device *pdev, enum msm_dsi_phy_type type, int id)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif
 
 #endif /* __DSI_PLL_H__ */
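
The #ifdef block above is the usual header-side companion to a Kconfig-gated object file: when CONFIG_DRM_MSM_DSI_28NM_PHY is disabled, callers still compile against an inline stub that reports -ENODEV, so no call site needs its own #ifdef. The generic shape, with CONFIG_FOO and foo_init() as placeholders:

	/* Sketch: header stub so callers never need #ifdef at the call site.
	 * Needs <linux/err.h> for ERR_PTR(); struct foo is a placeholder. */
	#ifdef CONFIG_FOO
	struct foo *foo_init(struct platform_device *pdev);
	#else
	static inline struct foo *foo_init(struct platform_device *pdev)
	{
		return ERR_PTR(-ENODEV); /* feature compiled out */
	}
	#endif
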
 
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 18b7727bdc57..598fdaff0a41 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -465,26 +465,21 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
 	void __iomem *base = pll_28nm->mmio;
 	int ret;
 
-	if ((cached_state->vco_rate != 0) &&
-		(cached_state->vco_rate == clk_hw_get_rate(&pll->clk_hw))) {
-		ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
-						cached_state->vco_rate, 0);
-		if (ret) {
-			dev_err(&pll_28nm->pdev->dev,
-				"restore vco rate failed. ret=%d\n", ret);
-			return ret;
-		}
-
-		pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
-				cached_state->postdiv3);
-		pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
-				cached_state->postdiv1);
-		pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
-				cached_state->byte_mux);
-
-		cached_state->vco_rate = 0;
+	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+					cached_state->vco_rate, 0);
+	if (ret) {
+		dev_err(&pll_28nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
 	}
 
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+			cached_state->postdiv3);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+			cached_state->postdiv1);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+			cached_state->byte_mux);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 26f268e2dd3d..06cbddfc914f 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
-
-Copyright (C) 2013 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
+
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f9c71dceb5e2..bef1d65fe28c 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 814536202efe..101b324cdeef 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -22,7 +22,9 @@
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
 {
 	uint32_t ctrl = 0;
+	unsigned long flags;
 
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
 	if (power_on) {
 		ctrl |= HDMI_CTRL_ENABLE;
 		if (!hdmi->hdmi_mode) {
@@ -37,6 +39,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
 	}
 
 	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
 	DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
 			power_on ? "Enable" : "Disable", ctrl);
 }
@@ -51,6 +54,10 @@ static irqreturn_t hdmi_irq(int irq, void *dev_id)
 	/* Process DDC: */
 	hdmi_i2c_irq(hdmi->i2c);
 
+	/* Process HDCP: */
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+
 	/* TODO audio.. */
 
 	return IRQ_HANDLED;
@@ -60,6 +67,15 @@ static void hdmi_destroy(struct hdmi *hdmi)
 {
 	struct hdmi_phy *phy = hdmi->phy;
 
+	/*
+	 * At this point HPD has been disabled; once the workqueue is
+	 * flushed, it is safe to deinit HDCP.
+	 */
+	if (hdmi->workq) {
+		flush_workqueue(hdmi->workq);
+		destroy_workqueue(hdmi->workq);
+	}
+	hdmi_hdcp_destroy(hdmi);
 	if (phy)
 		phy->funcs->destroy(phy);
 
@@ -77,6 +93,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 {
 	struct hdmi_platform_config *config = pdev->dev.platform_data;
 	struct hdmi *hdmi = NULL;
+	struct resource *res;
 	int i, ret;
 
 	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -87,18 +104,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 
 	hdmi->pdev = pdev;
 	hdmi->config = config;
+	spin_lock_init(&hdmi->reg_lock);
 
 	/* not sure about which phy maps to which msm.. probably I miss some */
-	if (config->phy_init)
+	if (config->phy_init) {
 		hdmi->phy = config->phy_init(hdmi);
-	else
-		hdmi->phy = ERR_PTR(-ENXIO);
 
-	if (IS_ERR(hdmi->phy)) {
-		ret = PTR_ERR(hdmi->phy);
-		dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
-		hdmi->phy = NULL;
-		goto fail;
+		if (IS_ERR(hdmi->phy)) {
+			ret = PTR_ERR(hdmi->phy);
+			dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
+			hdmi->phy = NULL;
+			goto fail;
+		}
 	}
 
 	hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
@@ -107,6 +124,18 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 		goto fail;
 	}
 
+	/* HDCP needs physical address of hdmi register */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		config->mmio_name);
+	hdmi->mmio_phy_addr = res->start;
+
+	hdmi->qfprom_mmio = msm_ioremap(pdev,
+		config->qfprom_mmio_name, "HDMI_QFPROM");
+	if (IS_ERR(hdmi->qfprom_mmio)) {
+		dev_info(&pdev->dev, "can't find qfprom resource\n");
+		hdmi->qfprom_mmio = NULL;
+	}
+
 	hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
 			config->hpd_reg_cnt, GFP_KERNEL);
 	if (!hdmi->hpd_regs) {
@@ -189,6 +218,8 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 		hdmi->pwr_clks[i] = clk;
 	}
 
+	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+
 	hdmi->i2c = hdmi_i2c_init(hdmi);
 	if (IS_ERR(hdmi->i2c)) {
 		ret = PTR_ERR(hdmi->i2c);
@@ -197,6 +228,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
 		goto fail;
 	}
 
+	hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+	if (IS_ERR(hdmi->hdcp_ctrl)) {
+		dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
+		hdmi->hdcp_ctrl = NULL;
+	}
+
 	return hdmi;
 
 fail:
@@ -310,7 +347,7 @@ static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"};
 static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"};
 static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
 
-static struct hdmi_platform_config hdmi_tx_8074_config = {
+static struct hdmi_platform_config hdmi_tx_8974_config = {
 		.phy_init = hdmi_phy_8x74_init,
 		HDMI_CFG(pwr_reg, 8x74),
 		HDMI_CFG(hpd_reg, 8x74),
@@ -330,9 +367,21 @@ static struct hdmi_platform_config hdmi_tx_8084_config = {
 		.hpd_freq      = hpd_clk_freq_8x74,
 };
 
+static const char *hpd_reg_names_8x94[] = {};
+
+static struct hdmi_platform_config hdmi_tx_8994_config = {
+		.phy_init = NULL, /* no PHY init needed for the 20nm HDMI PHY */
+		HDMI_CFG(pwr_reg, 8x74),
+		HDMI_CFG(hpd_reg, 8x94),
+		HDMI_CFG(pwr_clk, 8x74),
+		HDMI_CFG(hpd_clk, 8x74),
+		.hpd_freq      = hpd_clk_freq_8x74,
+};
+
 static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
 	{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
-	{ .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config },
+	{ .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
 	{ .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
 	{ .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
 	{}
@@ -347,8 +396,7 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
 		snprintf(name2, sizeof(name2), "%s-gpio", name);
 		gpio = of_get_named_gpio(of_node, name2, 0);
 		if (gpio < 0) {
-			dev_err(dev, "failed to get gpio: %s (%d)\n",
-					name, gpio);
+			DBG("failed to get gpio: %s (%d)", name, gpio);
 			gpio = -1;
 		}
 	}
@@ -376,6 +424,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 	}
 
 	hdmi_cfg->mmio_name     = "core_physical";
+	hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
 	hdmi_cfg->ddc_clk_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
 	hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
 	hdmi_cfg->hpd_gpio      = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
@@ -391,7 +440,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 	if (cpu_is_apq8064()) {
 		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init      = hdmi_phy_8960_init;
-		config.mmio_name     = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -404,7 +452,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 	} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
 		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init      = hdmi_phy_8960_init;
-		config.mmio_name     = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -419,7 +466,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 				"8901_hdmi_mvs", "8901_mpp0"
 		};
 		config.phy_init      = hdmi_phy_8x60_init;
-		config.mmio_name     = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -430,6 +476,9 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
 		config.mux_en_gpio   = -1;
 		config.mux_sel_gpio  = -1;
 	}
+	config.mmio_name     = "hdmi_msm_hdmi_addr";
+	config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
+
 	hdmi_cfg = &config;
 #endif
 	dev->platform_data = hdmi_cfg;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 68fdfb3622a5..d0e663192d01 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -37,6 +37,8 @@ struct hdmi_audio {
 	int rate;
 };
 
+struct hdmi_hdcp_ctrl;
+
 struct hdmi {
 	struct drm_device *dev;
 	struct platform_device *pdev;
@@ -51,6 +53,8 @@ struct hdmi {
 	unsigned long int pixclock;
 
 	void __iomem *mmio;
+	void __iomem *qfprom_mmio;
+	phys_addr_t mmio_phy_addr;
 
 	struct regulator **hpd_regs;
 	struct regulator **pwr_regs;
@@ -68,12 +72,25 @@ struct hdmi {
 	bool hdmi_mode;               /* are we in hdmi mode? */
 
 	int irq;
+	struct workqueue_struct *workq;
+
+	struct hdmi_hdcp_ctrl *hdcp_ctrl;
+
+	/*
+	 * spinlock to protect registers shared by different execution
+	 * contexts:
+	 * REG_HDMI_CTRL
+	 * REG_HDMI_DDC_ARBITRATION
+	 * REG_HDMI_HDCP_INT_CTRL
+	 * REG_HDMI_HPD_CTRL
+	 */
+	spinlock_t reg_lock;
 };
 
 /* platform config data (ie. from DT, or pdata) */
 struct hdmi_platform_config {
 	struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
 	const char *mmio_name;
+	const char *qfprom_mmio_name;
 
 	/* regulators that need to be on for hpd: */
 	const char **hpd_reg_names;
@@ -109,6 +126,11 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
 	return msm_readl(hdmi->mmio + reg);
 }
 
+static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
+{
+	return msm_readl(hdmi->qfprom_mmio + reg);
+}
+
 /*
  * The phy appears to be different, for example between 8960 and 8x60,
  * so split the phy related functions out and load the correct one at
@@ -117,7 +139,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
 
 struct hdmi_phy_funcs {
 	void (*destroy)(struct hdmi_phy *phy);
-	void (*reset)(struct hdmi_phy *phy);
 	void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
 	void (*powerdown)(struct hdmi_phy *phy);
 };
@@ -163,4 +184,13 @@ void hdmi_i2c_irq(struct i2c_adapter *i2c);
 void hdmi_i2c_destroy(struct i2c_adapter *i2c);
 struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
 
+/*
+ * hdcp
+ */
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
+void hdmi_hdcp_destroy(struct hdmi *hdmi);
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+
 #endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e6f034808371..0b1b5586ff35 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -441,6 +441,12 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
 
 #define REG_HDMI_HDCP_SW_LOWER_AKSV				0x00000288
 
+#define REG_HDMI_CEC_CTRL					0x0000028c
+
+#define REG_HDMI_CEC_WR_DATA					0x00000290
+
+#define REG_HDMI_CEC_CEC_RETRANSMIT				0x00000294
+
 #define REG_HDMI_CEC_STATUS					0x00000298
 
 #define REG_HDMI_CEC_INT					0x0000029c
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 872485f60134..df232e20c13e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -203,7 +203,6 @@ int hdmi_audio_update(struct hdmi *hdmi)
 		audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
 		audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
 	} else {
-		hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
 		acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
 		acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
 		vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index a7a1d8267cf0..92b69ae8caf9 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -100,8 +100,13 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
 		hdmi_audio_update(hdmi);
 	}
 
-	phy->funcs->powerup(phy, hdmi->pixclock);
+	if (phy)
+		phy->funcs->powerup(phy, hdmi->pixclock);
+
 	hdmi_set_mode(hdmi, true);
+
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_on(hdmi->hdcp_ctrl);
 }
 
 static void hdmi_bridge_enable(struct drm_bridge *bridge)
@@ -118,9 +123,14 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
 	struct hdmi *hdmi = hdmi_bridge->hdmi;
 	struct hdmi_phy *phy = hdmi->phy;
 
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_off(hdmi->hdcp_ctrl);
+
 	DBG("power down");
 	hdmi_set_mode(hdmi, false);
-	phy->funcs->powerdown(phy);
+
+	if (phy)
+		phy->funcs->powerdown(phy);
 
 	if (hdmi->power_on) {
 		power_off(bridge);
@@ -142,8 +152,6 @@ static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
 
 	hdmi->pixclock = mode->clock * 1000;
 
-	hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
-
 	hstart = mode->htotal - mode->hsync_start;
 	hend   = mode->htotal - mode->hsync_start + mode->hdisplay;
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 54aa93ff5473..a3b05ae52dae 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -28,6 +28,55 @@ struct hdmi_connector {
 };
 #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
 
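+/*
+ * Toggle the PHY SW reset lines. The SW_RESET and SW_RESET_PLL bits may be
+ * active-low or active-high depending on the HDMI_PHY_CTRL_SW_RESET_LOW and
+ * HDMI_PHY_CTRL_SW_RESET_PLL_LOW bits, so assert and then deassert each
+ * reset according to the polarity read back from HDMI_PHY_CTRL.
+ */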
+static void hdmi_phy_reset(struct hdmi *hdmi)
+{
+	unsigned int val;
+
+	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+
+	msleep(100);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+}
+
 static int gpio_config(struct hdmi *hdmi, bool on)
 {
 	struct device *dev = &hdmi->pdev->dev;
@@ -35,21 +84,25 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 	int ret;
 
 	if (on) {
-		ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
-		if (ret) {
-			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-				"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
-			goto error1;
+		if (config->ddc_clk_gpio != -1) {
+			ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
+			if (ret) {
+				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
+				goto error1;
+			}
+			gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
 		}
-		gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
 
-		ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
-		if (ret) {
-			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-				"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
-			goto error2;
+		if (config->ddc_data_gpio != -1) {
+			ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
+			if (ret) {
+				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
+				goto error2;
+			}
+			gpio_set_value_cansleep(config->ddc_data_gpio, 1);
 		}
-		gpio_set_value_cansleep(config->ddc_data_gpio, 1);
 
 		ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
 		if (ret) {
@@ -94,8 +147,12 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 		}
 		DBG("gpio on");
 	} else {
-		gpio_free(config->ddc_clk_gpio);
-		gpio_free(config->ddc_data_gpio);
+		if (config->ddc_clk_gpio != -1)
+			gpio_free(config->ddc_clk_gpio);
+
+		if (config->ddc_data_gpio != -1)
+			gpio_free(config->ddc_data_gpio);
+
 		gpio_free(config->hpd_gpio);
 
 		if (config->mux_en_gpio != -1) {
@@ -126,9 +183,11 @@ error5:
 error4:
 	gpio_free(config->hpd_gpio);
 error3:
-	gpio_free(config->ddc_data_gpio);
+	if (config->ddc_data_gpio != -1)
+		gpio_free(config->ddc_data_gpio);
 error2:
-	gpio_free(config->ddc_clk_gpio);
+	if (config->ddc_clk_gpio != -1)
+		gpio_free(config->ddc_clk_gpio);
 error1:
 	return ret;
 }
@@ -138,9 +197,9 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
 	struct hdmi *hdmi = hdmi_connector->hdmi;
 	const struct hdmi_platform_config *config = hdmi->config;
 	struct device *dev = &hdmi->pdev->dev;
-	struct hdmi_phy *phy = hdmi->phy;
 	uint32_t hpd_ctrl;
 	int i, ret;
+	unsigned long flags;
 
 	for (i = 0; i < config->hpd_reg_cnt; i++) {
 		ret = regulator_enable(hdmi->hpd_regs[i]);
@@ -181,7 +240,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
 	}
 
 	hdmi_set_mode(hdmi, false);
-	phy->funcs->reset(phy);
+	hdmi_phy_reset(hdmi);
 	hdmi_set_mode(hdmi, true);
 
 	hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
@@ -192,6 +251,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
 			HDMI_HPD_INT_CTRL_INT_EN);
 
 	/* set timeout to 4.1ms (max) for hardware debounce */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
 	hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
 	hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
 
@@ -200,6 +260,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
 			~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
 	hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
 			HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
 
 	return 0;
 
@@ -250,7 +311,6 @@ hotplug_work(struct work_struct *work)
 void hdmi_connector_irq(struct drm_connector *connector)
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-	struct msm_drm_private *priv = connector->dev->dev_private;
 	struct hdmi *hdmi = hdmi_connector->hdmi;
 	uint32_t hpd_int_status, hpd_int_ctrl;
 
@@ -274,7 +334,7 @@ void hdmi_connector_irq(struct drm_connector *connector)
 			hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
 		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
 
-		queue_work(priv->wq, &hdmi_connector->hpd_work);
+		queue_work(hdmi->workq, &hdmi_connector->hpd_work);
 	}
 }
 
@@ -350,6 +410,7 @@ static int hdmi_connector_get_modes(struct drm_connector *connector)
 
 	hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
 
+	hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
 	drm_mode_connector_update_edid_property(connector, edid);
 
 	if (edid) {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
new file mode 100644
index 000000000000..1dc9c34eb0df
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -0,0 +1,1437 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hdmi.h"
+#include <linux/qcom_scm.h>
+
+#define HDCP_REG_ENABLE 0x01
+#define HDCP_REG_DISABLE 0x00
+#define HDCP_PORT_ADDR 0x74
+
+#define HDCP_INT_STATUS_MASK ( \
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT)
+
+#define AUTH_WORK_RETRIES_TIME 100
+#define AUTH_RETRIES_TIME 30
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB  0x000000F8
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB  0x000000FC
+#define HDCP_KSV_LSB                     0x000060D8
+#define HDCP_KSV_MSB                     0x000060DC
+
+enum DS_TYPE {  /* type of downstream device */
+	DS_UNKNOWN,
+	DS_RECEIVER,
+	DS_REPEATER,
+};
+
+enum hdmi_hdcp_state {
+	HDCP_STATE_NO_AKSV,
+	HDCP_STATE_INACTIVE,
+	HDCP_STATE_AUTHENTICATING,
+	HDCP_STATE_AUTHENTICATED,
+	HDCP_STATE_AUTH_FAILED
+};
+
+struct hdmi_hdcp_reg_data {
+	u32 reg_id;
+	u32 off;
+	char *name;
+	u32 reg_val;
+};
+
+struct hdmi_hdcp_ctrl {
+	struct hdmi *hdmi;
+	u32 auth_retries;
+	bool tz_hdcp;
+	enum hdmi_hdcp_state hdcp_state;
+	struct work_struct hdcp_auth_work;
+	struct work_struct hdcp_reauth_work;
+
+#define AUTH_ABORT_EV 1
+#define AUTH_RESULT_RDY_EV 2
+	unsigned long auth_event;
+	wait_queue_head_t auth_event_queue;
+
+	u32 ksv_fifo_w_index;
+	/*
+	 * store aksv from qfprom
+	 */
+	u32 aksv_lsb;
+	u32 aksv_msb;
+	bool aksv_valid;
+	u32 ds_type;
+	u32 bksv_lsb;
+	u32 bksv_msb;
+	u8 dev_count;
+	u8 depth;
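+	/* 5 bytes per KSV; HDCP 1.x allows at most 127 downstream devices */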
+	u8 ksv_list[5 * 127];
+	bool max_cascade_exceeded;
+	bool max_dev_exceeded;
+};
+
+static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+	u8 *data, u16 data_len)
+{
+	int rc;
+	int retry = 5;
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= addr >> 1,
+			.flags	= 0,
+			.len	= 1,
+			.buf	= &offset,
+		}, {
+			.addr	= addr >> 1,
+			.flags	= I2C_M_RD,
+			.len	= data_len,
+			.buf	= data,
+		}
+	};
+
+	DBG("Start DDC read");
+retry:
+	rc = i2c_transfer(hdmi->i2c, msgs, 2);
+
+	retry--;
+	if (rc == 2)
+		rc = 0;
+	else if (retry > 0)
+		goto retry;
+	else
+		rc = -EIO;
+
+	DBG("End DDC read %d", rc);
+
+	return rc;
+}
+
+#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
+
+static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+	u8 *data, u16 data_len)
+{
+	int rc;
+	int retry = 10;
+	u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= addr >> 1,
+			.flags	= 0,
+			.len	= 1,
+		}
+	};
+
+	DBG("Start DDC write");
+	if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
+		pr_err("%s: write size too big\n", __func__);
+		return -ERANGE;
+	}
+
+	buf[0] = offset;
+	memcpy(&buf[1], data, data_len);
+	msgs[0].buf = buf;
+	msgs[0].len = data_len + 1;
+retry:
+	rc = i2c_transfer(hdmi->i2c, msgs, 1);
+
+	retry--;
+	if (rc == 1)
+		rc = 0;
+	else if (retry > 0)
+		goto retry;
+	else
+		rc = -EIO;
+
+	DBG("End DDC write %d", rc);
+
+	return rc;
+}
+
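+/*
+ * Write one or more HDCP registers. When the HDCP keys are managed by
+ * TrustZone (tz_hdcp), the writes are handed to the secure world through
+ * qcom_scm_hdcp_req() in batches of up to QCOM_SCM_HDCP_MAX_REQ_CNT,
+ * using physical register addresses; otherwise they are plain MMIO writes.
+ */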
+static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
+	u32 *pdata, u32 count)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	struct qcom_scm_hdcp_req scm_buf[QCOM_SCM_HDCP_MAX_REQ_CNT];
+	u32 resp, phy_addr, idx = 0;
+	int i, ret = 0;
+
+	WARN_ON(!pdata || !preg || (count == 0));
+
+	if (hdcp_ctrl->tz_hdcp) {
+		phy_addr = (u32)hdmi->mmio_phy_addr;
+
+		while (count) {
+			memset(scm_buf, 0, sizeof(scm_buf));
+			for (i = 0; i < count && i < QCOM_SCM_HDCP_MAX_REQ_CNT;
+				i++) {
+				scm_buf[i].addr = phy_addr + preg[idx];
+				scm_buf[i].val  = pdata[idx];
+				idx++;
+			}
+			ret = qcom_scm_hdcp_req(scm_buf, i, &resp);
+
+			if (ret || resp) {
+				pr_err("%s: error: scm_call ret=%d resp=%u\n",
+					__func__, ret, resp);
+				ret = -EINVAL;
+				break;
+			}
+
+			count -= i;
+		}
+	} else {
+		for (i = 0; i < count; i++)
+			hdmi_write(hdmi, preg[i], pdata[i]);
+	}
+
+	return ret;
+}
+
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val, hdcp_int_status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_INT_CTRL);
+	hdcp_int_status = reg_val & HDCP_INT_STATUS_MASK;
+	if (!hdcp_int_status) {
+		spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+		return;
+	}
+	/* Clear Interrupts */
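+	/* (each *_ACK bit sits one position above its *_INT status bit) */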
+	reg_val |= hdcp_int_status << 1;
+	/* Clear AUTH_FAIL_INFO as well */
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT)
+		reg_val |= HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK;
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	DBG("hdcp irq %x", hdcp_int_status);
+
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT) {
+		pr_info("%s:AUTH_SUCCESS_INT received\n", __func__);
+		if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
+			set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+			wake_up_all(&hdcp_ctrl->auth_event_queue);
+		}
+	}
+
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) {
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		pr_info("%s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
+			__func__, reg_val);
+		if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state)
+			queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+		else if (HDCP_STATE_AUTHENTICATING ==
+				hdcp_ctrl->hdcp_state) {
+			set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+			wake_up_all(&hdcp_ctrl->auth_event_queue);
+		}
+	}
+}
+
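+/*
+ * Sleep for up to @ms milliseconds, waking early if @ev is signalled (by
+ * the HDCP irq handler, for example). Returns -ECANCELED if the event
+ * fired, 0 if the full delay elapsed.
+ */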
+static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
+{
+	int rc;
+
+	rc = wait_event_timeout(hdcp_ctrl->auth_event_queue,
+		!!test_bit(ev, &hdcp_ctrl->auth_event),
+		msecs_to_jiffies(ms));
+	if (rc) {
+		pr_info("%s: msleep is canceled by event %d\n",
+				__func__, ev);
+		clear_bit(ev, &hdcp_ctrl->auth_event);
+		return -ECANCELED;
+	}
+
+	return 0;
+}
+
+static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+	/* Fetch aksv from QFPROM, this info should be public. */
+	hdcp_ctrl->aksv_lsb = hdmi_qfprom_read(hdmi, HDCP_KSV_LSB);
+	hdcp_ctrl->aksv_msb = hdmi_qfprom_read(hdmi, HDCP_KSV_MSB);
+
+	/* check there are 20 ones in AKSV */
+	if ((hweight32(hdcp_ctrl->aksv_lsb) + hweight32(hdcp_ctrl->aksv_msb))
+			!= 20) {
+		pr_err("%s: AKSV QFPROM doesn't have 20 1's, 20 0's\n",
+			__func__);
+		pr_err("%s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n",
+			__func__, hdcp_ctrl->aksv_msb,
+			hdcp_ctrl->aksv_lsb);
+		return -EINVAL;
+	}
+	DBG("AKSV=%02x%08x", hdcp_ctrl->aksv_msb, hdcp_ctrl->aksv_lsb);
+
+	return 0;
+}
+
+static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val, failure, nack0;
+	int rc = 0;
+
+	/* Check for any DDC transfer failures */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+	failure = reg_val & HDMI_HDCP_DDC_STATUS_FAILED;
+	nack0 = reg_val & HDMI_HDCP_DDC_STATUS_NACK0;
+	DBG("HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d",
+		reg_val, failure, nack0);
+
+	if (failure) {
+		/*
+		 * Indicates that the last HDCP HW DDC transfer failed.
+		 * This occurs when a transfer is attempted with HDCP DDC
+		 * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+		 * matches HDCP_DDC_RETRY_CNT.
+		 * Failure occurred, let's clear it.
+		 */
+		DBG("DDC failure detected");
+
+		/* First, Disable DDC */
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0,
+			HDMI_HDCP_DDC_CTRL_0_DISABLE);
+
+		/* ACK the Failure to Clear it */
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_CTRL_1);
+		reg_val |= HDMI_HDCP_DDC_CTRL_1_FAILED_ACK;
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_1, reg_val);
+
+		/* Check if the FAILURE got Cleared */
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+		if (reg_val & HDMI_HDCP_DDC_STATUS_FAILED)
+			pr_info("%s: Unable to clear HDCP DDC Failure\n",
+				__func__);
+
+		/* Re-Enable HDCP DDC */
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, 0);
+	}
+
+	if (nack0) {
+		DBG("Before: HDMI_DDC_SW_STATUS=0x%08x",
+			hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+		/* Reset HDMI DDC software status */
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		/* Reset HDMI DDC Controller */
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val |= HDMI_DDC_CTRL_SOFT_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		/* If previous msleep is aborted, skip this msleep */
+		if (!rc)
+			rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+		DBG("After: HDMI_DDC_SW_STATUS=0x%08x",
+			hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+	}
+
+	return rc;
+}
+
+static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 hdcp_ddc_status, ddc_hw_status;
+	u32 xfer_done, xfer_req, hw_done;
+	bool hw_not_ready;
+	u32 timeout_count;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+	if (hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS) == 0)
+		return 0;
+
+	/* Wait to be clean on DDC HW engine */
+	timeout_count = 100;
+	do {
+		hdcp_ddc_status = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+		ddc_hw_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
+
+		xfer_done = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_DONE;
+		xfer_req = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_REQ;
+		hw_done = ddc_hw_status & HDMI_DDC_HW_STATUS_DONE;
+		hw_not_ready = !xfer_done || xfer_req || !hw_done;
+
+		if (!hw_not_ready)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_warn("%s: hw_ddc_clean failed\n", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static void hdmi_hdcp_reauth_work(struct work_struct *work)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+		struct hdmi_hdcp_ctrl, hdcp_reauth_work);
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	u32 reg_val;
+
+	DBG("HDCP REAUTH WORK");
+	/*
+	 * Disable HPD circuitry.
+	 * This is needed to reset the HDCP cipher engine so that when we
+	 * attempt a re-authentication, HW would clear the AN0_READY and
+	 * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+	/* Disable HDCP interrupts */
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+		HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+	/* Wait to be clean on DDC HW engine */
+	if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
+		pr_info("%s: reauth work aborted\n", __func__);
+		return;
+	}
+
+	/* Disable encryption and disable the HDCP block */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+	/* Enable HPD circuitry */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val |= HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Retry only a limited number of times, then abort the current
+	 * authentication attempt
+	 */
+	if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) {
+		hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+		hdcp_ctrl->auth_retries = 0;
+		pr_info("%s: abort reauthentication!\n", __func__);
+
+		return;
+	}
+
+	DBG("Queue AUTH WORK");
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	u32 reg_val;
+	unsigned long flags;
+	int rc;
+
+	if (!hdcp_ctrl->aksv_valid) {
+		rc = hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
+		if (rc) {
+			pr_err("%s: ASKV validation failed\n", __func__);
+			hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV;
+			return -ENOTSUPP;
+		}
+		hdcp_ctrl->aksv_valid = true;
+	}
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	/* disable HDMI Encrypt */
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+	/* Enabling Software DDC */
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+	reg_val &= ~HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+	hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Write AKSV read from QFPROM to the HDCP registers.
+	 * The AKSV is needed for HDCP authentication and must be
+	 * written before HDCP is enabled.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_SW_LOWER_AKSV, hdcp_ctrl->aksv_lsb);
+	hdmi_write(hdmi, REG_HDMI_HDCP_SW_UPPER_AKSV, hdcp_ctrl->aksv_msb);
+
+	/*
+	 * HDCP setup prior to enabling HDCP_CTRL.
+	 * Setup seed values for random number An.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
+	hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
+
+	/* Disable the RngCipher state */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL);
+	reg_val &= ~HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER;
+	hdmi_write(hdmi, REG_HDMI_HDCP_DEBUG_CTRL, reg_val);
+	DBG("HDCP_DEBUG_CTRL=0x%08x",
+		hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL));
+
+	/*
+	 * Ensure that all register writes are completed before
+	 * enabling HDCP cipher
+	 */
+	wmb();
+
+	/*
+	 * Enable HDCP
+	 * This needs to be done as early as possible in order for the
+	 * hardware to make An available to read
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, HDMI_HDCP_CTRL_ENABLE);
+
+	/*
+	 * If we had stale values for the An ready bit, it should most
+	 * likely be cleared now after enabling HDCP cipher
+	 */
+	link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+	DBG("After enabling HDCP Link0_Status=0x%08x", link0_status);
+	if (!(link0_status &
+		(HDMI_HDCP_LINK0_STATUS_AN_0_READY |
+		HDMI_HDCP_LINK0_STATUS_AN_1_READY)))
+		DBG("An not ready after enabling HDCP");
+
+	/* Clear any DDC failures from previous tries before enabling HDCP */
+	rc = reset_hdcp_ddc_failures(hdcp_ctrl);
+
+	return rc;
+}
+
+static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	DBG("hdcp auth failed, queue reauth work");
+	/* clear HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAILED;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+}
+
+static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	/*
+	 * Disable software DDC before going into Part III to make sure
+	 * there is no arbitration between software and hardware for DDC
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+	reg_val |= HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+	hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/* enable HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val |= HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
+	hdcp_ctrl->auth_retries = 0;
+}
+
+/*
+ * hdcp authenticating part 1
+ * Wait Key/An ready
+ * Read BCAPS from sink
+ * Write BCAPS and AKSV into HDCP engine
+ * Write An and AKSV to sink
+ * Read BKSV from sink and write into HDCP engine
+ */
+static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status, keys_state;
+	u32 timeout_count;
+	bool an_ready;
+
+	/* Wait for HDCP keys to be checked and validated */
+	timeout_count = 100;
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		keys_state = (link0_status >> 28) & 0x7;
+		if (keys_state == HDCP_KEYS_STATE_VALID)
+			break;
+
+		DBG("Keys not ready(%d). s=%d, l0=%0x08x",
+			timeout_count, keys_state, link0_status);
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait key state timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	timeout_count = 100;
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		an_ready = (link0_status & HDMI_HDCP_LINK0_STATUS_AN_0_READY)
+			&& (link0_status & HDMI_HDCP_LINK0_STATUS_AN_1_READY);
+		if (an_ready)
+			break;
+
+		DBG("An not ready(%d). l0_status=0x%08x",
+			timeout_count, link0_status);
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait An timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_aksv_0, link0_aksv_1;
+	u32 link0_an[2];
+	u8 aksv[5];
+
+	/* Read An0 and An1 */
+	link0_an[0] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA5);
+	link0_an[1] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA6);
+
+	/* Read AKSV */
+	link0_aksv_0 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA3);
+	link0_aksv_1 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4);
+
+	DBG("Link ASKV=%08x%08x", link0_aksv_0, link0_aksv_1);
+	/* Copy An and AKSV to byte arrays for transmission */
+	aksv[0] =  link0_aksv_0        & 0xFF;
+	aksv[1] = (link0_aksv_0 >> 8)  & 0xFF;
+	aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+	aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+	aksv[4] =  link0_aksv_1        & 0xFF;
+
+	/* Write An to offset 0x18 */
+	rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
+		(u16)sizeof(link0_an));
+	if (rc) {
+		pr_err("%s:An write failed\n", __func__);
+		return rc;
+	}
+	DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]);
+
+	/* Write AKSV to offset 0x10 */
+	rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
+	if (rc) {
+		pr_err("%s:AKSV write failed\n", __func__);
+		return rc;
+	}
+	DBG("Link0-AKSV=%02x%08x", link0_aksv_1 & 0xFF, link0_aksv_0);
+
+	return 0;
+}
+
+static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u8 bksv[5];
+	u32 reg[2], data[2];
+
+	/* Read BKSV at offset 0x00 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
+	if (rc) {
+		pr_err("%s:BKSV read failed\n", __func__);
+		return rc;
+	}
+
+	hdcp_ctrl->bksv_lsb = bksv[0] | (bksv[1] << 8) |
+		(bksv[2] << 16) | (bksv[3] << 24);
+	hdcp_ctrl->bksv_msb = bksv[4];
+	DBG(":BKSV=%02x%08x", hdcp_ctrl->bksv_msb, hdcp_ctrl->bksv_lsb);
+
+	/* check there are 20 ones in BKSV */
+	if ((hweight32(hdcp_ctrl->bksv_lsb) + hweight32(hdcp_ctrl->bksv_msb))
+			!= 20) {
+		pr_err(": BKSV doesn't have 20 1's and 20 0's\n");
+		pr_err(": BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
+			bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+		return -EINVAL;
+	}
+
+	/* Write BKSV read from sink to HDCP registers */
+	reg[0] = REG_HDMI_HDCP_RCVPORT_DATA0;
+	data[0] = hdcp_ctrl->bksv_lsb;
+	reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1;
+	data[1] = hdcp_ctrl->bksv_msb;
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+	return rc;
+}
+
+static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg, data;
+	u8 bcaps;
+
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+	if (rc) {
+		pr_err("%s:BCAPS read failed\n", __func__);
+		return rc;
+	}
+	DBG("BCAPS=%02x", bcaps);
+
+	/* receiver (0), repeater (1) */
+	hdcp_ctrl->ds_type = (bcaps & BIT(6)) ? DS_REPEATER : DS_RECEIVER;
+
+	/* Write BCAPS to the hardware */
+	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+	data = (u32)bcaps;
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	int rc;
+
+	/* Wait for AKSV key and An ready */
+	rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: wait key and an ready failed\n", __func__);
+		return rc;
+	}
+
+	/* Read BCAPS and send to HDCP engine */
+	rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: read bcaps error, abort\n", __func__);
+		return rc;
+	}
+
+	/*
+	 * 1.1_Features turned off by default.
+	 * No need to write AInfo since 1.1_Features is disabled.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0);
+
+	/* Send AKSV and An to sink */
+	rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s:An/Aksv write failed\n", __func__);
+		return rc;
+	}
+
+	/* Read BKSV and send to HDCP engine*/
+	rc = hdmi_hdcp_recv_bksv(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s:BKSV Process failed\n", __func__);
+		return rc;
+	}
+
+	/* Enable HDCP interrupts and ack/clear any stale interrupts */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL,
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK |
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	return 0;
+}
+
+/* read R0' from sink and pass it to HDCP engine */
+static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	int rc = 0;
+	u8 buf[2];
+
+	/*
+	 * HDCP Compliance Test case 1A-01:
+	 * Wait here at least 100ms before reading R0'
+	 */
+	rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
+	if (rc)
+		return rc;
+
+	/* Read R0' at offset 0x08 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
+	if (rc) {
+		pr_err("%s:R0' read failed\n", __func__);
+		return rc;
+	}
+	DBG("R0'=%02x%02x", buf[1], buf[0]);
+
+	/* Write R0' to HDCP registers and check to see if it is a match */
+	hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA2_0,
+		(((u32)buf[1]) << 8) | buf[0]);
+
+	return 0;
+}
+
+/* Wait for authenticating result: R0/R0' are matched or not */
+static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	int rc;
+
+	/* wait for hdcp irq, 10 sec should be long enough */
+	rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
+	if (!rc) {
+		pr_err("%s: Wait Auth IRQ timeout\n", __func__);
+		return -ETIMEDOUT;
+	}
+
+	link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+	if (!(link0_status & HDMI_HDCP_LINK0_STATUS_RI_MATCHES)) {
+		pr_err("%s: Authentication Part I failed\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Enable HDCP Encryption */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL,
+		HDMI_HDCP_CTRL_ENABLE |
+		HDMI_HDCP_CTRL_ENCRYPTION_ENABLE);
+
+	return 0;
+}
+
+static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
+	u16 *pbstatus)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	bool max_devs_exceeded = false, max_cascade_exceeded = false;
+	u32 repeater_cascade_depth = 0, down_stream_devices = 0;
+	u16 bstatus;
+	u8 buf[2];
+
+	/* Read BSTATUS at offset 0x41 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
+	if (rc) {
+		pr_err("%s: BSTATUS read failed\n", __func__);
+		goto error;
+	}
+	*pbstatus = bstatus = (buf[1] << 8) | buf[0];
+
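+	/*
+	 * BSTATUS layout per the HDCP 1.x spec: bits 6:0 DEVICE_COUNT,
+	 * bit 7 MAX_DEVS_EXCEEDED, bits 10:8 DEPTH, bit 11
+	 * MAX_CASCADE_EXCEEDED.
+	 */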
+	down_stream_devices = bstatus & 0x7F;
+	repeater_cascade_depth = (bstatus >> 8) & 0x7;
+	max_devs_exceeded = (bstatus & BIT(7)) ? true : false;
+	max_cascade_exceeded = (bstatus & BIT(11)) ? true : false;
+
+	if (down_stream_devices == 0) {
+		/*
+		 * If no downstream devices are attached to the repeater
+		 * then part II fails.
+		 * todo: The other approach would be to continue PART II.
+		 */
+		pr_err("%s: No downstream devices\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-05:
+	 * Check if the no. of devices connected to the repeater
+	 * exceeds max_devices_connected from bit 7 of Bstatus.
+	 */
+	if (max_devs_exceeded) {
+		pr_err("%s: no. of devs connected exceeds max allowed",
+			__func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-06:
+	 * Check if the cascade depth of the repeater
+	 * exceeds max_cascade_connected from bit 11 of Bstatus.
+	 */
+	if (max_cascade_exceeded) {
+		pr_err("%s: no. of cascade conn exceeds max allowed",
+			__func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+error:
+	hdcp_ctrl->dev_count = down_stream_devices;
+	hdcp_ctrl->max_cascade_exceeded = max_cascade_exceeded;
+	hdcp_ctrl->max_dev_exceeded = max_devs_exceeded;
+	hdcp_ctrl->depth = repeater_cascade_depth;
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg, data;
+	u32 timeout_count;
+	u16 bstatus;
+	u8 bcaps;
+
+	/*
+	 * Wait until the READY bit is set in BCAPS; per the HDCP
+	 * specification, the maximum permitted time to poll for the
+	 * READY bit is five seconds.
+	 */
+	timeout_count = 100;
+	do {
+		/* Read BCAPS at offset 0x40 */
+		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+		if (rc) {
+			pr_err("%s: BCAPS read failed\n", __func__);
+			return rc;
+		}
+
+		if (bcaps & BIT(5))
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait KSV fifo ready timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
+	if (rc) {
+		pr_err("%s: bstatus error\n", __func__);
+		return rc;
+	}
+
+	/* Write BSTATUS and BCAPS to HDCP registers */
+	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+	data = bcaps | (bstatus << 8);
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+	if (rc) {
+		pr_err("%s: BSTATUS write failed\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * hdcp authenticating part 2: 2nd
+ * read ksv fifo from sink
+ * transfer V' from sink to HDCP engine
+ * reset SHA engine
+ */
+static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	int rc = 0;
+	struct hdmi_hdcp_reg_data reg_data[]  = {
+		{REG_HDMI_HDCP_RCVPORT_DATA7,  0x20, "V' H0"},
+		{REG_HDMI_HDCP_RCVPORT_DATA8,  0x24, "V' H1"},
+		{REG_HDMI_HDCP_RCVPORT_DATA9,  0x28, "V' H2"},
+		{REG_HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
+		{REG_HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
+	};
+	struct hdmi_hdcp_reg_data *rd;
+	u32 size = ARRAY_SIZE(reg_data);
+	u32 reg[ARRAY_SIZE(reg_data)];
+	u32 data[ARRAY_SIZE(reg_data)];
+	int i;
+
+	for (i = 0; i < size; i++) {
+		rd = &reg_data[i];
+		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
+			rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
+		if (rc) {
+			pr_err("%s: Read %s failed\n", __func__, rd->name);
+			goto error;
+		}
+
+		DBG("%s =%x", rd->name, data[i]);
+		reg[i] = reg_data[i].reg_id;
+	}
+
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
+
+error:
+	return rc;
+}
+
+static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 ksv_bytes;
+
+	ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
+		hdcp_ctrl->ksv_list, ksv_bytes);
+	if (rc)
+		pr_err("%s: KSV FIFO read failed\n", __func__);
+
+	return rc;
+}
+
+static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	u32 reg[2], data[2];
+	u32 rc  = 0;
+
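+	/* pulse SHA_CTRL from enable to disable to reset the SHA engine */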
+	reg[0] = REG_HDMI_HDCP_SHA_CTRL;
+	data[0] = HDCP_REG_ENABLE;
+	reg[1] = REG_HDMI_HDCP_SHA_CTRL;
+	data[1] = HDCP_REG_DISABLE;
+
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 timeout_count;
+
+	/*
+	 * Read KSV FIFO over DDC
+	 * The Key Selection Vector FIFO is used to pull downstream KSVs
+	 * from HDCP repeaters.
+	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
+	 * auto incrementing access.
+	 * All bytes read as 0x00 for HDCP Receivers that are not
+	 * HDCP Repeaters (REPEATER == 0).
+	 */
+	timeout_count = 100;
+	do {
+		rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
+		if (!rc)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Recv ksv fifo timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: transfer V failed\n", __func__);
+		return rc;
+	}
+
+	/* reset SHA engine before writing the ksv fifo */
+	rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: fail to reset sha engine\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Write KSV FIFO to HDCP_SHA_DATA.
+ * This is done 1 byte at a time, starting with the LSB.
+ * Once 64 bytes have been written, we need to poll for
+ * HDCP_SHA_BLOCK_DONE before writing any further.
+ * Once the last byte is written, we need to poll for
+ * HDCP_SHA_COMP_DONE to wait until the HW finishes.
+ */
+static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int i;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 ksv_bytes, last_byte = 0;
+	u8 *ksv_fifo = NULL;
+	u32 reg_val, data, reg;
+	int rc = 0;
+
+	ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+	/* Check if we need to wait for HW completion */
+	if (hdcp_ctrl->ksv_fifo_w_index) {
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_SHA_STATUS);
+		DBG("HDCP_SHA_STATUS=%08x", reg_val);
+		if (hdcp_ctrl->ksv_fifo_w_index == ksv_bytes) {
+			/* check COMP_DONE if last write */
+			if (reg_val & HDMI_HDCP_SHA_STATUS_COMP_DONE) {
+				DBG("COMP_DONE");
+				return 0;
+			} else {
+				return -EAGAIN;
+			}
+		} else {
+			/* check BLOCK_DONE if not last write */
+			if (!(reg_val & HDMI_HDCP_SHA_STATUS_BLOCK_DONE))
+				return -EAGAIN;
+
+			DBG("BLOCK_DONE");
+		}
+	}
+
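+	/* Write at most one 64-byte SHA block per call; mark the last chunk */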
+	ksv_bytes -= hdcp_ctrl->ksv_fifo_w_index;
+	if (ksv_bytes <= 64)
+		last_byte = 1;
+	else
+		ksv_bytes = 64;
+
+	ksv_fifo = hdcp_ctrl->ksv_list;
+	ksv_fifo += hdcp_ctrl->ksv_fifo_w_index;
+
+	for (i = 0; i < ksv_bytes; i++) {
+		/* Write KSV byte and set DONE bit[0] for the last byte */
+		reg_val = ksv_fifo[i] << 16;
+		if ((i == (ksv_bytes - 1)) && last_byte)
+			reg_val |= HDMI_HDCP_SHA_DATA_DONE;
+
+		reg = REG_HDMI_HDCP_SHA_DATA;
+		data = reg_val;
+		rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+		if (rc)
+			return rc;
+	}
+
+	hdcp_ctrl->ksv_fifo_w_index += ksv_bytes;
+
+	/*
+	 * Return -EAGAIN to tell the caller to wait for COMP_DONE or
+	 * BLOCK_DONE.
+	 */
+	return -EAGAIN;
+}
+
+/* write ksv fifo into HDCP engine */
+static int hdmi_hdcp_auth_part2_write_ksv_fifo(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 timeout_count;
+
+	hdcp_ctrl->ksv_fifo_w_index = 0;
+	timeout_count = 100;
+	do {
+		rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
+		if (!rc)
+			break;
+
+		if (rc != -EAGAIN)
+			return rc;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Write KSV fifo timedout", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	u32 timeout_count = 100;
+
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		if (link0_status & HDMI_HDCP_LINK0_STATUS_V_MATCHES)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: HDCP V match timed out\n", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static void hdmi_hdcp_auth_work(struct work_struct *work)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+		struct hdmi_hdcp_ctrl, hdcp_auth_work);
+	int rc;
+
+	rc = hdmi_hdcp_auth_prepare(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: auth prepare failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	/* HDCP PartI */
+	rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: key exchange failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: receive r0 failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: verify r0 failed %d\n", __func__, rc);
+		goto end;
+	}
+	pr_info("%s: Authentication Part I successful\n", __func__);
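+	/* Part II (KSV FIFO and V' verification) applies only to repeaters */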
+	if (hdcp_ctrl->ds_type == DS_RECEIVER)
+		goto end;
+
+	/* HDCP PartII */
+	rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: recv ksv fifo failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: write ksv fifo failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
+	if (rc)
+		pr_err("%s: check v match failed %d\n", __func__, rc);
+
+end:
+	if (rc == -ECANCELED) {
+		pr_info("%s: hdcp authentication canceled\n", __func__);
+	} else if (rc == -ENOTSUPP) {
+		pr_info("%s: hdcp is not supported\n", __func__);
+	} else if (rc) {
+		pr_err("%s: hdcp authentication failed\n", __func__);
+		hdmi_hdcp_auth_fail(hdcp_ctrl);
+	} else {
+		hdmi_hdcp_auth_done(hdcp_ctrl);
+	}
+}
+
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	if ((HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) ||
+		(HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+		DBG("still active or activating or no askv. returning");
+		return;
+	}
+
+	/* clear HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->auth_event = 0;
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+	hdcp_ctrl->auth_retries = 0;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	u32 reg_val;
+
+	if ((HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) ||
+		(HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+		DBG("hdcp inactive or no aksv. returning");
+		return;
+	}
+
+	/*
+	 * Disable HPD circuitry.
+	 * This is needed to reset the HDCP cipher engine so that when we
+	 * attempt a re-authentication, the HW will clear the AN0_READY and
+	 * AN1_READY bits in the HDMI_HDCP_LINK0_STATUS register.
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+	/*
+	 * Disable HDCP interrupts.
+	 * Also, need to set the state to inactive here so that any ongoing
+	 * reauth works will know that the HDCP session has been turned off.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Cancel any pending auth/reauth attempts.
+	 * If one is ongoing, this will wait for it to finish.
+	 * No more reauthentication attempts will be scheduled since we
+	 * set the current state to inactive.
+	 */
+	set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event);
+	wake_up_all(&hdcp_ctrl->auth_event_queue);
+	cancel_work_sync(&hdcp_ctrl->hdcp_auth_work);
+	cancel_work_sync(&hdcp_ctrl->hdcp_reauth_work);
+
+	hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+		HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+	/* Disable encryption and disable the HDCP block */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+	/* Enable HPD circuitry */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val |= HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+
+	DBG("HDCP: Off");
+}
+
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
+
+	if (!hdmi->qfprom_mmio) {
+		pr_err("%s: HDCP is not supported without qfprom\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
+	if (!hdcp_ctrl)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
+	INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work);
+	init_waitqueue_head(&hdcp_ctrl->auth_event_queue);
+	hdcp_ctrl->hdmi = hdmi;
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+	hdcp_ctrl->aksv_valid = false;
+
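+	/* Use TrustZone (SCM) for HDCP register writes when available */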
+	hdcp_ctrl->tz_hdcp = qcom_scm_hdcp_available();
+
+	return hdcp_ctrl;
+}
+
+void hdmi_hdcp_destroy(struct hdmi *hdmi)
+{
+	if (hdmi && hdmi->hdcp_ctrl) {
+		kfree(hdmi->hdcp_ctrl);
+		hdmi->hdcp_ctrl = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 6997ec636c6d..3a01cb5051e2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -426,57 +426,6 @@ static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
 	kfree(phy_8960);
 }
 
-static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
-	struct hdmi *hdmi = phy_8960->hdmi;
-	unsigned int val;
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-}
-
 static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -511,7 +460,6 @@ static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
 
 static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
 		.destroy = hdmi_phy_8960_destroy,
-		.reset = hdmi_phy_8960_reset,
 		.powerup = hdmi_phy_8960_powerup,
 		.powerdown = hdmi_phy_8960_powerdown,
 };
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index 391433c1af7c..cb01421ae1e4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -29,37 +29,6 @@ static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
 	kfree(phy_8x60);
 }
 
-static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
-	struct hdmi *hdmi = phy_8x60->hdmi;
-	unsigned int val;
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-}
-
 static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -182,7 +151,6 @@ static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
 
 static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
 		.destroy = hdmi_phy_8x60_destroy,
-		.reset = hdmi_phy_8x60_reset,
 		.powerup = hdmi_phy_8x60_powerup,
 		.powerdown = hdmi_phy_8x60_powerdown,
 };
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index 59fa6cdacb2a..56ab8917ee9a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -19,7 +19,6 @@
 
 struct hdmi_phy_8x74 {
 	struct hdmi_phy base;
-	struct hdmi *hdmi;
 	void __iomem *mmio;
 };
 #define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
@@ -41,59 +40,6 @@ static void hdmi_phy_8x74_destroy(struct hdmi_phy *phy)
 	kfree(phy_8x74);
 }
 
-static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
-	struct hdmi *hdmi = phy_8x74->hdmi;
-	unsigned int val;
-
-	/* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-}
-
 static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -117,7 +63,6 @@ static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
 
 static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
 		.destroy = hdmi_phy_8x74_destroy,
-		.reset = hdmi_phy_8x74_reset,
 		.powerup = hdmi_phy_8x74_powerup,
 		.powerdown = hdmi_phy_8x74_powerdown,
 };
@@ -138,8 +83,6 @@ struct hdmi_phy *hdmi_phy_8x74_init(struct hdmi *hdmi)
 
 	phy->funcs = &hdmi_phy_8x74_funcs;
 
-	phy_8x74->hdmi = hdmi;
-
 	/* for 8x74, the phy mmio is mapped separately: */
 	phy_8x74->mmio = msm_ioremap(hdmi->pdev,
 			"phy_physical", "HDMI_8x74");
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 978c3f70872a..2aa23b98f8aa 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
-
-Copyright (C) 2013 by the following authors:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
+
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 153fc487d683..74b86734fef5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index c4bb9d9c7667..6ac9aa165768 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -334,13 +334,15 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
 	return 0;
 }
 
-static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	DBG("%s: begin", mdp4_crtc->name);
 }
 
-static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -680,7 +682,5 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
 	plane->crtc = crtc;
 
-	mdp4_plane_install_properties(plane, &crtc->base);
-
 	return crtc;
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 7369ee7f0c55..5ed38cf548a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -19,8 +19,11 @@
 #include "msm_drv.h"
 #include "mdp4_kms.h"
 
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask)
 {
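+	/* Clear stale status bits for interrupts that are being newly enabled */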
+	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR,
+		irqmask ^ (irqmask & old_irqmask));
 	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
 }
 
@@ -68,9 +71,10 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 	struct drm_device *dev = mdp4_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	unsigned int id;
-	uint32_t status;
+	uint32_t status, enable;
 
-	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
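+	/* Only service interrupt bits that are currently enabled */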
+	enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE);
+	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable;
 	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
 
 	VERB("status=%08x", status);
@@ -86,13 +90,22 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
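+	/* Keep the hardware enabled while updating the vblank mask registers */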
+	mdp4_enable(mdp4_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp4_crtc_vblank(crtc), true);
+	mdp4_disable(mdp4_kms);
+
 	return 0;
 }
 
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+	mdp4_enable(mdp4_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp4_crtc_vblank(crtc), false);
+	mdp4_disable(mdp4_kms);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 531e4acc2a87..077f7521a971 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -241,22 +241,37 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
 }
 
 #ifdef CONFIG_OF
-static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
+static struct drm_panel *detect_panel(struct drm_device *dev)
 {
-	struct device_node *n;
+	struct device_node *endpoint, *panel_node;
+	struct device_node *np = dev->dev->of_node;
 	struct drm_panel *panel = NULL;
 
-	n = of_parse_phandle(dev->dev->of_node, name, 0);
-	if (n) {
-		panel = of_drm_find_panel(n);
-		if (!panel)
-			panel = ERR_PTR(-EPROBE_DEFER);
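+	/*
+	 * Walk the OF graph: take the first endpoint and resolve its
+	 * remote port parent, which is the panel node.
+	 */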
+	endpoint = of_graph_get_next_endpoint(np, NULL);
+	if (!endpoint) {
+		dev_err(dev->dev, "no valid endpoint\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	panel_node = of_graph_get_remote_port_parent(endpoint);
+	if (!panel_node) {
+		dev_err(dev->dev, "no valid panel node\n");
+		of_node_put(endpoint);
+		return ERR_PTR(-ENODEV);
+	}
+
+	of_node_put(endpoint);
+
+	panel = of_drm_find_panel(panel_node);
+	if (!panel) {
+		of_node_put(panel_node);
+		return ERR_PTR(-EPROBE_DEFER);
 	}
 
 	return panel;
 }
 #else
-static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
+static struct drm_panel *detect_panel(struct drm_device *dev)
 {
 	// ??? maybe use a module param to specify which panel is attached?
 }
@@ -294,7 +309,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 	 * Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS:
 	 */
 
-	panel = detect_panel(dev, "qcom,lvds-panel");
+	panel = detect_panel(dev);
 	if (IS_ERR(panel)) {
 		ret = PTR_ERR(panel);
 		dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
@@ -527,6 +542,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = 2048;
+	dev->mode_config.max_height = 2048;
+
 	return kms;
 
 fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c1ecb9d6bdef..8a7f6e1e2bca 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -167,7 +167,8 @@ static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
 int mdp4_disable(struct mdp4_kms *mdp4_kms);
 int mdp4_enable(struct mdp4_kms *mdp4_kms);
 
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask);
 void mdp4_irq_preinstall(struct msm_kms *kms);
 int mdp4_irq_postinstall(struct msm_kms *kms);
 void mdp4_irq_uninstall(struct msm_kms *kms);
@@ -175,29 +176,24 @@ irqreturn_t mdp4_irq(struct msm_kms *kms);
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 
-static inline bool pipe_supports_yuv(enum mdp4_pipe pipe)
+static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe)
 {
 	switch (pipe) {
 	case VG1:
 	case VG2:
 	case VG3:
 	case VG4:
-		return true;
+		return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+	case RGB1:
+	case RGB2:
+	case RGB3:
+		return MDP_PIPE_CAP_SCALE;
 	default:
-		return false;
+		return 0;
 	}
 }
 
-static inline
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
-		uint32_t max_formats)
-{
-	return mdp_get_formats(pixel_formats, max_formats,
-				!pipe_supports_yuv(pipe_id));
-}
-
-void mdp4_plane_install_properties(struct drm_plane *plane,
-		struct drm_mode_object *obj);
 enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 		enum mdp4_pipe pipe_id, bool private_plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index c04843376c54..4cd6e721aa0a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -346,8 +346,10 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
 
 	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
 
-	if (panel)
+	if (panel) {
 		drm_panel_disable(panel);
+		drm_panel_unprepare(panel);
+	}
 
 	/*
 	 * Wait for a vsync so we know the ENABLE=0 latched before
@@ -412,8 +414,10 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
 	if (ret)
 		dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
 
-	if (panel)
+	if (panel) {
+		drm_panel_prepare(panel);
 		drm_panel_enable(panel);
+	}
 
 	setup_phy(encoder);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 247a424445f7..e9dee367b597 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -26,6 +26,7 @@ struct mdp4_plane {
 
 	enum mdp4_pipe pipe;
 
+	uint32_t caps;
 	uint32_t nformats;
 	uint32_t formats[32];
 
@@ -74,7 +75,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
 }
 
 /* helper to install properties which are common to planes and crtcs */
-void mdp4_plane_install_properties(struct drm_plane *plane,
+static void mdp4_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj)
 {
 	// XXX
@@ -382,9 +383,11 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 
 	mdp4_plane->pipe = pipe_id;
 	mdp4_plane->name = pipe_names[pipe_id];
+	mdp4_plane->caps = mdp4_pipe_caps(pipe_id);
 
-	mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
-			ARRAY_SIZE(mdp4_plane->formats));
+	mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats,
+			ARRAY_SIZE(mdp4_plane->formats),
+			!pipe_supports_yuv(mdp4_plane->caps));
 
 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
 	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 50e17527e2e5..3469f50d5590 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -381,49 +381,49 @@ static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x0
 static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
 #define MDP5_CTL_LAYER_REG_VIG0__MASK				0x00000007
 #define MDP5_CTL_LAYER_REG_VIG0__SHIFT				0
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_VIG1__MASK				0x00000038
 #define MDP5_CTL_LAYER_REG_VIG1__SHIFT				3
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
 }
 #define MDP5_CTL_LAYER_REG_VIG2__MASK				0x000001c0
 #define MDP5_CTL_LAYER_REG_VIG2__SHIFT				6
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB0__MASK				0x00000e00
 #define MDP5_CTL_LAYER_REG_RGB0__SHIFT				9
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB1__MASK				0x00007000
 #define MDP5_CTL_LAYER_REG_RGB1__SHIFT				12
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB2__MASK				0x00038000
 #define MDP5_CTL_LAYER_REG_RGB2__SHIFT				15
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
 }
 #define MDP5_CTL_LAYER_REG_DMA0__MASK				0x001c0000
 #define MDP5_CTL_LAYER_REG_DMA0__SHIFT				18
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_DMA1__MASK				0x00e00000
 #define MDP5_CTL_LAYER_REG_DMA1__SHIFT				21
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
 }
@@ -431,13 +431,13 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
 #define MDP5_CTL_LAYER_REG_CURSOR_OUT				0x02000000
 #define MDP5_CTL_LAYER_REG_VIG3__MASK				0x1c000000
 #define MDP5_CTL_LAYER_REG_VIG3__SHIFT				26
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB3__MASK				0xe0000000
 #define MDP5_CTL_LAYER_REG_RGB3__SHIFT				29
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
 }
@@ -499,6 +499,44 @@ static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __o
 
 static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
 
+static inline uint32_t __offset_LAYER_EXT(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000040;
+		case 1: return 0x00000044;
+		case 2: return 0x00000048;
+		case 3: return 0x0000004c;
+		case 4: return 0x00000050;
+		case 5: return 0x00000054;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3			0x00000001
+#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3			0x00000004
+#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3			0x00000010
+#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3			0x00000040
+#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3			0x00000100
+#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3			0x00000400
+#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3			0x00001000
+#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3			0x00004000
+#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3			0x00010000
+#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3			0x00040000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK			0x00f00000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT			20
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK;
+}
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK			0x3c000000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT			26
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK;
+}
+
 static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
 {
 	switch (idx) {
@@ -803,11 +841,11 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
 }
 #define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT			0x00020000
 #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB			0x00040000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK			0x00180000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT			19
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_fetch_type val)
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK			0x00180000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT			19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val)
 {
-	return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+	return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK;
 }
 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK			0x01800000
 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT			23
@@ -897,41 +935,41 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
 static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
 #define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN			0x00000001
 #define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN			0x00000002
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK		0x00000300
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT		8
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK	0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT	8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK		0x00000c00
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT		10
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK	0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT	10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK		0x00003000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT		12
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK	0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT	12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK		0x0000c000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT		14
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK	0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT	14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK		0x00030000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT		16
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK	0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT	16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK		0x000c0000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT		18
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK	0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT	18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK;
 }
 
 static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
@@ -984,9 +1022,22 @@ static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x000000
 
 static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
 
-static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t __offset_BLEND(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000020;
+		case 1: return 0x00000050;
+		case 2: return 0x00000080;
+		case 3: return 0x000000b0;
+		case 4: return 0x00000230;
+		case 5: return 0x00000260;
+		case 6: return 0x00000290;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK			0x00000003
 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT			0
 static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
@@ -1008,25 +1059,25 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
 #define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA			0x00001000
 #define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN			0x00002000
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); }
 
 static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
 #define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK			0x0000ffff
@@ -1260,6 +1311,13 @@ static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x000000
 static inline uint32_t __offset_WB(uint32_t idx)
 {
 	switch (idx) {
+#if 0  /* TEMPORARY until patch that adds wb.base[] is merged */
+		case 0: return (mdp5_cfg->wb.base[0]);
+		case 1: return (mdp5_cfg->wb.base[1]);
+		case 2: return (mdp5_cfg->wb.base[2]);
+		case 3: return (mdp5_cfg->wb.base[3]);
+		case 4: return (mdp5_cfg->wb.base[4]);
+#endif
 		default: return INVALID_IDX(idx);
 	}
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 8b9a7931b162..a1e26f23c7cc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -22,7 +22,76 @@ struct mdp5_cfg_handler {
 /* mdp5_cfg must be exposed (used in mdp5.xml.h) */
 const struct mdp5_cfg_hw *mdp5_cfg = NULL;
 
-const struct mdp5_cfg_hw msm8x74_config = {
+const struct mdp5_cfg_hw msm8x74v1_config = {
+	.name = "msm8x74v1",
+	.mdp = {
+		.count = 1,
+		.base = { 0x00100 },
+	},
+	.smp = {
+		.mmb_count = 22,
+		.mmb_size = 4096,
+		.clients = {
+			[SSPP_VIG0] =  1, [SSPP_VIG1] =  4, [SSPP_VIG2] =  7,
+			[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+			[SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+		},
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+		.flush_hw_mask = 0x0003ffff,
+	},
+	.pipe_vig = {
+		.count = 3,
+		.base = { 0x01200, 0x01600, 0x01a00 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SCALE |
+			MDP_PIPE_CAP_CSC   |
+			0,
+	},
+	.pipe_rgb = {
+		.count = 3,
+		.base = { 0x01e00, 0x02200, 0x02600 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SCALE |
+			0,
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x02a00, 0x02e00 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			0,
+	},
+	.lm = {
+		.count = 5,
+		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 3,
+		.base = { 0x04600, 0x04a00, 0x04e00 },
+	},
+	.pp = {
+		.count = 3,
+		.base = { 0x21b00, 0x21c00, 0x21d00 },
+	},
+	.intf = {
+		.base = { 0x21100, 0x21300, 0x21500, 0x21700 },
+		.connect = {
+			[0] = INTF_eDP,
+			[1] = INTF_DSI,
+			[2] = INTF_DSI,
+			[3] = INTF_HDMI,
+		},
+	},
+	.max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw msm8x74v2_config = {
 	.name = "msm8x74",
 	.mdp = {
 		.count = 1,
@@ -45,19 +114,27 @@ const struct mdp5_cfg_hw msm8x74_config = {
 	.pipe_vig = {
 		.count = 3,
 		.base = { 0x01200, 0x01600, 0x01a00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_rgb = {
 		.count = 3,
 		.base = { 0x01e00, 0x02200, 0x02600 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 2,
 		.base = { 0x02a00, 0x02e00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
 	},
 	.lm = {
 		.count = 5,
 		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
 		.nb_stages = 5,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
 	},
 	.dspp = {
 		.count = 3,
@@ -65,7 +142,7 @@ const struct mdp5_cfg_hw msm8x74_config = {
 	},
 	.ad = {
 		.count = 2,
-		.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+		.base = { 0x13100, 0x13300 },
 	},
 	.pp = {
 		.count = 3,
@@ -113,19 +190,27 @@ const struct mdp5_cfg_hw apq8084_config = {
 	.pipe_vig = {
 		.count = 4,
 		.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_rgb = {
 		.count = 4,
 		.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 2,
 		.base = { 0x03200, 0x03600 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
 	},
 	.lm = {
 		.count = 6,
 		.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
 		.nb_stages = 5,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
 	},
 	.dspp = {
 		.count = 4,
@@ -174,19 +259,27 @@ const struct mdp5_cfg_hw msm8x16_config = {
 	.pipe_vig = {
 		.count = 1,
 		.base = { 0x05000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_rgb = {
 		.count = 2,
 		.base = { 0x15000, 0x17000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 1,
 		.base = { 0x25000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
 	},
 	.lm = {
 		.count = 2, /* LM0 and LM3 */
 		.base = { 0x45000, 0x48000 },
 		.nb_stages = 5,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
 	},
 	.dspp = {
 		.count = 1,
@@ -203,14 +296,91 @@ const struct mdp5_cfg_hw msm8x16_config = {
 	.max_clk = 320000000,
 };
 
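+/* msm8x94: MDP5 revision 9 (see cfg_handlers below) */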
+const struct mdp5_cfg_hw msm8x94_config = {
+	.name = "msm8x94",
+	.mdp = {
+		.count = 1,
+		.base = { 0x01000 },
+	},
+	.smp = {
+		.mmb_count = 44,
+		.mmb_size = 8192,
+		.clients = {
+			[SSPP_VIG0] =  1, [SSPP_VIG1] =  4,
+			[SSPP_VIG2] =  7, [SSPP_VIG3] = 19,
+			[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+			[SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+			[SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+		},
+		.reserved_state[0] = GENMASK(23, 0),	/* first 24 MMBs */
+		.reserved = {
+			 [1] = 1,  [4] = 1,  [7] = 1, [19] = 1,
+			[16] = 5, [17] = 5, [18] = 5, [22] = 5,
+		},
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+		.flush_hw_mask = 0xf0ffffff,
+	},
+	.pipe_vig = {
+		.count = 4,
+		.base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
+	},
+	.pipe_rgb = {
+		.count = 4,
+		.base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x25000, 0x27000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+	},
+	.lm = {
+		.count = 6,
+		.base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+		.nb_stages = 8,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
+	},
+	.dspp = {
+		.count = 4,
+		.base = { 0x55000, 0x57000, 0x59000, 0x5b000 },
+	},
+	.ad = {
+		.count = 3,
+		.base = { 0x79000, 0x79800, 0x7a000 },
+	},
+	.pp = {
+		.count = 4,
+		.base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+	},
+	.intf = {
+		.base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+		.connect = {
+			[0] = INTF_DISABLED,
+			[1] = INTF_DSI,
+			[2] = INTF_DSI,
+			[3] = INTF_HDMI,
+		},
+	},
+	.max_clk = 320000000,
+};
+
 static const struct mdp5_cfg_handler cfg_handlers[] = {
-	{ .revision = 0, .config = { .hw = &msm8x74_config } },
-	{ .revision = 2, .config = { .hw = &msm8x74_config } },
+	{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
+	{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
 	{ .revision = 3, .config = { .hw = &apq8084_config } },
 	{ .revision = 6, .config = { .hw = &msm8x16_config } },
+	{ .revision = 9, .config = { .hw = &msm8x94_config } },
 };
 
-
 static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
 
 const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 69349abe59f2..efb918d9f68b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -42,6 +42,13 @@ struct mdp5_sub_block {
 struct mdp5_lm_block {
 	MDP5_SUB_BLOCK_DEFINITION;
 	uint32_t nb_stages;		/* number of stages per blender */
+	uint32_t max_width;		/* Maximum output resolution */
+	uint32_t max_height;
+};
+
+struct mdp5_pipe_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+	uint32_t caps;			/* pipe capabilities */
 };
 
 struct mdp5_ctl_block {
@@ -70,9 +77,9 @@ struct mdp5_cfg_hw {
 	struct mdp5_sub_block mdp;
 	struct mdp5_smp_block smp;
 	struct mdp5_ctl_block ctl;
-	struct mdp5_sub_block pipe_vig;
-	struct mdp5_sub_block pipe_rgb;
-	struct mdp5_sub_block pipe_dma;
+	struct mdp5_pipe_block pipe_vig;
+	struct mdp5_pipe_block pipe_rgb;
+	struct mdp5_pipe_block pipe_dma;
 	struct mdp5_lm_block  lm;
 	struct mdp5_sub_block dspp;
 	struct mdp5_sub_block ad;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index ee31b16fe7ea..8e6c9b598a57 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -21,6 +21,8 @@ struct mdp5_cmd_encoder {
 	struct mdp5_interface intf;
 	bool enabled;
 	uint32_t bsc;
+
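+	/* CTL block bound to this encoder at init time */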
+	struct mdp5_ctl *ctl;
 };
 #define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
 
@@ -210,13 +212,14 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 	pingpong_tearcheck_setup(encoder, mode);
-	mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf);
+	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
+				mdp5_cmd_enc->ctl);
 }
 
 static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
 
 	if (WARN_ON(!mdp5_cmd_enc->enabled))
@@ -235,7 +238,7 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
 
 	if (WARN_ON(mdp5_cmd_enc->enabled))
@@ -300,7 +303,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
 
 /* initialize command mode encoder */
 struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf)
+			struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct drm_encoder *encoder = NULL;
 	struct mdp5_cmd_encoder *mdp5_cmd_enc;
@@ -320,6 +323,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
 
 	memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
 	encoder = &mdp5_cmd_enc->base;
+	mdp5_cmd_enc->ctl = ctl;
 
 	drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
 			DRM_MODE_ENCODER_DSI);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index dea3d2e559b1..7f9f4ac88029 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -160,8 +160,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 
 	if (mdp5_crtc->ctl && !crtc->state->enable) {
 		/* set STAGE_UNUSED for all layers */
-		mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
-		mdp5_ctl_release(mdp5_crtc->ctl);
+		mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
 		mdp5_crtc->ctl = NULL;
 	}
 }
@@ -196,13 +195,9 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
 /*
  * blend_setup() - blend all the planes of a CRTC
  *
- * When border is enabled, the border color will ALWAYS be the base layer.
- * Therefore, the first plane (private RGB pipe) will start at STAGE0.
- * If disabled, the first plane starts at STAGE_BASE.
- *
- * Note:
- * Border is not enabled here because the private plane is exactly
- * the CRTC resolution.
+ * If no base layer is available, the border color is enabled as the base
+ * layer. Otherwise all layers are blended based on the stage calculated
+ * in mdp5_crtc_atomic_check.
  */
 static void blend_setup(struct drm_crtc *crtc)
 {
@@ -210,9 +205,14 @@ static void blend_setup(struct drm_crtc *crtc)
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
 	const struct mdp5_cfg_hw *hw_cfg;
-	uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
+	const struct mdp_format *format;
+	uint32_t lm = mdp5_crtc->lm;
+	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
-#define blender(stage)	((stage) - STAGE_BASE)
+	uint8_t stage[STAGE_MAX + 1];
+	int i, plane_cnt = 0;
+#define blender(stage)	((stage) - STAGE0)
 
 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 
@@ -222,33 +222,73 @@ static void blend_setup(struct drm_crtc *crtc)
 	if (!mdp5_crtc->ctl)
 		goto out;
 
+	/* Collect all plane information */
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		enum mdp_mixer_stage_id stage =
-			to_mdp5_plane_state(plane->state)->stage;
+		pstate = to_mdp5_plane_state(plane->state);
+		pstates[pstate->stage] = pstate;
+		stage[pstate->stage] = mdp5_plane_pipe(plane);
+		plane_cnt++;
+	}
 
-		/*
-		 * Note: This cannot happen with current implementation but
-		 * we need to check this condition once z property is added
-		 */
-		BUG_ON(stage > hw_cfg->lm.nb_stages);
+	/*
+	 * If there is no base layer, enable border color.
+	 * Although it's not possible in current blend logic,
+	 * put it here as a reminder.
+	 */
+	if (!pstates[STAGE_BASE] && plane_cnt) {
+		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
+		DBG("Border Color is enabled");
+	}
 
-		/* LM */
-		mdp5_write(mdp5_kms,
-				REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
-				MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
-				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
+	/* The rest of blending: program each stage's blend op */
+	for (i = STAGE0; i <= STAGE_MAX; i++) {
+		if (!pstates[i])
+			continue;
+
+		format = to_mdp_format(
+			msm_framebuffer_format(pstates[i]->base.fb));
+		plane = pstates[i]->base.plane;
+		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
+		fg_alpha = pstates[i]->alpha;
+		bg_alpha = 0xFF - pstates[i]->alpha;
+		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
+
+		if (format->alpha_enable && pstates[i]->premultiplied) {
+			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |=
+					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+			}
+		} else if (format->alpha_enable) {
+			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |=
+				       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
+				       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
+				       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+				       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+			}
+		}
+
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
+				blender(i)), blend_op);
 		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
-				blender(stage)), 0xff);
+				blender(i)), fg_alpha);
 		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
-				blender(stage)), 0x00);
-		/* CTL */
-		blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
-		DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
-				pipe2name(mdp5_plane_pipe(plane)), stage);
+				blender(i)), bg_alpha);
 	}
 
-	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
-	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+	mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
 
 out:
 	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
@@ -339,25 +379,19 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
 	struct drm_device *dev = crtc->dev;
-	struct plane_state pstates[STAGE3 + 1];
+	struct plane_state pstates[STAGE_MAX + 1];
+	const struct mdp5_cfg_hw *hw_cfg;
 	int cnt = 0, i;
 
 	DBG("%s: check", mdp5_crtc->name);
 
-	/* request a free CTL, if none is already allocated for this CRTC */
-	if (state->enable && !mdp5_crtc->ctl) {
-		mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
-		if (WARN_ON(!mdp5_crtc->ctl))
-			return -EINVAL;
-	}
-
 	/* verify that there are not too many planes attached to crtc
 	 * and that we don't have conflicting mixer stages:
 	 */
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 	drm_atomic_crtc_state_for_each_plane(plane, state) {
 		struct drm_plane_state *pstate;
-
-		if (cnt >= ARRAY_SIZE(pstates)) {
+		if (cnt >= (hw_cfg->lm.nb_stages)) {
 			dev_err(dev->dev, "too many planes!\n");
 			return -EINVAL;
 		}
@@ -369,13 +403,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		 */
 		if (!pstate)
 			pstate = plane->state;
-
 		pstates[cnt].plane = plane;
 		pstates[cnt].state = to_mdp5_plane_state(pstate);
 
 		cnt++;
 	}
 
+	/* assign stages based on the sorted zpos property */
 	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 
 	for (i = 0; i < cnt; i++) {
@@ -388,13 +422,15 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	return 0;
 }
 
-static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	DBG("%s: begin", mdp5_crtc->name);
 }
 
-static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -691,8 +727,8 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
 	complete_flip(crtc, file);
 }
 
-/* set interface for routing crtc->encoder: */
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
@@ -715,7 +751,8 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
 
 	mdp_irq_update(&mdp5_kms->base);
 
-	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+	mdp5_crtc->ctl = ctl;
+	mdp5_ctl_set_pipeline(ctl, intf, lm);
 }
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc)
@@ -724,12 +761,6 @@ int mdp5_crtc_get_lm(struct drm_crtc *crtc)
 	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
 }
 
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
-{
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
-}
-
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -774,7 +805,5 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
 	plane->crtc = crtc;
 
-	mdp5_plane_install_properties(plane, &crtc->base);
-
 	return crtc;
 }
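
The atomic_check change above assigns mixer stages by sorting the attached planes on their zpos property, lowest zpos becoming the base layer. A small userspace sketch of the same sort-then-assign idea (struct pstate and the stage numbering are stand-ins for the driver's types):

#include <stdio.h>
#include <stdlib.h>

struct pstate { const char *name; int zpos; int stage; };

static int pstate_cmp(const void *a, const void *b)
{
	return ((const struct pstate *)a)->zpos -
	       ((const struct pstate *)b)->zpos;
}

int main(void)
{
	struct pstate p[] = { {"VIG0", 3, 0}, {"RGB0", 1, 0}, {"DMA0", 2, 0} };
	const int stage_base = 0;
	int cnt = 3;

	/* lowest zpos becomes the base layer, the rest stack upwards */
	qsort(p, cnt, sizeof(p[0]), pstate_cmp);
	for (int i = 0; i < cnt; i++) {
		p[i].stage = stage_base + i;
		printf("%s -> stage %d\n", p[i].name, p[i].stage);
	}
	return 0;
}
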
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index f2530f224a76..4e81ca4f964a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -17,7 +17,7 @@
 /*
  * CTL - MDP Control Pool Manager
  *
- * Controls are shared between all CRTCs.
+ * Controls are shared between all display interfaces.
  *
  * They are intended to be used for data path configuration.
  * The top level register programming describes the complete data path for
@@ -27,12 +27,11 @@
  *
  * In certain use cases (high-resolution dual pipe), one single CTL can be
  * shared across multiple CRTCs.
- *
- * Because the number of CTLs can be less than the number of CRTCs,
- * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
- * requested by the client (in mdp5_crtc_mode_set()).
  */
 
+#define CTL_STAT_BUSY		0x1
+#define CTL_STAT_BOOKED	0x2
+
 struct op_mode {
 	struct mdp5_interface intf;
 
@@ -46,8 +45,8 @@ struct mdp5_ctl {
 	u32 id;
 	int lm;
 
-	/* whether this CTL has been allocated or not: */
-	bool busy;
+	/* CTL status bitmask */
+	u32 status;
 
 	/* Operation Mode Configuration for the Pipeline */
 	struct op_mode pipeline;
@@ -61,7 +60,10 @@ struct mdp5_ctl {
 
 	bool cursor_on;
 
-	struct drm_crtc *crtc;
+	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
+	bool flush_pending;
+
+	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
 };
 
 struct mdp5_ctl_manager {
@@ -74,6 +76,10 @@ struct mdp5_ctl_manager {
 	/* to filter out non-present bits in the current hardware config */
 	u32 flush_hw_mask;
 
+	/* status for single FLUSH */
+	bool single_flush_supported;
+	u32 single_flush_pending_mask;
+
 	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
 	spinlock_t pool_lock;
 	struct mdp5_ctl ctls[MAX_CTL];
@@ -168,11 +174,21 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }
 
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
+		struct mdp5_interface *intf, int lm)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
 
+	if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
+		dev_err(mdp5_kms->dev->dev,
+			"CTL %d is allocated by INTF %d, but used by INTF %d\n",
+			ctl->id, ctl->pipeline.intf.num, intf->num);
+		return -EINVAL;
+	}
+
+	ctl->lm = lm;
+
 	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
 
 	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
@@ -287,29 +303,85 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
 		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
 
 	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+	ctl->cursor_on = enable;
 
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
 	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
-	ctl->cursor_on = enable;
 
 	return 0;
 }
 
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
+static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+		enum mdp_mixer_stage_id stage)
+{
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+	default:	return 0;
+	}
+}
+
+static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
+		enum mdp_mixer_stage_id stage)
+{
+	if (stage < STAGE6)
+		return 0;
+
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
+	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
+	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
+	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
+	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
+	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
+	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
+	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
+	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
+	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
+	default:	return 0;
+	}
+}
+
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
+	u32 ctl_blend_op_flags)
 {
 	unsigned long flags;
+	u32 blend_cfg = 0, blend_ext_cfg = 0;
+	int i, start_stage;
+
+	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
+		start_stage = STAGE0;
+		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+	} else {
+		start_stage = STAGE_BASE;
+	}
 
+	for (i = start_stage; i < start_stage + stage_cnt; i++) {
+		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
+		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
+	}
+
+	spin_lock_irqsave(&ctl->hw_lock, flags);
 	if (ctl->cursor_on)
 		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
-	else
-		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
 
-	spin_lock_irqsave(&ctl->hw_lock, flags);
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
-	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
+
+	DBG("lm%d: blend config = 0x%08x, ext_cfg = 0x%08x", ctl->lm,
+		blend_cfg, blend_ext_cfg);
 
 	return 0;
 }
@@ -379,6 +451,31 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
 	return sw_mask;
 }
 
+static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
+		u32 *flush_id)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+
+	if (ctl->pair) {
+		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
+		ctl->flush_pending = true;
+		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
+		*flush_mask = 0;
+
+		if (ctl->pair->flush_pending) {
+			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
+			*flush_mask = ctl_mgr->single_flush_pending_mask;
+
+			ctl->flush_pending = false;
+			ctl->pair->flush_pending = false;
+			ctl_mgr->single_flush_pending_mask = 0;
+
+			DBG("Single FLUSH mask %x, ID %d", *flush_mask,
+				*flush_id);
+		}
+	}
+}
+
 /**
  * mdp5_ctl_commit() - Register Flush
  *
@@ -400,6 +497,8 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct op_mode *pipeline = &ctl->pipeline;
 	unsigned long flags;
+	u32 flush_id = ctl->id;
+	u32 curr_ctl_flush_mask;
 
 	pipeline->start_mask &= ~flush_mask;
 
@@ -415,9 +514,13 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 
 	flush_mask &= ctl_mgr->flush_hw_mask;
 
+	curr_ctl_flush_mask = flush_mask;
+
+	fix_for_single_flush(ctl, &flush_mask, &flush_id);
+
 	if (flush_mask) {
 		spin_lock_irqsave(&ctl->hw_lock, flags);
-		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
 		spin_unlock_irqrestore(&ctl->hw_lock, flags);
 	}
 
@@ -426,7 +529,7 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 		refill_start_mask(ctl);
 	}
 
-	return flush_mask;
+	return curr_ctl_flush_mask;
 }
 
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
@@ -434,59 +537,85 @@ u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
 	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
 }
 
-void mdp5_ctl_release(struct mdp5_ctl *ctl)
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
 {
-	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
-	unsigned long flags;
+	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+}
 
-	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
-		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
-				ctl->id, ctl->busy);
-		return;
+/*
+ * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
+ */
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
+	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+	/* do nothing silently if hw doesn't support single FLUSH */
+	if (!ctl_mgr->single_flush_supported)
+		return 0;
+
+	if (!enable) {
+		ctlx->pair = NULL;
+		ctly->pair = NULL;
+		mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+		return 0;
+	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
+		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+		return -EINVAL;
+	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
+		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
-	ctl->busy = false;
-	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+	ctlx->pair = ctly;
+	ctly->pair = ctlx;
 
-	DBG("CTL %d released", ctl->id);
-}
+	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
+		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
 
-int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
-{
-	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+	return 0;
 }
 
 /*
- * mdp5_ctl_request() - CTL dynamic allocation
+ * mdp5_ctl_request() - CTL allocation
  *
- * Note: Current implementation considers that we can only have one CRTC per CTL
+ * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for
+ * other INTFs. If no CTL is available in the preferred category, allocate
+ * from the other one.
  *
- * @return first free CTL
+ * @return the allocated CTL on success, NULL if none is available.
  */
 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
-		struct drm_crtc *crtc)
+		int intf_num)
 {
 	struct mdp5_ctl *ctl = NULL;
+	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
+	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
 	unsigned long flags;
 	int c;
 
 	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
 
+	/* search the preferred category first */
 	for (c = 0; c < ctl_mgr->nctl; c++)
-		if (!ctl_mgr->ctls[c].busy)
-			break;
+		if ((ctl_mgr->ctls[c].status & checkm) == match)
+			goto found;
 
-	if (unlikely(c >= ctl_mgr->nctl)) {
-		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
-		goto unlock;
-	}
+	dev_warn(ctl_mgr->dev->dev,
+		"fall back to the other CTL category for INTF %d!\n", intf_num);
 
-	ctl = &ctl_mgr->ctls[c];
+	match ^= CTL_STAT_BOOKED;
+	for (c = 0; c < ctl_mgr->nctl; c++)
+		if ((ctl_mgr->ctls[c].status & checkm) == match)
+			goto found;
 
-	ctl->lm = mdp5_crtc_get_lm(crtc);
-	ctl->crtc = crtc;
-	ctl->busy = true;
+	dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+	goto unlock;
+
+found:
+	ctl = &ctl_mgr->ctls[c];
+	ctl->pipeline.intf.num = intf_num;
+	ctl->lm = -1;
+	ctl->status |= CTL_STAT_BUSY;
 	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
 
@@ -515,9 +644,11 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
 }
 
 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
-		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
+		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
 {
 	struct mdp5_ctl_manager *ctl_mgr;
+	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
+	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
 	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
 	unsigned long flags;
 	int c, ret;
@@ -551,14 +682,28 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 		if (WARN_ON(!ctl_cfg->base[c])) {
 			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
 			ret = -EINVAL;
+			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 			goto fail;
 		}
 		ctl->ctlm = ctl_mgr;
 		ctl->id = c;
 		ctl->reg_offset = ctl_cfg->base[c];
-		ctl->busy = false;
+		ctl->status = 0;
 		spin_lock_init(&ctl->hw_lock);
 	}
+
+	/*
+	 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two
+	 * DSI interfaces to support the single FLUSH feature (both CTLs are
+	 * flushed with a single write to CTL0's FLUSH register), which keeps
+	 * the two DSI pipes in sync.
+	 * Single FLUSH is supported from hw rev v3.0.
+	 */
+	if (rev >= 3) {
+		ctl_mgr->single_flush_supported = true;
+		/* Reserve CTL0/1 for INTF1/2 */
+		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
+		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
+	}
 	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 4678228c4f14..96148c6f863c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -23,7 +23,7 @@
  */
 struct mdp5_ctl_manager;
 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
-		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
 void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
 void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
 
@@ -32,49 +32,32 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
  * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
  * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
  */
-struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
+
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
 
 struct mdp5_interface;
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
+				int lm);
 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
 
 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
-
-/*
- * blend_cfg (LM blender config):
- *
- * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
- * are being blended according to their stage (z-order), through @blend_cfg arg.
- */
-static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
-		enum mdp_mixer_stage_id stage)
-{
-	switch (pipe) {
-	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
-	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
-	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
-	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
-	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
-	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
-	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
-	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
-	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
-	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
-	default:	return 0;
-	}
-}
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
 
 /*
  * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
  *
- * @blend_cfg: see LM blender config definition below
+ * @stage: array to contain the pipe num for each stage
+ * @stage_cnt: valid stage number in stage array
+ * @ctl_blend_op_flags: blender operation mode flags
  *
  * Note:
  * CTL registers need to be flushed after calling this function
  * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
  */
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
+#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT	BIT(0)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
+	u32 ctl_blend_op_flags);
 
 /**
  * mdp_ctl_flush_mask...() - Register FLUSH masks
@@ -91,8 +74,6 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
 u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
 
-void mdp5_ctl_release(struct mdp5_ctl *ctl);
-
 
 
 #endif /* __MDP5_CTL_H__ */
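
Callers of the reworked mdp5_ctl_blend() now pass a per-stage pipe array plus flags rather than a packed blend_cfg word. A hedged caller-side sketch (ctl_blend() below is a printing stub, not the driver function, and the pipe ids are examples):

#include <stdint.h>
#include <stdio.h>

#define BLEND_OP_FLAG_BORDER_OUT (1u << 0)

static void ctl_blend(const uint8_t *stage, int stage_cnt, uint32_t flags)
{
	printf("flags=0x%x:", flags);
	for (int i = 0; i < stage_cnt; i++)
		printf(" stage%d=pipe%d", i, stage[i]);
	printf("\n");
}

int main(void)
{
	/* one pipe id per stage, filled in by the CRTC's blend setup */
	uint8_t stage[8] = { 3 /* e.g. RGB0 */, 0 /* e.g. VIG0 */ };

	/* no opaque base layer -> ask the CTL to output border color */
	ctl_blend(stage, 2, BLEND_OP_FLAG_BORDER_OUT);
	return 0;
}
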
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index de97c08f3f1f..c9e32b08a7a0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -27,6 +27,8 @@ struct mdp5_encoder {
 	spinlock_t intf_lock;	/* protect REG_MDP5_INTF_* registers */
 	bool enabled;
 	uint32_t bsc;
+
+	struct mdp5_ctl *ctl;
 };
 #define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
 
@@ -222,14 +224,15 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
 
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 
-	mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
+	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
+				mdp5_encoder->ctl);
 }
 
 static void mdp5_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
 	int lm = mdp5_crtc_get_lm(encoder->crtc);
 	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
@@ -264,7 +267,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
 	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
 	unsigned long flags;
@@ -294,6 +297,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
 	struct mdp5_kms *mdp5_kms;
 	int intf_num;
 	u32 data = 0;
@@ -316,12 +320,13 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 
 	/* Make sure clocks are on when connectors call this function. */
 	mdp5_enable(mdp5_kms);
-	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
-		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
 	/* Dumb Panel, Sync mode */
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+
+	mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
+
 	mdp5_disable(mdp5_kms);
 
 	return 0;
@@ -329,7 +334,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 
 /* initialize encoder */
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf)
+			struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct drm_encoder *encoder = NULL;
 	struct mdp5_encoder *mdp5_encoder;
@@ -345,6 +350,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
 
 	memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
 	encoder = &mdp5_encoder->base;
+	mdp5_encoder->ctl = ctl;
 
 	spin_lock_init(&mdp5_encoder->intf_lock);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 33bd4c6160dd..b1f73bee1368 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -21,8 +21,11 @@
 #include "msm_drv.h"
 #include "mdp5_kms.h"
 
-void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask)
 {
+	mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0),
+		irqmask ^ (irqmask & old_irqmask));
 	mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
 }
 
@@ -71,9 +74,10 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	unsigned int id;
-	uint32_t status;
+	uint32_t status, enable;
 
-	status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0));
+	enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
+	status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
 
 	VERB("status=%08x", status);
@@ -112,15 +116,24 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
 
 int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+	mdp5_enable(mdp5_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp5_crtc_vblank(crtc), true);
+	mdp5_disable(mdp5_kms);
+
 	return 0;
 }
 
 void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+	mdp5_enable(mdp5_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp5_crtc_vblank(crtc), false);
+	mdp5_disable(mdp5_kms);
 }
 
 /*
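
mdp5_set_irqmask() above clears the status bits of interrupts being newly enabled before writing the enable mask; `irqmask ^ (irqmask & old_irqmask)` is exactly the newly-set bits, i.e. irqmask & ~old_irqmask. A quick standalone check of that identity:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t old_mask = 0x0000000f;
	uint32_t new_mask = 0x000000f3;
	uint32_t newly_on = new_mask ^ (new_mask & old_mask);

	printf("newly enabled = 0x%08x\n", newly_on);          /* 0x000000f0 */
	printf("same result   = 0x%08x\n", new_mask & ~old_mask);
	return 0;
}
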
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index e253db5de5aa..047cb0433ccb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -177,7 +177,8 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
 	clk_disable_unprepare(mdp5_kms->ahb_clk);
 	clk_disable_unprepare(mdp5_kms->axi_clk);
 	clk_disable_unprepare(mdp5_kms->core_clk);
-	clk_disable_unprepare(mdp5_kms->lut_clk);
+	if (mdp5_kms->lut_clk)
+		clk_disable_unprepare(mdp5_kms->lut_clk);
 
 	return 0;
 }
@@ -189,14 +190,15 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
 	clk_prepare_enable(mdp5_kms->ahb_clk);
 	clk_prepare_enable(mdp5_kms->axi_clk);
 	clk_prepare_enable(mdp5_kms->core_clk);
-	clk_prepare_enable(mdp5_kms->lut_clk);
+	if (mdp5_kms->lut_clk)
+		clk_prepare_enable(mdp5_kms->lut_clk);
 
 	return 0;
 }
 
 static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 		enum mdp5_intf_type intf_type, int intf_num,
-		enum mdp5_intf_mode intf_mode)
+		enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl)
 {
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -209,9 +211,9 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 
 	if ((intf_type == INTF_DSI) &&
 		(intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
-		encoder = mdp5_cmd_encoder_init(dev, &intf);
+		encoder = mdp5_cmd_encoder_init(dev, &intf, ctl);
 	else
-		encoder = mdp5_encoder_init(dev, &intf);
+		encoder = mdp5_encoder_init(dev, &intf, ctl);
 
 	if (IS_ERR(encoder)) {
 		dev_err(dev->dev, "failed to construct encoder\n");
@@ -249,6 +251,8 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 	const struct mdp5_cfg_hw *hw_cfg =
 					mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 	enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
+	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
+	struct mdp5_ctl *ctl;
 	struct drm_encoder *encoder;
 	int ret = 0;
 
@@ -259,8 +263,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 		if (!priv->edp)
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
-					MDP5_INTF_MODE_NONE);
+					MDP5_INTF_MODE_NONE, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -272,8 +282,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 		if (!priv->hdmi)
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
-					MDP5_INTF_MODE_NONE);
+					MDP5_INTF_MODE_NONE, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -298,14 +314,20 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 		if (!priv->dsi[dsi_id])
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
 			mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
 				MDP5_INTF_DSI_MODE_COMMAND :
 				MDP5_INTF_DSI_MODE_VIDEO;
 			dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
-							intf_num, mode);
-			if (IS_ERR(dsi_encs)) {
-				ret = PTR_ERR(dsi_encs);
+							intf_num, mode, ctl);
+			if (IS_ERR(dsi_encs[i])) {
+				ret = PTR_ERR(dsi_encs[i]);
 				break;
 			}
 		}
@@ -327,9 +349,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 	static const enum mdp5_pipe crtcs[] = {
 			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
 	};
-	static const enum mdp5_pipe pub_planes[] = {
+	static const enum mdp5_pipe vig_planes[] = {
 			SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
 	};
+	static const enum mdp5_pipe dma_planes[] = {
+			SSPP_DMA0, SSPP_DMA1,
+	};
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	const struct mdp5_cfg_hw *hw_cfg;
@@ -350,7 +375,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 		struct drm_crtc *crtc;
 
 		plane = mdp5_plane_init(dev, crtcs[i], true,
-				hw_cfg->pipe_rgb.base[i]);
+			hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
 			dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -368,16 +393,30 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 		priv->crtcs[priv->num_crtcs++] = crtc;
 	}
 
-	/* Construct public planes: */
+	/* Construct video planes: */
 	for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
 		struct drm_plane *plane;
 
-		plane = mdp5_plane_init(dev, pub_planes[i], false,
-				hw_cfg->pipe_vig.base[i]);
+		plane = mdp5_plane_init(dev, vig_planes[i], false,
+			hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps);
+		if (IS_ERR(plane)) {
+			ret = PTR_ERR(plane);
+			dev_err(dev->dev, "failed to construct %s plane: %d\n",
+					pipe2name(vig_planes[i]), ret);
+			goto fail;
+		}
+	}
+
+	/* DMA planes */
+	for (i = 0; i < hw_cfg->pipe_dma.count; i++) {
+		struct drm_plane *plane;
+
+		plane = mdp5_plane_init(dev, dma_planes[i], false,
+				hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
 			dev_err(dev->dev, "failed to construct %s plane: %d\n",
-					pipe2name(pub_planes[i]), ret);
+					pipe2name(dma_planes[i]), ret);
 			goto fail;
 		}
 	}
@@ -489,7 +528,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 		goto fail;
 	ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
 	if (ret)
-		goto fail;
+		DBG("failed to get (optional) lut_clk clock");
 	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
 	if (ret)
 		goto fail;
@@ -521,7 +560,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
 	if (IS_ERR(mdp5_kms->ctlm)) {
 		ret = PTR_ERR(mdp5_kms->ctlm);
 		mdp5_kms->ctlm = NULL;
@@ -577,6 +616,11 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = config->hw->lm.max_width;
+	dev->mode_config.max_height = config->hw->lm.max_height;
+
 	return kms;
 
 fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e79ac09b7216..0bb62423586e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -70,18 +70,12 @@ struct mdp5_kms {
 struct mdp5_plane_state {
 	struct drm_plane_state base;
 
-	/* "virtual" zpos.. we calculate actual mixer-stage at runtime
-	 * by sorting the attached planes by zpos and then assigning
-	 * mixer stage lowest to highest.  Private planes get default
-	 * zpos of zero, and public planes a unique value that is
-	 * greater than zero.  This way, things work out if a naive
-	 * userspace assigns planes to a crtc without setting zpos.
-	 */
-	int zpos;
+	/* kept in sync with the installed plane properties */
+	uint8_t premultiplied;
+	uint8_t zpos;
+	uint8_t alpha;
 
-	/* the actual mixer stage, calculated in crtc->atomic_check()
-	 * NOTE: this should move to mdp5_crtc_state, when that exists
-	 */
+	/* assigned by crtc blender */
 	enum mdp_mixer_stage_id stage;
 
 	/* some additional transactional status to help us know in the
@@ -192,7 +186,8 @@ static inline uint32_t lm2ppdone(int lm)
 int mdp5_disable(struct mdp5_kms *mdp5_kms);
 int mdp5_enable(struct mdp5_kms *mdp5_kms);
 
-void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask);
 void mdp5_irq_preinstall(struct msm_kms *kms);
 int mdp5_irq_postinstall(struct msm_kms *kms);
 void mdp5_irq_uninstall(struct msm_kms *kms);
@@ -202,60 +197,38 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
-static inline bool pipe_supports_yuv(enum mdp5_pipe pipe)
-{
-	switch (pipe) {
-	case SSPP_VIG0:
-	case SSPP_VIG1:
-	case SSPP_VIG2:
-	case SSPP_VIG3:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static inline
-uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
-		uint32_t max_formats)
-{
-	return mdp_get_formats(pixel_formats, max_formats,
-				!pipe_supports_yuv(pipe));
-}
-
-void mdp5_plane_install_properties(struct drm_plane *plane,
-		struct drm_mode_object *obj);
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_complete_flip(struct drm_plane *plane);
 void mdp5_plane_complete_commit(struct drm_plane *plane,
 	struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
+		enum mdp5_pipe pipe, bool private_plane,
+		uint32_t reg_offset, uint32_t caps);
 
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
 void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id);
 
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
-		struct mdp5_interface *intf);
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder);
 
 #ifdef CONFIG_DRM_MSM_DSI
 struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf);
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder);
 #else
-static inline struct drm_encoder *mdp5_cmd_encoder_init(
-			struct drm_device *dev, struct mdp5_interface *intf)
+static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	return ERR_PTR(-EINVAL);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 22275568ab8b..07fb62fea6dc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -26,6 +26,7 @@ struct mdp5_plane {
 
 	spinlock_t pipe_lock;	/* protect REG_MDP5_PIPE_* registers */
 	uint32_t reg_offset;
+	uint32_t caps;
 
 	uint32_t flush_mask;	/* used to commit pipe registers */
 
@@ -40,6 +41,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 		unsigned int crtc_w, unsigned int crtc_h,
 		uint32_t src_x, uint32_t src_y,
 		uint32_t src_w, uint32_t src_h);
+
 static void set_scanout_locked(struct drm_plane *plane,
 		struct drm_framebuffer *fb);
 
@@ -64,18 +66,122 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
 	kfree(mdp5_plane);
 }
 
+static void mdp5_plane_install_rotation_property(struct drm_device *dev,
+		struct drm_plane *plane)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+	if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) &&
+		!(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
+		return;
+
+	if (!dev->mode_config.rotation_property)
+		dev->mode_config.rotation_property =
+			drm_mode_create_rotation_property(dev,
+			BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+
+	if (dev->mode_config.rotation_property)
+		drm_object_attach_property(&plane->base,
+			dev->mode_config.rotation_property,
+			0);
+}
+
 /* helper to install properties which are common to planes and crtcs */
-void mdp5_plane_install_properties(struct drm_plane *plane,
+static void mdp5_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj)
 {
-	// XXX
+	struct drm_device *dev = plane->dev;
+	struct msm_drm_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+
+#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \
+		prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \
+		if (!prop) { \
+			prop = drm_property_##fnc(dev, 0, #name, \
+				##__VA_ARGS__); \
+			if (!prop) { \
+				dev_warn(dev->dev, \
+					"Create property %s failed\n", \
+					#name); \
+				return; \
+			} \
+			dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \
+		} \
+		drm_object_attach_property(&plane->base, prop, init_val); \
+	} while (0)
+
+#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \
+		INSTALL_PROPERTY(name, NAME, init_val, \
+				create_range, min, max)
+
+#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \
+		INSTALL_PROPERTY(name, NAME, init_val, \
+				create_enum, name##_prop_enum_list, \
+				ARRAY_SIZE(name##_prop_enum_list))
+
+	INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1);
+
+	mdp5_plane_install_rotation_property(dev, plane);
+
+#undef INSTALL_RANGE_PROPERTY
+#undef INSTALL_ENUM_PROPERTY
+#undef INSTALL_PROPERTY
+}
+
+static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
+		struct drm_plane_state *state, struct drm_property *property,
+		uint64_t val)
+{
+	struct drm_device *dev = plane->dev;
+	struct mdp5_plane_state *pstate;
+	struct msm_drm_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	pstate = to_mdp5_plane_state(state);
+
+#define SET_PROPERTY(name, NAME, type) do { \
+		if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+			pstate->name = (type)val; \
+			DBG("Set property %s %d", #name, (type)val); \
+			goto done; \
+		} \
+	} while (0)
+
+	SET_PROPERTY(zpos, ZPOS, uint8_t);
+
+	dev_err(dev->dev, "Invalid property\n");
+	ret = -EINVAL;
+done:
+	return ret;
+#undef SET_PROPERTY
 }
 
-int mdp5_plane_set_property(struct drm_plane *plane,
-		struct drm_property *property, uint64_t val)
+static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
+		const struct drm_plane_state *state,
+		struct drm_property *property, uint64_t *val)
 {
-	// XXX
-	return -EINVAL;
+	struct drm_device *dev = plane->dev;
+	struct mdp5_plane_state *pstate;
+	struct msm_drm_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	pstate = to_mdp5_plane_state(state);
+
+#define GET_PROPERTY(name, NAME, type) do { \
+		if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+			*val = pstate->name; \
+			DBG("Get property %s %lld", #name, *val); \
+			goto done; \
+		} \
+	} while (0)
+
+	GET_PROPERTY(zpos, ZPOS, uint8_t);
+
+	dev_err(dev->dev, "Invalid property\n");
+	ret = -EINVAL;
+done:
+	return ret;
+#undef GET_PROPERTY
 }
 
 static void mdp5_plane_reset(struct drm_plane *plane)
@@ -88,11 +194,15 @@ static void mdp5_plane_reset(struct drm_plane *plane)
 	kfree(to_mdp5_plane_state(plane->state));
 	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
 
-	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
-		mdp5_state->zpos = 0;
-	} else {
-		mdp5_state->zpos = 1 + drm_plane_index(plane);
-	}
+	/* assign default blend parameters */
+	mdp5_state->alpha = 255;
+	mdp5_state->premultiplied = 0;
+
+	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+		mdp5_state->zpos = STAGE_BASE;
+	else
+		mdp5_state->zpos = STAGE0 + drm_plane_index(plane);
+
 	mdp5_state->base.plane = plane;
 
 	plane->state = &mdp5_state->base;
@@ -131,7 +241,9 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
 		.update_plane = drm_atomic_helper_update_plane,
 		.disable_plane = drm_atomic_helper_disable_plane,
 		.destroy = mdp5_plane_destroy,
-		.set_property = mdp5_plane_set_property,
+		.set_property = drm_atomic_helper_plane_set_property,
+		.atomic_set_property = mdp5_plane_atomic_set_property,
+		.atomic_get_property = mdp5_plane_atomic_get_property,
 		.reset = mdp5_plane_reset,
 		.atomic_duplicate_state = mdp5_plane_duplicate_state,
 		.atomic_destroy_state = mdp5_plane_destroy_state,
@@ -164,10 +276,44 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct drm_plane_state *old_state = plane->state;
+	const struct mdp_format *format;
+	bool vflip, hflip;
 
 	DBG("%s: check (%d -> %d)", mdp5_plane->name,
 			plane_enabled(old_state), plane_enabled(state));
 
+	if (plane_enabled(state)) {
+		format = to_mdp_format(msm_framebuffer_format(state->fb));
+		if (MDP_FORMAT_IS_YUV(format) &&
+			!pipe_supports_yuv(mdp5_plane->caps)) {
+			dev_err(plane->dev->dev,
+				"Pipe doesn't support YUV\n");
+
+			return -EINVAL;
+		}
+
+		if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
+			(((state->src_w >> 16) != state->crtc_w) ||
+			((state->src_h >> 16) != state->crtc_h))) {
+			dev_err(plane->dev->dev,
+				"Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+				state->src_w >> 16, state->src_h >> 16,
+				state->crtc_w, state->crtc_h);
+
+			return -EINVAL;
+		}
+
+		hflip = !!(state->rotation & BIT(DRM_REFLECT_X));
+		vflip = !!(state->rotation & BIT(DRM_REFLECT_Y));
+		if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
+			(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
+			dev_err(plane->dev->dev,
+				"Pipe doesn't support flip\n");
+
+			return -EINVAL;
+		}
+	}
+
 	if (plane_enabled(state) && plane_enabled(old_state)) {
 		/* we cannot change SMP block configuration during scanout: */
 		bool full_modeset = false;
@@ -346,16 +492,21 @@ static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
 	return 0;
 }
 
-static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+static int calc_scalex_steps(struct drm_plane *plane,
+		uint32_t pixel_format, uint32_t src, uint32_t dest,
 		uint32_t phasex_steps[2])
 {
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	struct device *dev = mdp5_kms->dev->dev;
 	uint32_t phasex_step;
 	unsigned int hsub;
 	int ret;
 
 	ret = calc_phase_step(src, dest, &phasex_step);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
 		return ret;
+	}
 
 	hsub = drm_format_horz_chroma_subsampling(pixel_format);
 
@@ -365,16 +516,21 @@ static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
 	return 0;
 }
 
-static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+static int calc_scaley_steps(struct drm_plane *plane,
+		uint32_t pixel_format, uint32_t src, uint32_t dest,
 		uint32_t phasey_steps[2])
 {
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	struct device *dev = mdp5_kms->dev->dev;
 	uint32_t phasey_step;
 	unsigned int vsub;
 	int ret;
 
 	ret = calc_phase_step(src, dest, &phasey_step);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
 		return ret;
+	}
 
 	vsub = drm_format_vert_chroma_subsampling(pixel_format);
 
@@ -384,28 +540,38 @@ static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
 	return 0;
 }
 
-static uint32_t get_scalex_config(uint32_t src, uint32_t dest)
-{
-	uint32_t filter;
-
-	filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
-
-	return  MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
-		MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) |
-		MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter)  |
-		MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter);
-}
-
-static uint32_t get_scaley_config(uint32_t src, uint32_t dest)
+static uint32_t get_scale_config(enum mdp_chroma_samp_type chroma_sample,
+		uint32_t src, uint32_t dest, bool hor)
 {
-	uint32_t filter;
-
-	filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+	uint32_t y_filter =   (src <= dest) ? SCALE_FILTER_CA  : SCALE_FILTER_PCMN;
+	uint32_t y_a_filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+	uint32_t uv_filter = ((src / 2) <= dest) ? /* 2x upsample */
+			SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+	uint32_t value = 0;
+
+	if (chroma_sample == CHROMA_420 || chroma_sample == CHROMA_H2V1) {
+		if (hor)
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter);
+		else
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter);
+	} else if (src != dest) {
+		if (hor)
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter);
+		else
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter);
+	}
 
-	return  MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
-		MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) |
-		MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter)  |
-		MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter);
+	return value;
 }
 
 static int mdp5_plane_mode_set(struct drm_plane *plane,
@@ -416,8 +582,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 		uint32_t src_w, uint32_t src_h)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct drm_plane_state *pstate = plane->state;
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
-	struct device *dev = mdp5_kms->dev->dev;
 	enum mdp5_pipe pipe = mdp5_plane->pipe;
 	const struct mdp_format *format;
 	uint32_t nplanes, config = 0;
@@ -425,6 +591,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,};
 	uint32_t hdecm = 0, vdecm = 0;
 	uint32_t pix_format;
+	bool vflip, hflip;
 	unsigned long flags;
 	int ret;
 
@@ -449,7 +616,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 
 	/* Request some memory from the SMP: */
 	ret = mdp5_smp_request(mdp5_kms->smp,
-			mdp5_plane->pipe, fb->pixel_format, src_w);
+			mdp5_plane->pipe, format, src_w, false);
 	if (ret)
 		return ret;
 
@@ -461,29 +628,23 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	 */
 	mdp5_smp_configure(mdp5_kms->smp, pipe);
 
-	/* SCALE is used to both scale and up-sample chroma components */
+	ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
+	if (ret)
+		return ret;
 
-	if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) {
-		/* TODO calc hdecm */
-		ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step);
-		if (ret) {
-			dev_err(dev, "X scaling (%d -> %d) failed: %d\n",
-					src_w, crtc_w, ret);
-			return ret;
-		}
-		config |= get_scalex_config(src_w, crtc_w);
-	}
+	ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step);
+	if (ret)
+		return ret;
 
-	if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) {
-		/* TODO calc vdecm */
-		ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step);
-		if (ret) {
-			dev_err(dev, "Y scaling (%d -> %d) failed: %d\n",
-					src_h, crtc_h, ret);
-			return ret;
-		}
-		config |= get_scaley_config(src_h, crtc_h);
-	}
+	/* TODO calc hdecm, vdecm */
+
+	/* SCALE is used to both scale and up-sample chroma components */
+	config |= get_scale_config(format->chroma_sample, src_w, crtc_w, true);
+	config |= get_scale_config(format->chroma_sample, src_h, crtc_h, false);
+	DBG("scale config = %x", config);
+
+	hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X));
+	vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y));
 
 	spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
 
@@ -516,7 +677,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 			MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
 			MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
 			COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
-			MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) |
+			MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
 			MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
@@ -526,29 +687,35 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 			MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+			(hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
+			(vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
 			MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
 
 	/* not using secure mode: */
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
 
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
-			phasex_step[0]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
-			phasey_step[0]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
-			phasex_step[1]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
-			phasey_step[1]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
-			MDP5_PIPE_DECIMATION_VERT(vdecm) |
-			MDP5_PIPE_DECIMATION_HORZ(hdecm));
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
-
-	if (MDP_FORMAT_IS_YUV(format))
-		csc_enable(mdp5_kms, pipe,
-				mdp_get_default_csc_cfg(CSC_YUV2RGB));
-	else
-		csc_disable(mdp5_kms, pipe);
+	if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) {
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+				phasex_step[0]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+				phasey_step[0]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+				phasex_step[1]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+				phasey_step[1]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+				MDP5_PIPE_DECIMATION_VERT(vdecm) |
+				MDP5_PIPE_DECIMATION_HORZ(hdecm));
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
+	}
+
+	if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) {
+		if (MDP_FORMAT_IS_YUV(format))
+			csc_enable(mdp5_kms, pipe,
+					mdp_get_default_csc_cfg(CSC_YUV2RGB));
+		else
+			csc_disable(mdp5_kms, pipe);
+	}
 
 	set_scanout_locked(plane, fb);
 
@@ -599,7 +766,8 @@ void mdp5_plane_complete_commit(struct drm_plane *plane,
 
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
+		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset,
+		uint32_t caps)
 {
 	struct drm_plane *plane = NULL;
 	struct mdp5_plane *mdp5_plane;
@@ -616,9 +784,11 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 
 	mdp5_plane->pipe = pipe;
 	mdp5_plane->name = pipe2name(pipe);
+	mdp5_plane->caps = caps;
 
-	mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
-			ARRAY_SIZE(mdp5_plane->formats));
+	mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
+		ARRAY_SIZE(mdp5_plane->formats),
+		!pipe_supports_yuv(mdp5_plane->caps));
 
 	mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
 	mdp5_plane->reg_offset = reg_offset;
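
The get_scale_config() calls in the mode_set hunk above fold what were separate X/Y
code paths into one rule: the SCALE block must be programmed either when the axis is
actually resized or when a sub-sampled chroma plane has to be up-sampled back to full
resolution. A minimal userspace sketch of that rule (enum values mirror
mdp_chroma_samp_type from this series; the function name and main() are illustrative
only, not driver code):

#include <stdbool.h>
#include <stdio.h>

enum chroma_samp { CHROMA_FULL, CHROMA_H2V1, CHROMA_H1V2, CHROMA_420 };

/* scaler needed if the axis is resized OR chroma is sub-sampled on it */
static bool scale_needed(enum chroma_samp cs, unsigned src, unsigned dst,
			 bool horiz)
{
	bool sub_sampled = horiz ? (cs == CHROMA_H2V1 || cs == CHROMA_420)
				 : (cs == CHROMA_H1V2 || cs == CHROMA_420);

	return (src != dst) || sub_sampled;
}

int main(void)
{
	/* NV12 at 1:1 still needs the scaler, to up-sample chroma: */
	printf("%d\n", scale_needed(CHROMA_420, 1920, 1920, true));  /* 1 */
	/* RGB at 1:1 does not: */
	printf("%d\n", scale_needed(CHROMA_FULL, 1920, 1920, true)); /* 0 */
	return 0;
}
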
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 64a27d86f2f5..563cca972dcb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -90,6 +90,8 @@
 struct mdp5_smp {
 	struct drm_device *dev;
 
+	const struct mdp5_smp_block *cfg;
+
 	int blk_cnt;
 	int blk_size;
 
@@ -137,14 +139,12 @@ static int smp_request_block(struct mdp5_smp *smp,
 		u32 cid, int nblks)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
-	const struct mdp5_cfg_hw *hw_cfg;
 	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
 	int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
 	int reserved;
 	unsigned long flags;
 
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-	reserved = hw_cfg->smp.reserved[cid];
+	reserved = smp->cfg->reserved[cid];
 
 	spin_lock_irqsave(&smp->state_lock, flags);
 
@@ -209,12 +209,14 @@ static void set_fifo_thresholds(struct mdp5_smp *smp,
 * decimated width.  I.e. SMP buffering sits downstream of decimation (which
 * presumably happens during the DMA from the scanout buffer).
  */
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
+		const struct mdp_format *format, u32 width, bool hdecim)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
 	struct drm_device *dev = mdp5_kms->dev;
 	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
 	int i, hsub, nplanes, nlines, nblks, ret;
+	u32 fmt = format->base.pixel_format;
 
 	nplanes = drm_format_num_planes(fmt);
 	hsub = drm_format_horz_chroma_subsampling(fmt);
@@ -222,6 +224,21 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
 	/* different if BWC (compressed framebuffer?) enabled: */
 	nlines = 2;
 
+	/* Newer MDPs have split/packing logic, which fetches the
+	 * sub-sampled U and V components (splitting them from Y if
+	 * necessary), packs them together, and writes them to SMP
+	 * through a single client.
+	 */
+	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
+		fmt = DRM_FORMAT_NV24;
+		nplanes = 2;
+
+		/* if decimation is enabled, the HW decimates less on the
+		 * sub-sampled chroma components
+		 */
+		if (hdecim && (hsub > 1))
+			hsub = 1;
+	}
+
 	for (i = 0, nblks = 0; i < nplanes; i++) {
 		int n, fetch_stride, cpp;
 
@@ -388,6 +405,7 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo
 	}
 
 	smp->dev = dev;
+	smp->cfg = cfg;
 	smp->blk_cnt = cfg->mmb_count;
 	smp->blk_size = cfg->mmb_size;
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 5b6c2363f592..20b87e800ea3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -39,7 +39,8 @@ struct mdp5_smp;
 struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
 void  mdp5_smp_destroy(struct mdp5_smp *smp);
 
-int  mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+int  mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
+		const struct mdp_format *format, u32 width, bool hdecim);
 void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
 void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
 void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 641d036c5bcb..4f792c4e40f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -46,7 +46,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 
 enum mdp_chroma_samp_type {
-	CHROMA_RGB = 0,
+	CHROMA_FULL = 0,
 	CHROMA_H2V1 = 1,
 	CHROMA_H1V2 = 2,
 	CHROMA_420 = 3,
@@ -65,6 +65,10 @@ enum mdp_mixer_stage_id {
 	STAGE1 = 3,
 	STAGE2 = 4,
 	STAGE3 = 5,
+	STAGE4 = 6,
+	STAGE5 = 7,
+	STAGE6 = 8,
+	STAGE_MAX = 8,
 };
 
 enum mdp_alpha_type {
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 7b0524dc1872..1c2caffc97e4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -71,7 +71,7 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
 	},
 };
 
-#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
 		.base = { .pixel_format = DRM_FORMAT_ ## name }, \
 		.bpc_a = BPC ## a ## A,                          \
 		.bpc_r = BPC ## r,                               \
@@ -83,7 +83,8 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
 		.cpp = c,                                        \
 		.unpack_count = cnt,                             \
 		.fetch_type = fp,                                \
-		.chroma_sample = cs                              \
+		.chroma_sample = cs,                             \
+		.is_yuv = yuv,                                   \
 }
 
 #define BPC0A 0
@@ -95,30 +96,49 @@ static struct csc_cfg csc_convert[CSC_MAX] = {
 static const struct mdp_format formats[] = {
 	/*  name      a  r  g  b   e0 e1 e2 e3  alpha   tight  cpp cnt ... */
 	FMT(ARGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(ABGR8888, 8, 8, 8, 8,  2, 0, 1, 3,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(RGBA8888, 8, 8, 8, 8,  3, 1, 0, 2,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(BGRA8888, 8, 8, 8, 8,  3, 2, 0, 1,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(XRGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  false,  true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(RGB888,   0, 8, 8, 8,  1, 0, 2, 0,  false,  true,  3,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(BGR888,   0, 8, 8, 8,  2, 0, 1, 0,  false,  true,  3,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(RGB565,   0, 5, 6, 5,  1, 0, 2, 0,  false,  true,  2,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(BGR565,   0, 5, 6, 5,  2, 0, 1, 0,  false,  true,  2,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 
 	/* --- RGB formats above / YUV formats below this line --- */
 
+	/* 2 plane YUV */
 	FMT(NV12,     0, 8, 8, 8,  1, 2, 0, 0,  false,  true,  2, 2,
-			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
 	FMT(NV21,     0, 8, 8, 8,  2, 1, 0, 0,  false,  true,  2, 2,
-			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
+	FMT(NV16,     0, 8, 8, 8,  1, 2, 0, 0,  false,  true,  2, 2,
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+	FMT(NV61,     0, 8, 8, 8,  2, 1, 0, 0,  false,  true,  2, 2,
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+	/* 1 plane YUV */
+	FMT(VYUY,     0, 8, 8, 8,  2, 0, 1, 0,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	FMT(UYVY,     0, 8, 8, 8,  1, 0, 2, 0,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	FMT(YUYV,     0, 8, 8, 8,  0, 1, 0, 2,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	FMT(YVYU,     0, 8, 8, 8,  0, 2, 0, 1,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	/* 3 plane YUV */
+	FMT(YUV420,   0, 8, 8, 8,  2, 1, 0, 0,  false,  true,  1, 1,
+			MDP_PLANE_PLANAR, CHROMA_420, true),
+	FMT(YVU420,   0, 8, 8, 8,  1, 2, 0, 0,  false,  true,  1, 1,
+			MDP_PLANE_PLANAR, CHROMA_420, true),
 };
 
 /*
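
The FMT() table above leans on preprocessor token pasting: the format name and the
per-channel bpc digits are pasted into DRM_FORMAT_* and BPC* identifiers, and this
series threads one extra trailing parameter (yuv) through to the new is_yuv field.
A self-contained demo of the same trick (PIXFMT_NV12 and BPC8's value are stand-ins;
only the pasting pattern matches the real macro):

#include <stdio.h>

#define BPC0A 0		/* as defined right below the macro above */
#define BPC8  3		/* value is a stand-in; the real enum isn't shown */

#define PIXFMT_NV12 0x3231564e	/* stand-in for DRM_FORMAT_NV12 */

struct fmt { unsigned pixel_format; int bpc_a, bpc_r, is_yuv; };

/* same pasting pattern as FMT(): name and bpc digits become identifiers,
 * and the new trailing "yuv" parameter fills the new field */
#define FMT(name, a, r, yuv) \
	{ .pixel_format = PIXFMT_ ## name, .bpc_a = BPC ## a ## A, \
	  .bpc_r = BPC ## r, .is_yuv = yuv }

static const struct fmt formats[] = {
	FMT(NV12, 0, 8, 1),
};

int main(void)
{
	printf("fourcc=%08x bpc_a=%d bpc_r=%d yuv=%d\n",
	       formats[0].pixel_format, formats[0].bpc_a,
	       formats[0].bpc_r, formats[0].is_yuv);
	return 0;
}
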
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 1988c243f437..64287304054d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -39,7 +39,8 @@ static void update_irq(struct mdp_kms *mdp_kms)
 	list_for_each_entry(irq, &mdp_kms->irq_list, node)
 		irqmask |= irq->irqmask;
 
-	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
+	mdp_kms->cur_irq_mask = irqmask;
 }
 
 /* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
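
set_irqmask() now receives the previous mask alongside the new one. The diff does not
show what the mdp5 implementation does with it, but the obvious use is acting only on
bits that changed, e.g. clearing latched status before enabling a bit. A hypothetical
sketch of that idea with register access stubbed out:

#include <stdint.h>
#include <stdio.h>

static void set_irqmask(uint32_t new_mask, uint32_t old_mask)
{
	uint32_t turning_on  = new_mask & ~old_mask;
	uint32_t turning_off = old_mask & ~new_mask;

	if (turning_on)		/* e.g. clear stale latched status first */
		printf("clear status: 0x%08x\n", turning_on);
	printf("INTR_EN <- 0x%08x (newly off: 0x%08x)\n",
	       new_mask, turning_off);
}

int main(void)
{
	set_irqmask(0x6, 0x5);	/* bit0 turns off, bit1 turns on */
	return 0;
}
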
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 2d3428cb74d0..46a94e7d50e2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -30,7 +30,8 @@ struct mdp_kms;
 
 struct mdp_kms_funcs {
 	struct msm_kms_funcs base;
-	void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+	void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask);
 };
 
 struct mdp_kms {
@@ -42,6 +43,7 @@ struct mdp_kms {
 	bool in_irq;
 	struct list_head irq_list;    /* list of mdp4_irq */
 	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+	uint32_t cur_irq_mask;        /* current irq mask */
 };
 #define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
 
@@ -90,13 +92,27 @@ struct mdp_format {
 	uint8_t cpp, unpack_count;
 	enum mdp_fetch_type fetch_type;
 	enum mdp_chroma_samp_type chroma_sample;
+	bool is_yuv;
 };
 #define to_mdp_format(x) container_of(x, struct mdp_format, base)
-#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB)
+#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
 
 uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
 const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
 
+/* MDP pipe capabilities */
+#define MDP_PIPE_CAP_HFLIP			BIT(0)
+#define MDP_PIPE_CAP_VFLIP			BIT(1)
+#define MDP_PIPE_CAP_SCALE			BIT(2)
+#define MDP_PIPE_CAP_CSC			BIT(3)
+#define MDP_PIPE_CAP_DECIMATION			BIT(4)
+
+static inline bool pipe_supports_yuv(uint32_t pipe_caps)
+{
+	return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
+		(pipe_caps & MDP_PIPE_CAP_CSC);
+}
+
 enum csc_type {
 	CSC_RGB2RGB = 0,
 	CSC_YUV2RGB,
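
The new capability bits make the VIG/RGB/DMA pipe differences data-driven, and
pipe_supports_yuv() encodes why YUV needs both SCALE (chroma up-sampling) and CSC
(YUV-to-RGB conversion). A standalone usage sketch (the vig/dma cap combinations are
examples, not the actual per-pipe config from this series):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define MDP_PIPE_CAP_HFLIP	BIT(0)
#define MDP_PIPE_CAP_VFLIP	BIT(1)
#define MDP_PIPE_CAP_SCALE	BIT(2)
#define MDP_PIPE_CAP_CSC	BIT(3)

static bool pipe_supports_yuv(uint32_t caps)
{
	return (caps & MDP_PIPE_CAP_SCALE) && (caps & MDP_PIPE_CAP_CSC);
}

int main(void)
{
	uint32_t vig_like = MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
	uint32_t dma_like = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP;

	printf("vig-like yuv: %d\n", pipe_supports_yuv(vig_like)); /* 1 */
	printf("dma-like yuv: %d\n", pipe_supports_yuv(dma_like)); /* 0 */
	return 0;
}
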
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d3467b115e04..0339c5d82d37 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -116,6 +116,65 @@ u32 msm_readl(const void __iomem *addr)
 	return val;
 }
 
+struct vblank_event {
+	struct list_head node;
+	int crtc_id;
+	bool enable;
+};
+
+static void vblank_ctrl_worker(struct work_struct *work)
+{
+	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
+						struct msm_vblank_ctrl, work);
+	struct msm_drm_private *priv = container_of(vbl_ctrl,
+					struct msm_drm_private, vblank_ctrl);
+	struct msm_kms *kms = priv->kms;
+	struct vblank_event *vbl_ev, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+		list_del(&vbl_ev->node);
+		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+		if (vbl_ev->enable)
+			kms->funcs->enable_vblank(kms,
+						priv->crtcs[vbl_ev->crtc_id]);
+		else
+			kms->funcs->disable_vblank(kms,
+						priv->crtcs[vbl_ev->crtc_id]);
+
+		kfree(vbl_ev);
+
+		spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	}
+
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+}
+
+static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
+					int crtc_id, bool enable)
+{
+	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+	struct vblank_event *vbl_ev;
+	unsigned long flags;
+
+	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
+	if (!vbl_ev)
+		return -ENOMEM;
+
+	vbl_ev->crtc_id = crtc_id;
+	vbl_ev->enable = enable;
+
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+	queue_work(priv->wq, &vbl_ctrl->work);
+
+	return 0;
+}
+
 /*
  * DRM operations:
  */
@@ -125,6 +184,18 @@ static int msm_unload(struct drm_device *dev)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_gpu *gpu = priv->gpu;
+	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+	struct vblank_event *vbl_ev, *tmp;
+
+	/* We must cancel and clean up any pending vblank enable/disable
+	 * work before drm_irq_uninstall() to avoid work re-enabling an
+	 * irq after uninstall has disabled it.
+	 */
+	cancel_work_sync(&vbl_ctrl->work);
+	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+		list_del(&vbl_ev->node);
+		kfree(vbl_ev);
+	}
 
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
@@ -282,6 +353,9 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 
 	INIT_LIST_HEAD(&priv->inactive_list);
 	INIT_LIST_HEAD(&priv->fence_cbs);
+	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
+	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	drm_mode_config_init(dev);
 
@@ -331,10 +405,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 		}
 	}
 
-	dev->mode_config.min_width = 0;
-	dev->mode_config.min_height = 0;
-	dev->mode_config.max_width = 2048;
-	dev->mode_config.max_height = 2048;
 	dev->mode_config.funcs = &mode_config_funcs;
 
 	ret = drm_vblank_init(dev, priv->num_crtcs);
@@ -468,7 +538,7 @@ static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
 	if (!kms)
 		return -ENXIO;
 	DBG("dev=%p, crtc=%d", dev, crtc_id);
-	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+	return vblank_ctrl_queue_work(priv, crtc_id, true);
 }
 
 static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
@@ -478,7 +548,7 @@ static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
 	if (!kms)
 		return;
 	DBG("dev=%p, crtc=%d", dev, crtc_id);
-	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
+	vblank_ctrl_queue_work(priv, crtc_id, false);
 }
 
 /*
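
The vblank_ctrl machinery above exists because the drm core may call
enable_vblank()/disable_vblank() from atomic context (note the GFP_ATOMIC allocation),
while the kms hooks apparently need to run where sleeping is allowed, so requests are
queued and drained from a workqueue. The drain loop pops one node under the spinlock
and drops the lock around the callback. A userspace model of that locking discipline,
with a pthread mutex standing in for the spinlock (structure names reused for
readability; the threading itself is not modeled):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct vblank_event {
	struct vblank_event *next;
	int crtc_id;
	bool enable;
};

static struct vblank_event *event_list;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* queueing side: cheap and non-blocking, safe for atomic context */
static int queue_event(int crtc_id, bool enable)
{
	struct vblank_event *ev = calloc(1, sizeof(*ev));

	if (!ev)
		return -1;
	ev->crtc_id = crtc_id;
	ev->enable = enable;

	pthread_mutex_lock(&lock);
	ev->next = event_list;
	event_list = ev;
	pthread_mutex_unlock(&lock);
	return 0;
}

/* worker side: pop under the lock, drop it around the slow callback */
static void drain(void)
{
	pthread_mutex_lock(&lock);
	while (event_list) {
		struct vblank_event *ev = event_list;

		event_list = ev->next;
		pthread_mutex_unlock(&lock);

		printf("crtc %d: %s vblank\n", ev->crtc_id,
		       ev->enable ? "enable" : "disable"); /* "may sleep" */
		free(ev);

		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	queue_event(0, true);
	queue_event(1, false);
	drain();
	return 0;
}
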
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 4ff0ec9c994b..3be7a56b14f1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -30,6 +30,7 @@
 #include <linux/list.h>
 #include <linux/iommu.h>
 #include <linux/types.h>
+#include <linux/of_graph.h>
 #include <asm/sizes.h>
 
 #ifndef CONFIG_OF
@@ -64,6 +65,19 @@ struct msm_file_private {
 	int dummy;
 };
 
+enum msm_mdp_plane_property {
+	PLANE_PROP_ZPOS,
+	PLANE_PROP_ALPHA,
+	PLANE_PROP_PREMULTIPLIED,
+	PLANE_PROP_MAX_NUM
+};
+
+struct msm_vblank_ctrl {
+	struct work_struct work;
+	struct list_head event_list;
+	spinlock_t lock;
+};
+
 struct msm_drm_private {
 
 	struct msm_kms *kms;
@@ -128,6 +142,9 @@ struct msm_drm_private {
 	unsigned int num_connectors;
 	struct drm_connector *connectors[8];
 
+	/* Properties */
+	struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
+
 	/* VRAM carveout, used when no IOMMU: */
 	struct {
 		unsigned long size;
@@ -137,6 +154,8 @@ struct msm_drm_private {
 		 */
 		struct drm_mm mm;
 	} vram;
+
+	struct msm_vblank_ctrl vblank_ctrl;
 };
 
 struct msm_format {
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 95f6532df02d..f97a1964ef39 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -43,11 +43,11 @@ static struct fb_ops msm_fb_ops = {
 	/* Note: to properly handle manual update displays, we wrap the
 	 * basic fbdev ops which write to the framebuffer
 	 */
-	.fb_read = fb_sys_read,
-	.fb_write = fb_sys_write,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_read = drm_fb_helper_sys_read,
+	.fb_write = drm_fb_helper_sys_write,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_mmap = msm_fbdev_mmap,
 
 	.fb_check_var = drm_fb_helper_check_var,
@@ -144,10 +144,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 		goto fail_unlock;
 	}
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
 		dev_err(dev->dev, "failed to allocate fb info\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(fbi);
 		goto fail_unlock;
 	}
 
@@ -155,7 +155,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
 	fbdev->fb = fb;
 	helper->fb = fb;
-	helper->fbdev = fbi;
 
 	fbi->par = helper;
 	fbi->flags = FBINFO_DEFAULT;
@@ -163,12 +162,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
 	strcpy(fbi->fix.id, "msm");
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto fail_unlock;
-	}
-
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
@@ -191,7 +184,6 @@ fail_unlock:
 fail:
 
 	if (ret) {
-		framebuffer_release(fbi);
 		if (fb) {
 			drm_framebuffer_unregister_private(fb);
 			drm_framebuffer_remove(fb);
@@ -266,17 +258,11 @@ void msm_fbdev_free(struct drm_device *dev)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_fb_helper *helper = priv->fbdev;
 	struct msm_fbdev *fbdev;
-	struct fb_info *fbi;
 
 	DBG();
 
-	fbi = helper->fbdev;
-
-	/* only cleanup framebuffer if it is present */
-	if (fbi) {
-		unregister_framebuffer(fbi);
-		framebuffer_release(fbi);
-	}
+	drm_fb_helper_unregister_fbi(helper);
+	drm_fb_helper_release_fbi(helper);
 
 	drm_fb_helper_fini(helper);
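
The fbdev cleanup above also switches allocation to drm_fb_helper_alloc_fbi(), which
reports failure through an ERR_PTR-encoded return instead of NULL, hence the
IS_ERR()/PTR_ERR() handling. A simplified userspace re-implementation of that
convention (MAX_ERRNO and the macros are pared-down copies of the kernel idiom, not
the real definitions):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(e)	((void *)(long)(e))
#define PTR_ERR(p)	((long)(p))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

static int some_object;

static void *alloc_fbi(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)&some_object;
}

int main(void)
{
	void *fbi = alloc_fbi(1);

	if (IS_ERR(fbi)) {
		printf("alloc failed: %ld\n", PTR_ERR(fbi)); /* -12 */
		return 1;
	}
	return 0;
}
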
 
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2b765663c1a3..a34b437dbc8f 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -18,7 +18,6 @@ nouveau-y += $(nvkm-y)
 ifdef CONFIG_X86
 nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
 endif
-nouveau-y += nouveau_agp.o
 nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
 nouveau-y += nouveau_drm.o
 nouveau-y += nouveau_hwmon.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index c6361422a0b2..82bd4658aa58 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -198,7 +198,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 		int *burst, int *lwm)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nv_fifo_info fifo_data;
 	struct nv_sim_state sim_data;
 	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index af7249ca0f4b..78cb033bc015 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -65,8 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
 
 static int sample_load_twice(struct drm_device *dev, bool sense[2])
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
-	struct nvkm_timer *ptimer = nvxx_timer(device);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvif_object *device = &drm->device.object;
 	int i;
 
 	for (i = 0; i < 2; i++) {
@@ -80,17 +80,22 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
 		 * use a 10ms timeout (guards against crtc being inactive, in
 		 * which case blank state would never change)
 		 */
-		if (!nvkm_timer_wait_eq(ptimer, 10000000,
-					NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+		if (nvif_msec(&drm->device, 10,
+			if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+				break;
+		) < 0)
 			return -EBUSY;
-		if (!nvkm_timer_wait_eq(ptimer, 10000000,
-					NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000001))
+
+		if (nvif_msec(&drm->device, 10,
+			if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+				break;
+		) < 0)
 			return -EBUSY;
-		if (!nvkm_timer_wait_eq(ptimer, 10000000,
-					NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+
+		if (nvif_msec(&drm->device, 10,
+			if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+				break;
+		) < 0)
 			return -EBUSY;
 
 		udelay(100);
@@ -128,7 +133,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 						 struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
@@ -231,8 +236,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &nouveau_drm(dev)->device;
-	struct nvkm_gpio *gpio = nvxx_gpio(device);
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
 	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
 	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -265,10 +270,10 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 	}
 
 	if (gpio) {
-		saved_gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
-		saved_gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
+		saved_gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+		saved_gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
 	}
 
 	msleep(4);
@@ -320,8 +325,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 	nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
 
 	if (gpio) {
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
 	}
 
 	return sample;
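
The nvkm_timer_wait_eq() calls are replaced by nvif_msec(), a poll-until-break-or-
timeout construct: the body runs repeatedly, break signals the condition was met, and
a negative result means the timeout expired (hence the < 0 checks above). A rough
userspace analogue built on a GCC statement expression (the real macro's internals are
not shown in this diff and will differ):

#include <stdio.h>
#include <unistd.h>

/* condition met -> body executes 'break' while _us is still > 0 -> the
 * result is positive; loop exhausts -> result is -1 (timeout) */
#define poll_msec(ms, body) ({					\
	long _us = (long)(ms) * 1000;				\
	while (_us > 0) {					\
		body						\
		usleep(100);					\
		_us -= 100;					\
	}							\
	_us > 0 ? _us : -1;					\
})

static int signal_raised = 1;

int main(void)
{
	long ret = poll_msec(10,
		if (signal_raised)
			break;
	);

	printf("%s\n", ret < 0 ? "timeout" : "condition met");
	return 0;
}
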
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7cfb0cbc9b6e..429ab5e3025a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
 			      struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -485,7 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 {
 #ifdef __powerpc__
 	struct drm_device *dev = encoder->dev;
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 
 	/* BIOS scripts usually take care of the backlight, thanks
 	 * Apple for your consistency.
@@ -493,11 +493,11 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 	if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
 	    dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
 		if (mode == DRM_MODE_DPMS_ON) {
-			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
-			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
+			nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
+			nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
 		} else {
-			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
-			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
+			nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+			nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
 		}
 	}
 #endif
@@ -624,8 +624,8 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
 	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-	struct nvkm_i2c_port *port = i2c->find(i2c, 2);
-	struct nvkm_i2c_board_info info[] = {
+	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+	struct nvkm_i2c_bus_probe info[] = {
 		{
 		    {
 		        .type = "sil164",
@@ -639,16 +639,15 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
 	};
 	int type;
 
-	if (!nv_gf4_disp_arch(dev) || !port ||
-	    get_tmds_slave(encoder))
+	if (!nv_gf4_disp_arch(dev) || !bus || get_tmds_slave(encoder))
 		return;
 
-	type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
+	type = nvkm_i2c_bus_probe(bus, "TMDS transmitter", info, NULL, NULL);
 	if (type < 0)
 		return;
 
 	drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-			     &port->adapter, &info[type].dev);
+			     &bus->i2c, &info[type].dev);
 }
 
 static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4131be5507ab..9e650081c357 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -47,7 +47,7 @@ nv04_display_create(struct drm_device *dev)
 	if (!disp)
 		return -ENOMEM;
 
-	nvif_object_map(nvif_object(&drm->device));
+	nvif_object_map(&drm->device.object);
 
 	nouveau_display(dev)->priv = disp;
 	nouveau_display(dev)->dtor = nv04_display_destroy;
@@ -101,7 +101,9 @@ nv04_display_create(struct drm_device *dev)
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-		nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+		struct nvkm_i2c_bus *bus =
+			nvkm_i2c_bus_find(i2c, nv_encoder->dcb->i2c_index);
+		nv_encoder->i2c = bus ? &bus->i2c : NULL;
 	}
 
 	/* Save previous state */
@@ -151,7 +153,7 @@ nv04_display_destroy(struct drm_device *dev)
 	nouveau_display(dev)->priv = NULL;
 	kfree(disp);
 
-	nvif_object_unmap(nvif_object(&drm->device));
+	nvif_object_unmap(&drm->device.object);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index c910c5d5c662..6c9a1e89810f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -172,7 +172,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_bios *bios = nvxx_bios(&drm->device);
 	struct nvbios_init init = {
-		.subdev = nv_subdev(bios),
+		.subdev = &bios->subdev,
 		.bios = bios,
 		.offset = table,
 		.outp = outp,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 42e07afc4c2b..956a833b8200 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -165,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
 		       struct nvkm_pll_vals *pllvals)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
-	struct nvkm_bios *bios = nvxx_bios(device);
+	struct nvif_object *device = &drm->device.object;
+	struct nvkm_bios *bios = nvxx_bios(&drm->device);
 	uint32_t reg1, pll1, pll2 = 0;
 	struct nvbios_pll pll_lim;
 	int ret;
@@ -660,8 +660,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
 		  struct nv04_mode_state *state)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
-	struct nvkm_timer *ptimer = nvxx_timer(device);
+	struct nvif_object *device = &drm->device.object;
 	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
 	uint32_t reg900;
 	int i;
@@ -678,10 +677,10 @@ nv_load_state_ext(struct drm_device *dev, int head,
 		nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
 		nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
 		nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
-		nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1);
-		nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1);
-		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1);
-		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1);
 		nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
 
 		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
@@ -741,8 +740,14 @@ nv_load_state_ext(struct drm_device *dev, int head,
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
-			nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
-			nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+			nvif_msec(&drm->device, 650,
+				if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
+					break;
+			);
+			nvif_msec(&drm->device, 650,
+				if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
+					break;
+			);
 		}
 
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
@@ -765,7 +770,7 @@ static void
 nv_save_state_palette(struct drm_device *dev, int head,
 		      struct nv04_mode_state *state)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	int head_offset = head * NV_PRMDIO_SIZE, i;
 
 	nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -784,7 +789,7 @@ void
 nouveau_hw_load_state_palette(struct drm_device *dev, int head,
 			      struct nv04_mode_state *state)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	int head_offset = head * NV_PRMDIO_SIZE, i;
 
 	nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 6c796178bf0c..3bded60c5596 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -60,7 +60,7 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
 static inline uint32_t NVReadCRTC(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint32_t val;
 	if (head)
 		reg += NV_PCRTC0_SIZE;
@@ -71,7 +71,7 @@ static inline uint32_t NVReadCRTC(struct drm_device *dev,
 static inline void NVWriteCRTC(struct drm_device *dev,
 					int head, uint32_t reg, uint32_t val)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	if (head)
 		reg += NV_PCRTC0_SIZE;
 	nvif_wr32(device, reg, val);
@@ -80,7 +80,7 @@ static inline void NVWriteCRTC(struct drm_device *dev,
 static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint32_t val;
 	if (head)
 		reg += NV_PRAMDAC0_SIZE;
@@ -91,7 +91,7 @@ static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
 static inline void NVWriteRAMDAC(struct drm_device *dev,
 					int head, uint32_t reg, uint32_t val)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	if (head)
 		reg += NV_PRAMDAC0_SIZE;
 	nvif_wr32(device, reg, val);
@@ -120,7 +120,7 @@ static inline void nv_write_tmds(struct drm_device *dev,
 static inline void NVWriteVgaCrtc(struct drm_device *dev,
 					int head, uint8_t index, uint8_t value)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
 	nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
 }
@@ -128,7 +128,7 @@ static inline void NVWriteVgaCrtc(struct drm_device *dev,
 static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
 					int head, uint8_t index)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint8_t val;
 	nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
 	val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
@@ -165,7 +165,7 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
 static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t val;
 
@@ -181,7 +181,7 @@ static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
 static inline void NVWritePRMVIO(struct drm_device *dev,
 					int head, uint32_t reg, uint8_t value)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
@@ -194,14 +194,14 @@ static inline void NVWritePRMVIO(struct drm_device *dev,
 
 static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
 	nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
 }
 
 static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
 	return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
 }
@@ -209,7 +209,7 @@ static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
 static inline void NVWriteVgaAttr(struct drm_device *dev,
 					int head, uint8_t index, uint8_t value)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	if (NVGetEnablePalette(dev, head))
 		index &= ~0x20;
 	else
@@ -223,7 +223,7 @@ static inline void NVWriteVgaAttr(struct drm_device *dev,
 static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
 					int head, uint8_t index)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint8_t val;
 	if (NVGetEnablePalette(dev, head))
 		index &= ~0x20;
@@ -259,7 +259,7 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
 static inline bool
 nv_heads_tied(struct drm_device *dev)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (drm->device.info.chipset == 0x11)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 9f2498571d09..aeebdd402478 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -96,7 +96,8 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		  uint32_t src_x, uint32_t src_y,
 		  uint32_t src_w, uint32_t src_h)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nouveau_drm *drm = nouveau_drm(plane->dev);
+	struct nvif_object *dev = &drm->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -118,7 +119,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 	if (format > 0xffff)
 		return -ERANGE;
 
-	if (dev->info.chipset >= 0x30) {
+	if (drm->device.info.chipset >= 0x30) {
 		if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
 			return -ERANGE;
 	} else {
@@ -173,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 static int
 nv10_disable_plane(struct drm_plane *plane)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 
@@ -197,7 +198,7 @@ nv_destroy_plane(struct drm_plane *plane)
 static void
 nv10_set_params(struct nouveau_plane *plane)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object;
 	u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
 	u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
 		(cos_mul(plane->hue, plane->saturation) & 0xffff);
@@ -261,7 +262,7 @@ nv10_overlay_init(struct drm_device *device)
 {
 	struct nouveau_drm *drm = nouveau_drm(device);
 	struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
-	int num_formats = ARRAY_SIZE(formats);
+	unsigned int num_formats = ARRAY_SIZE(formats);
 	int ret;
 
 	if (!plane)
@@ -346,7 +347,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		  uint32_t src_x, uint32_t src_y,
 		  uint32_t src_w, uint32_t src_h)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -426,7 +427,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 static int
 nv04_disable_plane(struct drm_plane *plane)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 70e95cf6fd19..5345eb5378a8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -35,7 +35,7 @@
 
 #include <drm/i2c/ch7006.h>
 
-static struct nvkm_i2c_board_info nv04_tv_encoder_info[] = {
+static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
 	{
 		{
 			I2C_BOARD_INFO("ch7006", 0x75),
@@ -55,9 +55,13 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-
-	return i2c->identify(i2c, i2c_index, "TV encoder",
-			     nv04_tv_encoder_info, NULL, NULL);
+	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
+	if (bus) {
+		return nvkm_i2c_bus_probe(bus, "TV encoder",
+					  nv04_tv_encoder_info,
+					  NULL, NULL);
+	}
+	return -ENODEV;
 }
 
 
@@ -205,7 +209,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 	struct drm_device *dev = connector->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-	struct nvkm_i2c_port *port = i2c->find(i2c, entry->i2c_index);
+	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
 	int type, ret;
 
 	/* Ensure that we can talk to this encoder */
@@ -231,7 +235,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 
 	/* Run the slave-specific initialization */
 	ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-				   &port->adapter,
+				   &bus->i2c,
 				   &nv04_tv_encoder_info[type].dev);
 	if (ret < 0)
 		goto fail_cleanup;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index d9720dda8385..b734195d80a0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -62,8 +62,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 	head = (dacclk & 0x100) >> 8;
 
 	/* Save the previous state. */
-	gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
-	gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+	gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+	gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
 	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
 	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
 	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -74,8 +74,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
 
 	/* Prepare the DAC for load detection.  */
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
 
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -120,8 +120,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
 
 	return sample;
 }
@@ -130,18 +130,10 @@ static bool
 get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvkm_device *device = nvxx_device(&drm->device);
 
-	/* Zotac FX5200 */
-	if (nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x1035) ||
-	    nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x2035)) {
-		*pin_mask = 0xc;
-		return false;
-	}
-
-	/* MSI nForce2 IGP */
-	if (nv_device_match(nvxx_object(device), 0x01f0, 0x1462, 0x5710)) {
-		*pin_mask = 0xc;
+	if (device->quirk && device->quirk->tv_pin_mask) {
+		*pin_mask = device->quirk->tv_pin_mask;
 		return false;
 	}
 
@@ -395,8 +387,8 @@ static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 
 	nv_load_ptv(dev, regs, 200);
 
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
 
 	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
 }
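
The per-board special cases above move out of the display code and into a central
device quirk table consulted via device->quirk->tv_pin_mask. A generic sketch of that
table-lookup pattern, seeded with the exact PCI IDs the removed code matched (the
struct layout is hypothetical; the real nvkm quirk structure is not in this diff):

#include <stdint.h>
#include <stdio.h>

struct quirk {
	uint16_t device, sub_vendor, sub_device;
	uint32_t tv_pin_mask;
};

/* entries taken from the branches removed above */
static const struct quirk quirks[] = {
	{ 0x0322, 0x19da, 0x1035, 0xc },	/* Zotac FX5200 */
	{ 0x0322, 0x19da, 0x2035, 0xc },	/* Zotac FX5200 */
	{ 0x01f0, 0x1462, 0x5710, 0xc },	/* MSI nForce2 IGP */
};

static const struct quirk *find_quirk(uint16_t dev, uint16_t sv, uint16_t sd)
{
	for (unsigned i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].device == dev &&
		    quirks[i].sub_vendor == sv &&
		    quirks[i].sub_device == sd)
			return &quirks[i];
	return NULL;
}

int main(void)
{
	const struct quirk *q = find_quirk(0x0322, 0x19da, 0x1035);

	if (q)
		printf("tv_pin_mask = 0x%x\n", q->tv_pin_mask);
	return 0;
}
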
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 225894cdcac2..459910b6bb32 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -131,13 +131,13 @@ static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
 				uint32_t val)
 {
 	struct nvif_device *device = &nouveau_drm(dev)->device;
-	nvif_wr32(device, reg, val);
+	nvif_wr32(&device->object, reg, val);
 }
 
 static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
 {
 	struct nvif_device *device = &nouveau_drm(dev)->device;
-	return nvif_rd32(device, reg);
+	return nvif_rd32(&device->object, reg);
 }
 
 static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 64f8b2f687d2..95a64d89547c 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -45,6 +45,11 @@
 #define GM107_DISP                                                   0x00009470
 #define GM204_DISP                                                   0x00009570
 
+#define NV31_MPEG                                                    0x00003174
+#define G82_MPEG                                                     0x00008274
+
+#define NV74_VP2                                                     0x00007476
+
 #define NV50_DISP_CURSOR                                             0x0000507a
 #define G82_DISP_CURSOR                                              0x0000827a
 #define GT214_DISP_CURSOR                                            0x0000857a
@@ -94,15 +99,40 @@
 #define MAXWELL_A                                                    0x0000b097
 #define MAXWELL_B                                                    0x0000b197
 
+#define NV74_BSP                                                     0x000074b0
+
+#define GT212_MSVLD                                                  0x000085b1
+#define IGT21A_MSVLD                                                 0x000086b1
+#define G98_MSVLD                                                    0x000088b1
+#define GF100_MSVLD                                                  0x000090b1
+#define GK104_MSVLD                                                  0x000095b1
+
+#define GT212_MSPDEC                                                 0x000085b2
+#define G98_MSPDEC                                                   0x000088b2
+#define GF100_MSPDEC                                                 0x000090b2
+#define GK104_MSPDEC                                                 0x000095b2
+
+#define GT212_MSPPP                                                  0x000085b3
+#define G98_MSPPP                                                    0x000088b3
+#define GF100_MSPPP                                                  0x000090b3
+
+#define G98_SEC                                                      0x000088b4
+
+#define GT212_DMA                                                    0x000085b5
+#define FERMI_DMA                                                    0x000090b5
+#define KEPLER_DMA_COPY_A                                            0x0000a0b5
+#define MAXWELL_DMA_COPY_A                                           0x0000b0b5
+
+#define FERMI_DECOMPRESS                                             0x000090b8
+
 #define FERMI_COMPUTE_A                                              0x000090c0
 #define FERMI_COMPUTE_B                                              0x000091c0
-
 #define KEPLER_COMPUTE_A                                             0x0000a0c0
 #define KEPLER_COMPUTE_B                                             0x0000a1c0
-
 #define MAXWELL_COMPUTE_A                                            0x0000b0c0
 #define MAXWELL_COMPUTE_B                                            0x0000b1c0
 
+#define NV74_CIPHER                                                  0x000074c1
 
 /*******************************************************************************
  * client
@@ -126,32 +156,10 @@ struct nv_device_v0 {
 	__u8  version;
 	__u8  pad01[7];
 	__u64 device;	/* device identifier, ~0 for client default */
-#define NV_DEVICE_V0_DISABLE_IDENTIFY                     0x0000000000000001ULL
-#define NV_DEVICE_V0_DISABLE_MMIO                         0x0000000000000002ULL
-#define NV_DEVICE_V0_DISABLE_VBIOS                        0x0000000000000004ULL
-#define NV_DEVICE_V0_DISABLE_CORE                         0x0000000000000008ULL
-#define NV_DEVICE_V0_DISABLE_DISP                         0x0000000000010000ULL
-#define NV_DEVICE_V0_DISABLE_FIFO                         0x0000000000020000ULL
-#define NV_DEVICE_V0_DISABLE_GR                           0x0000000100000000ULL
-#define NV_DEVICE_V0_DISABLE_MPEG                         0x0000000200000000ULL
-#define NV_DEVICE_V0_DISABLE_ME                           0x0000000400000000ULL
-#define NV_DEVICE_V0_DISABLE_VP                           0x0000000800000000ULL
-#define NV_DEVICE_V0_DISABLE_CIPHER                       0x0000001000000000ULL
-#define NV_DEVICE_V0_DISABLE_BSP                          0x0000002000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSPPP                        0x0000004000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE0                          0x0000008000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE1                          0x0000010000000000ULL
-#define NV_DEVICE_V0_DISABLE_VIC                          0x0000020000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSENC                        0x0000040000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE2                          0x0000080000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSVLD                        0x0000100000000000ULL
-#define NV_DEVICE_V0_DISABLE_SEC                          0x0000200000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSPDEC                       0x0000400000000000ULL
-	__u64 disable;	/* disable particular subsystems */
-	__u64 debug0;	/* as above, but *internal* ids, and *NOT* ABI */
 };
 
 #define NV_DEVICE_V0_INFO                                                  0x00
+#define NV_DEVICE_V0_TIME                                                  0x01
 
 struct nv_device_info_v0 {
 	__u8  version;
@@ -176,6 +184,14 @@ struct nv_device_info_v0 {
 	__u8  pad06[2];
 	__u64 ram_size;
 	__u64 ram_user;
+	char  chip[16];
+	char  name[64];
+};
+
+struct nv_device_time_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	__u64 time;
 };
 
 
@@ -235,13 +251,13 @@ struct gf100_dma_v0 {
 	__u8  pad03[5];
 };
 
-struct gf110_dma_v0 {
+struct gf119_dma_v0 {
 	__u8  version;
-#define GF110_DMA_V0_PAGE_LP                                               0x00
-#define GF110_DMA_V0_PAGE_SP                                               0x01
+#define GF119_DMA_V0_PAGE_LP                                               0x00
+#define GF119_DMA_V0_PAGE_SP                                               0x01
 	__u8  page;
-#define GF110_DMA_V0_KIND_PITCH                                            0x00
-#define GF110_DMA_V0_KIND_VM                                               0xff
+#define GF119_DMA_V0_KIND_PITCH                                            0x00
+#define GF119_DMA_V0_KIND_VM                                               0xff
 	__u8  kind;
 	__u8  pad03[5];
 };
@@ -251,33 +267,74 @@ struct gf110_dma_v0 {
  * perfmon
  ******************************************************************************/
 
-struct nvif_perfctr_v0 {
+#define NVIF_PERFMON_V0_QUERY_DOMAIN                                       0x00
+#define NVIF_PERFMON_V0_QUERY_SIGNAL                                       0x01
+#define NVIF_PERFMON_V0_QUERY_SOURCE                                       0x02
+
+struct nvif_perfmon_query_domain_v0 {
 	__u8  version;
-	__u8  pad01[1];
-	__u16 logic_op;
-	__u8  pad04[4];
-	char  name[4][64];
+	__u8  id;
+	__u8  counter_nr;
+	__u8  iter;
+	__u16 signal_nr;
+	__u8  pad05[2];
+	char  name[64];
 };
 
-#define NVIF_PERFCTR_V0_QUERY                                              0x00
-#define NVIF_PERFCTR_V0_SAMPLE                                             0x01
-#define NVIF_PERFCTR_V0_READ                                               0x02
+struct nvif_perfmon_query_signal_v0 {
+	__u8  version;
+	__u8  domain;
+	__u16 iter;
+	__u8  signal;
+	__u8  source_nr;
+	__u8  pad05[2];
+	char  name[64];
+};
 
-struct nvif_perfctr_query_v0 {
+struct nvif_perfmon_query_source_v0 {
 	__u8  version;
-	__u8  pad01[3];
-	__u32 iter;
+	__u8  domain;
+	__u8  signal;
+	__u8  iter;
+	__u8  pad04[4];
+	__u32 source;
+	__u32 mask;
 	char  name[64];
 };
 
-struct nvif_perfctr_sample {
+
+/*******************************************************************************
+ * perfdom
+ ******************************************************************************/
+
+struct nvif_perfdom_v0 {
+	__u8  version;
+	__u8  domain;
+	__u8  mode;
+	__u8  pad03[1];
+	struct {
+		__u8  signal[4];
+		__u64 source[4][8];
+		__u16 logic_op;
+	} ctr[4];
 };
 
-struct nvif_perfctr_read_v0 {
+#define NVIF_PERFDOM_V0_INIT                                               0x00
+#define NVIF_PERFDOM_V0_SAMPLE                                             0x01
+#define NVIF_PERFDOM_V0_READ                                               0x02
+
+struct nvif_perfdom_init {
+};
+
+struct nvif_perfdom_sample {
+};
+
+struct nvif_perfdom_read_v0 {
 	__u8  version;
 	__u8  pad01[7];
-	__u32 ctr;
+	__u32 ctr[4];
 	__u32 clk;
+	__u8  pad04[4];
 };
 
 
@@ -337,7 +394,16 @@ struct nv03_channel_dma_v0 {
 	__u8  version;
 	__u8  chid;
 	__u8  pad02[2];
-	__u32 pushbuf;
+	__u32 offset;
+	__u64 pushbuf;
+};
+
+struct nv50_channel_dma_v0 {
+	__u8  version;
+	__u8  chid;
+	__u8  pad02[6];
+	__u64 vm;
+	__u64 pushbuf;
 	__u64 offset;
 };
 
@@ -350,10 +416,20 @@ struct nv03_channel_dma_v0 {
 struct nv50_channel_gpfifo_v0 {
 	__u8  version;
 	__u8  chid;
-	__u8  pad01[6];
-	__u32 pushbuf;
+	__u8  pad02[2];
 	__u32 ilength;
 	__u64 ioffset;
+	__u64 pushbuf;
+	__u64 vm;
+};
+
+struct fermi_channel_gpfifo_v0 {
+	__u8  version;
+	__u8  chid;
+	__u8  pad02[2];
+	__u32 ilength;
+	__u64 ioffset;
+	__u64 vm;
 };
 
 struct kepler_channel_gpfifo_a_v0 {
@@ -367,10 +443,9 @@ struct kepler_channel_gpfifo_a_v0 {
 #define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC                              0x40
 	__u8  engine;
 	__u16 chid;
-	__u8  pad04[4];
-	__u32 pushbuf;
 	__u32 ilength;
 	__u64 ioffset;
+	__u64 vm;
 };
 
 /*******************************************************************************
@@ -491,8 +566,8 @@ struct nv50_disp_pior_pwr_v0 {
 /* core */
 struct nv50_disp_core_channel_dma_v0 {
 	__u8  version;
-	__u8  pad01[3];
-	__u32 pushbuf;
+	__u8  pad01[7];
+	__u64 pushbuf;
 };
 
 #define NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT                          0x00
@@ -509,9 +584,9 @@ struct nv50_disp_cursor_v0 {
 /* base */
 struct nv50_disp_base_channel_dma_v0 {
 	__u8  version;
-	__u8  pad01[2];
 	__u8  head;
-	__u32 pushbuf;
+	__u8  pad02[6];
+	__u64 pushbuf;
 };
 
 #define NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT                          0x00
@@ -519,9 +594,9 @@ struct nv50_disp_base_channel_dma_v0 {
 /* overlay */
 struct nv50_disp_overlay_channel_dma_v0 {
 	__u8  version;
-	__u8  pad01[2];
 	__u8  head;
-	__u32 pushbuf;
+	__u8  pad02[6];
+	__u64 pushbuf;
 };
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT                       0x00
@@ -536,6 +611,20 @@ struct nv50_disp_overlay_v0 {
 #define NV50_DISP_OVERLAY_V0_NTFY_UEVENT                                   0x00
 
 /*******************************************************************************
+ * software
+ ******************************************************************************/
+
+#define NVSW_NTFY_UEVENT                                                   0x00
+
+#define NV04_NVSW_GET_REF                                                  0x00
+
+struct nv04_nvsw_get_ref_v0 {
+	__u8  version;
+	__u8  pad01[3];
+	__u32 ref;
+};
+
+/*******************************************************************************
  * fermi
  ******************************************************************************/
 
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index eca648ef0f7a..4a7f6f7b836d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -4,36 +4,25 @@
 #include <nvif/object.h>
 
 struct nvif_client {
-	struct nvif_object base;
-	struct nvif_object *object; /*XXX: hack for nvif_object() */
+	struct nvif_object object;
 	const struct nvif_driver *driver;
+	u64 version;
+	u8 route;
 	bool super;
 };
 
-static inline struct nvif_client *
-nvif_client(struct nvif_object *object)
-{
-	while (object && object->parent != object)
-		object = object->parent;
-	return (void *)object;
-}
-
-int  nvif_client_init(void (*dtor)(struct nvif_client *), const char *,
-		      const char *, u64, const char *, const char *,
+int  nvif_client_init(const char *drv, const char *name, u64 device,
+		      const char *cfg, const char *dbg,
 		      struct nvif_client *);
 void nvif_client_fini(struct nvif_client *);
-int  nvif_client_new(const char *, const char *, u64, const char *,
-		     const char *, struct nvif_client **);
-void nvif_client_ref(struct nvif_client *, struct nvif_client **);
 int  nvif_client_ioctl(struct nvif_client *, void *, u32);
 int  nvif_client_suspend(struct nvif_client *);
 int  nvif_client_resume(struct nvif_client *);
 
 /*XXX*/
 #include <core/client.h>
-#define nvxx_client(a) ({ \
-	struct nvif_client *_client = nvif_client(nvif_object(a)); \
-	nvkm_client(_client->base.priv); \
+#define nvxx_client(a) ({                                                      \
+	struct nvif_client *_client = (a);                                     \
+	(struct nvkm_client *)_client->object.priv;                            \
 })
-
 #endif
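
With the dtor callback and the separate _new()/_ref() constructors gone, a client is now embedded in its owner and initialised in place. A minimal sketch with invented driver and client names (real callers pass the DRM driver's config/debug option strings):

static int
example_client_init(struct nvif_client *client)
{
	/* arguments: drv, name, device, cfgopt, dbgopt, client;
	 * pair with nvif_client_fini() on teardown */
	return nvif_client_init("drm", "example", 0, NULL, NULL, client);
}
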
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 88553a741ab7..700a9b206726 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -5,26 +5,35 @@
 #include <nvif/class.h>
 
 struct nvif_device {
-	struct nvif_object base;
-	struct nvif_object *object; /*XXX: hack for nvif_object() */
+	struct nvif_object object;
 	struct nv_device_info_v0 info;
 };
 
-static inline struct nvif_device *
-nvif_device(struct nvif_object *object)
-{
-	while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
-		object = object->parent;
-	return (void *)object;
-}
-
-int  nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
-		      u32 handle, u32 oclass, void *, u32,
+int  nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
 		      struct nvif_device *);
 void nvif_device_fini(struct nvif_device *);
-int  nvif_device_new(struct nvif_object *, u32 handle, u32 oclass,
-		     void *, u32, struct nvif_device **);
-void nvif_device_ref(struct nvif_device *, struct nvif_device **);
+u64  nvif_device_time(struct nvif_device *);
+
+/* Delay based on GPU time (i.e. PTIMER).
+ *
+ * Returns -ETIMEDOUT unless the loop was terminated with 'break',
+ * in which case it returns the number of nanoseconds taken instead.
+ */
+#define nvif_nsec(d,n,cond...) ({                                              \
+	struct nvif_device *_device = (d);                                     \
+	u64 _nsecs = (n), _time0 = nvif_device_time(_device);                  \
+	s64 _taken = 0;                                                        \
+                                                                               \
+	do {                                                                   \
+		cond                                                           \
+	} while (_taken = nvif_device_time(_device) - _time0, _taken < _nsecs);\
+                                                                               \
+	if (_taken >= _nsecs)                                                  \
+		_taken = -ETIMEDOUT;                                           \
+	_taken;                                                                \
+})
+#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
+#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
 
 /*XXX*/
 #include <subdev/bios.h>
@@ -36,26 +45,30 @@ void nvif_device_ref(struct nvif_device *, struct nvif_device **);
 #include <subdev/i2c.h>
 #include <subdev/timer.h>
 #include <subdev/therm.h>
+#include <subdev/pci.h>
 
-#define nvxx_device(a) nv_device(nvxx_object((a)))
-#define nvxx_bios(a) nvkm_bios(nvxx_device(a))
-#define nvxx_fb(a) nvkm_fb(nvxx_device(a))
-#define nvxx_mmu(a) nvkm_mmu(nvxx_device(a))
-#define nvxx_bar(a) nvkm_bar(nvxx_device(a))
-#define nvxx_gpio(a) nvkm_gpio(nvxx_device(a))
-#define nvxx_clk(a) nvkm_clk(nvxx_device(a))
-#define nvxx_i2c(a) nvkm_i2c(nvxx_device(a))
-#define nvxx_timer(a) nvkm_timer(nvxx_device(a))
-#define nvxx_wait(a,b,c,d) nv_wait(nvxx_timer(a), (b), (c), (d))
-#define nvxx_wait_cb(a,b,c) nv_wait_cb(nvxx_timer(a), (b), (c))
-#define nvxx_therm(a) nvkm_therm(nvxx_device(a))
+#define nvxx_device(a) ({                                                      \
+	struct nvif_device *_device = (a);                                     \
+	struct {                                                               \
+		struct nvkm_object object;                                     \
+		struct nvkm_device *device;                                    \
+	} *_udevice = _device->object.priv;                                    \
+	_udevice->device;                                                      \
+})
+#define nvxx_bios(a) nvxx_device(a)->bios
+#define nvxx_fb(a) nvxx_device(a)->fb
+#define nvxx_mmu(a) nvxx_device(a)->mmu
+#define nvxx_bar(a) nvxx_device(a)->bar
+#define nvxx_gpio(a) nvxx_device(a)->gpio
+#define nvxx_clk(a) nvxx_device(a)->clk
+#define nvxx_i2c(a) nvxx_device(a)->i2c
+#define nvxx_therm(a) nvxx_device(a)->therm
 
 #include <core/device.h>
 #include <engine/fifo.h>
 #include <engine/gr.h>
 #include <engine/sw.h>
 
-#define nvxx_fifo(a) nvkm_fifo(nvxx_device(a))
-#define nvxx_fifo_chan(a) ((struct nvkm_fifo_chan *)nvxx_object(a))
-#define nvxx_gr(a) ((struct nvkm_gr *)nvkm_engine(nvxx_object(a), NVDEV_ENGINE_GR))
+#define nvxx_fifo(a) nvxx_device(a)->fifo
+#define nvxx_gr(a) nvxx_device(a)->gr
 #endif
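
The nvif_msec()/nvif_usec() helpers above wrap nvif_nsec(), polling on PTIMER-derived GPU time rather than CPU jiffies; the statement body runs repeatedly until it executes 'break' or the budget expires. A usage sketch, with the object, register offset and busy bit invented purely for illustration:

static int
example_wait_idle(struct nvif_device *device, struct nvif_object *object)
{
	s64 taken = nvif_msec(device, 2000,
		if (!(nvif_rd32(object, 0x000004) & 0x00000001))
			break;	/* assumed busy bit cleared */
	);
	return taken < 0 ? taken : 0;	/* -ETIMEDOUT on expiry */
}
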
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 4cd8e323b23d..b0ac0215ebf9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -1,11 +1,10 @@
 #ifndef __NVIF_IOCTL_H__
 #define __NVIF_IOCTL_H__
 
+#define NVIF_VERSION_LATEST                               0x0000000000000000ULL
+
 struct nvif_ioctl_v0 {
 	__u8  version;
-#define NVIF_IOCTL_V0_OWNER_NVIF                                           0x00
-#define NVIF_IOCTL_V0_OWNER_ANY                                            0xff
-	__u8  owner;
 #define NVIF_IOCTL_V0_NOP                                                  0x00
 #define NVIF_IOCTL_V0_SCLASS                                               0x01
 #define NVIF_IOCTL_V0_NEW                                                  0x02
@@ -20,17 +19,20 @@ struct nvif_ioctl_v0 {
 #define NVIF_IOCTL_V0_NTFY_GET                                             0x0b
 #define NVIF_IOCTL_V0_NTFY_PUT                                             0x0c
 	__u8  type;
-	__u8  path_nr;
+	__u8  pad02[4];
+#define NVIF_IOCTL_V0_OWNER_NVIF                                           0x00
+#define NVIF_IOCTL_V0_OWNER_ANY                                            0xff
+	__u8  owner;
 #define NVIF_IOCTL_V0_ROUTE_NVIF                                           0x00
 #define NVIF_IOCTL_V0_ROUTE_HIDDEN                                         0xff
-	__u8  pad04[3];
 	__u8  route;
 	__u64 token;
-	__u32 path[8];		/* in reverse */
+	__u64 object;
 	__u8  data[];		/* ioctl data (below) */
 };
 
-struct nvif_ioctl_nop {
+struct nvif_ioctl_nop_v0 {
+	__u64 version;
 };
 
 struct nvif_ioctl_sclass_v0 {
@@ -38,7 +40,11 @@ struct nvif_ioctl_sclass_v0 {
 	__u8  version;
 	__u8  count;
 	__u8  pad02[6];
-	__u32 oclass[];
+	struct nvif_ioctl_sclass_oclass_v0 {
+		__s32 oclass;
+		__s16 minver;
+		__s16 maxver;
+	} oclass[];
 };
 
 struct nvif_ioctl_new_v0 {
@@ -47,11 +53,17 @@ struct nvif_ioctl_new_v0 {
 	__u8  pad01[6];
 	__u8  route;
 	__u64 token;
+	__u64 object;
 	__u32 handle;
 /* these class numbers are made up by us, and not nvidia-assigned */
-#define NVIF_IOCTL_NEW_V0_PERFCTR                                    0x0000ffff
-#define NVIF_IOCTL_NEW_V0_CONTROL                                    0x0000fffe
-	__u32 oclass;
+#define NVIF_IOCTL_NEW_V0_CONTROL                                            -1
+#define NVIF_IOCTL_NEW_V0_PERFMON                                            -2
+#define NVIF_IOCTL_NEW_V0_PERFDOM                                            -3
+#define NVIF_IOCTL_NEW_V0_SW_NV04                                            -4
+#define NVIF_IOCTL_NEW_V0_SW_NV10                                            -5
+#define NVIF_IOCTL_NEW_V0_SW_NV50                                            -6
+#define NVIF_IOCTL_NEW_V0_SW_GF100                                           -7
+	__s32 oclass;
 	__u8  data[];		/* class data (class.h) */
 };
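
Objects are now addressed by a single 64-bit token in the ioctl header instead of the reversed path[8] array, and class identifiers become signed so the made-up software classes can live below zero. A sketch of how a NEW request might be laid out (helper name invented, not part of the patch):

static void
example_fill_new(void *buf, u64 parent, u32 handle, s32 oclass)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
	} *args = buf;

	args->ioctl.version = 0;
	args->ioctl.type    = NVIF_IOCTL_V0_NEW;
	args->ioctl.object  = parent;	/* token, replaces path[] */
	args->new.version   = 0;
	args->new.handle    = handle;
	args->new.oclass    = oclass;	/* e.g. NVIF_IOCTL_NEW_V0_SW_NV50 */
}
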
 
diff --git a/drivers/gpu/drm/nouveau/include/nvif/notify.h b/drivers/gpu/drm/nouveau/include/nvif/notify.h
index 9ebfa3b45e76..51e2eb580809 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/notify.h
@@ -23,17 +23,11 @@ struct nvif_notify {
 	struct work_struct work;
 };
 
-int  nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *),
-		      int (*func)(struct nvif_notify *), bool work, u8 type,
-		      void *data, u32 size, u32 reply, struct nvif_notify *);
+int  nvif_notify_init(struct nvif_object *, int (*func)(struct nvif_notify *),
+		      bool work, u8 type, void *data, u32 size, u32 reply,
+		      struct nvif_notify *);
 int  nvif_notify_fini(struct nvif_notify *);
 int  nvif_notify_get(struct nvif_notify *);
 int  nvif_notify_put(struct nvif_notify *);
 int  nvif_notify(const void *, u32, const void *, u32);
-
-int  nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *),
-		     bool work, u8 type, void *data, u32 size, u32 reply,
-		     struct nvif_notify **);
-void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **);
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 04c874707b96..8d815967767f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -3,73 +3,73 @@
 
 #include <nvif/os.h>
 
+struct nvif_sclass {
+	s32 oclass;
+	int minver;
+	int maxver;
+};
+
 struct nvif_object {
-	struct nvif_object *parent;
-	struct nvif_object *object; /*XXX: hack for nvif_object() */
-	struct kref refcount;
+	struct nvif_client *client;
 	u32 handle;
-	u32 oclass;
-	void *data;
-	u32   size;
+	s32 oclass;
 	void *priv; /*XXX: hack */
-	void (*dtor)(struct nvif_object *);
 	struct {
 		void __iomem *ptr;
 		u32 size;
 	} map;
 };
 
-int  nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *),
-		      u32 handle, u32 oclass, void *, u32,
+int  nvif_object_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
 		      struct nvif_object *);
 void nvif_object_fini(struct nvif_object *);
-int  nvif_object_new(struct nvif_object *, u32 handle, u32 oclass,
-		     void *, u32, struct nvif_object **);
-void nvif_object_ref(struct nvif_object *, struct nvif_object **);
 int  nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
-int  nvif_object_sclass(struct nvif_object *, u32 *, int);
+int  nvif_object_sclass_get(struct nvif_object *, struct nvif_sclass **);
+void nvif_object_sclass_put(struct nvif_sclass **);
 u32  nvif_object_rd(struct nvif_object *, int, u64);
 void nvif_object_wr(struct nvif_object *, int, u64, u32);
 int  nvif_object_mthd(struct nvif_object *, u32, void *, u32);
 int  nvif_object_map(struct nvif_object *);
 void nvif_object_unmap(struct nvif_object *);
 
+#define nvif_handle(a) (unsigned long)(void *)(a)
 #define nvif_object(a) (a)->object
 
-#define ioread8_native ioread8
-#define iowrite8_native iowrite8
-#define nvif_rd(a,b,c) ({                                                      \
-	struct nvif_object *_object = nvif_object(a);                          \
+#define nvif_rd(a,f,b,c) ({                                                    \
+	struct nvif_object *_object = (a);                                     \
 	u32 _data;                                                             \
 	if (likely(_object->map.ptr))                                          \
-		_data = ioread##b##_native((u8 __iomem *)_object->map.ptr + (c));      \
+		_data = f((u8 __iomem *)_object->map.ptr + (c));               \
 	else                                                                   \
-		_data = nvif_object_rd(_object, (b) / 8, (c));                 \
+		_data = nvif_object_rd(_object, (b), (c));                     \
 	_data;                                                                 \
 })
-#define nvif_wr(a,b,c,d) ({                                                    \
-	struct nvif_object *_object = nvif_object(a);                          \
+#define nvif_wr(a,f,b,c,d) ({                                                  \
+	struct nvif_object *_object = (a);                                     \
 	if (likely(_object->map.ptr))                                          \
-		iowrite##b##_native((d), (u8 __iomem *)_object->map.ptr + (c));        \
+		f((d), (u8 __iomem *)_object->map.ptr + (c));                  \
 	else                                                                   \
-		nvif_object_wr(_object, (b) / 8, (c), (d));                    \
+		nvif_object_wr(_object, (b), (c), (d));                        \
 })
-#define nvif_rd08(a,b) ({ u8  _v = nvif_rd((a), 8, (b)); _v; })
-#define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; })
-#define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; })
-#define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c))
-#define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c))
-#define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c))
+#define nvif_rd08(a,b) ({ ((u8)nvif_rd((a), ioread8, 1, (b))); })
+#define nvif_rd16(a,b) ({ ((u16)nvif_rd((a), ioread16_native, 2, (b))); })
+#define nvif_rd32(a,b) ({ ((u32)nvif_rd((a), ioread32_native, 4, (b))); })
+#define nvif_wr08(a,b,c) nvif_wr((a), iowrite8, 1, (b), (u8)(c))
+#define nvif_wr16(a,b,c) nvif_wr((a), iowrite16_native, 2, (b), (u16)(c))
+#define nvif_wr32(a,b,c) nvif_wr((a), iowrite32_native, 4, (b), (u32)(c))
 #define nvif_mask(a,b,c,d) ({                                                  \
-	u32 _v = nvif_rd32(nvif_object(a), (b));                               \
-	nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d));                     \
-	_v;                                                                    \
+	struct nvif_object *__object = (a);                                    \
+	u32 _addr = (b), _data = nvif_rd32(__object, _addr);                   \
+	nvif_wr32(__object, _addr, (_data & ~(c)) | (d));                      \
+	_data;                                                                 \
 })
 
-#define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d))
+#define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d))
 
 /*XXX*/
 #include <core/object.h>
-#define nvxx_object(a) ((struct nvkm_object *)nvif_object(a)->priv)
-
+#define nvxx_object(a) ({                                                      \
+	struct nvif_object *_object = (a);                                     \
+	(struct nvkm_object *)_object->priv;                                   \
+})
 #endif
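
nvif_rd()/nvif_wr() now take the io accessor and byte width explicitly, and nvif_mask() evaluates its object argument exactly once, so expressions with side effects are safe. A short read-modify-write sketch against an invented register offset:

static u32
example_set_bit(struct nvif_object *object)
{
	/* returns the pre-modification value, like the macro itself */
	return nvif_mask(object, 0x000100, 0x00000001, 0x00000001);
}
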
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index bdd05ee7ec72..3accc99d8e0b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -24,9 +24,15 @@
 #include <linux/power_supply.h>
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
+#include <linux/agp_backend.h>
+#include <linux/reset.h>
+#include <linux/iommu.h>
 
 #include <asm/unaligned.h>
 
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
+
 #ifndef ioread32_native
 #ifdef __BIG_ENDIAN
 #define ioread16_native ioread16be
@@ -40,5 +46,4 @@
 #define iowrite32_native iowrite32
 #endif /* def __BIG_ENDIAN else */
 #endif /* !ioread32_native */
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index a35b38244502..eaf5905a87a3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,55 +1,52 @@
 #ifndef __NVKM_CLIENT_H__
 #define __NVKM_CLIENT_H__
-#include <core/namedb.h>
+#include <core/object.h>
 
 struct nvkm_client {
-	struct nvkm_namedb namedb;
-	struct nvkm_handle *root;
-	struct nvkm_object *device;
+	struct nvkm_object object;
 	char name[32];
+	u64 device;
 	u32 debug;
-	struct nvkm_vm *vm;
+
+	struct nvkm_client_notify *notify[16];
+	struct rb_root objroot;
+	struct rb_root dmaroot;
+
 	bool super;
 	void *data;
-
 	int (*ntfy)(const void *, u32, const void *, u32);
-	struct nvkm_client_notify *notify[16];
+
+	struct nvkm_vm *vm;
 };
 
-static inline struct nvkm_client *
-nv_client(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
-		nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-static inline struct nvkm_client *
-nvkm_client(void *obj)
-{
-	struct nvkm_object *client = nv_object(obj);
-	while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
-		client = client->parent;
-	return (void *)client;
-}
-
-#define nvkm_client_create(n,c,oc,od,d)                                     \
-	nvkm_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
-
-int  nvkm_client_create_(const char *name, u64 device, const char *cfg,
-			    const char *dbg, int, void **);
-#define nvkm_client_destroy(p)                                              \
-	nvkm_namedb_destroy(&(p)->base)
+bool nvkm_client_insert(struct nvkm_client *, struct nvkm_object *);
+void nvkm_client_remove(struct nvkm_client *, struct nvkm_object *);
+struct nvkm_object *nvkm_client_search(struct nvkm_client *, u64 object);
 
+int  nvkm_client_new(const char *name, u64 device, const char *cfg,
+		     const char *dbg, struct nvkm_client **);
+void nvkm_client_del(struct nvkm_client **);
 int  nvkm_client_init(struct nvkm_client *);
 int  nvkm_client_fini(struct nvkm_client *, bool suspend);
-const char *nvkm_client_name(void *obj);
 
 int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *,
 			   void *data, u32 size);
 int nvkm_client_notify_del(struct nvkm_client *, int index);
 int nvkm_client_notify_get(struct nvkm_client *, int index);
 int nvkm_client_notify_put(struct nvkm_client *, int index);
+
+/* logging for client-facing objects */
+#define nvif_printk(o,l,p,f,a...) do {                                         \
+	struct nvkm_object *_object = (o);                                     \
+	struct nvkm_client *_client = _object->client;                         \
+	if (_client->debug >= NV_DBG_##l)                                      \
+		printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name,     \
+		       _object->handle, _object->oclass, ##a);                 \
+} while(0)
+#define nvif_fatal(o,f,a...) nvif_printk((o), FATAL, CRIT, f, ##a)
+#define nvif_error(o,f,a...) nvif_printk((o), ERROR,  ERR, f, ##a)
+#define nvif_debug(o,f,a...) nvif_printk((o), DEBUG, INFO, f, ##a)
+#define nvif_trace(o,f,a...) nvif_printk((o), TRACE, INFO, f, ##a)
+#define nvif_info(o,f,a...)  nvif_printk((o),  INFO, INFO, f, ##a)
+#define nvif_ioctl(o,f,a...) nvif_trace((o), "ioctl: "f, ##a)
 #endif
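
The nvif_printk() family gates output on the owning client's per-client debug level and prefixes each line with client name, object handle and class. A sketch of typical use from an object implementation (message text invented):

static void
example_log(struct nvkm_object *object, int ret)
{
	nvif_ioctl(object, "new\n");
	if (ret)
		nvif_error(object, "init failed, %d\n", ret);
}
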
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
index d07cb860b56c..c59fd4e2ad5e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
@@ -1,18 +1,11 @@
 #ifndef __NVKM_DEBUG_H__
 #define __NVKM_DEBUG_H__
-extern int nv_info_debug_level;
-
 #define NV_DBG_FATAL    0
 #define NV_DBG_ERROR    1
 #define NV_DBG_WARN     2
-#define NV_DBG_INFO     nv_info_debug_level
+#define NV_DBG_INFO     3
 #define NV_DBG_DEBUG    4
 #define NV_DBG_TRACE    5
 #define NV_DBG_PARANOIA 6
 #define NV_DBG_SPAM     7
-
-#define NV_DBG_INFO_NORMAL 3
-#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
-
-#define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 333db33a162c..8f760002e401 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,24 +1,84 @@
 #ifndef __NVKM_DEVICE_H__
 #define __NVKM_DEVICE_H__
-#include <core/engine.h>
 #include <core/event.h>
+#include <core/object.h>
+
+enum nvkm_devidx {
+	NVKM_SUBDEV_PCI,
+	NVKM_SUBDEV_VBIOS,
+	NVKM_SUBDEV_DEVINIT,
+	NVKM_SUBDEV_IBUS,
+	NVKM_SUBDEV_GPIO,
+	NVKM_SUBDEV_I2C,
+	NVKM_SUBDEV_FUSE,
+	NVKM_SUBDEV_MXM,
+	NVKM_SUBDEV_MC,
+	NVKM_SUBDEV_BUS,
+	NVKM_SUBDEV_TIMER,
+	NVKM_SUBDEV_FB,
+	NVKM_SUBDEV_LTC,
+	NVKM_SUBDEV_INSTMEM,
+	NVKM_SUBDEV_MMU,
+	NVKM_SUBDEV_BAR,
+	NVKM_SUBDEV_PMU,
+	NVKM_SUBDEV_VOLT,
+	NVKM_SUBDEV_THERM,
+	NVKM_SUBDEV_CLK,
+
+	NVKM_ENGINE_DMAOBJ,
+	NVKM_ENGINE_IFB,
+	NVKM_ENGINE_FIFO,
+	NVKM_ENGINE_SW,
+	NVKM_ENGINE_GR,
+	NVKM_ENGINE_MPEG,
+	NVKM_ENGINE_ME,
+	NVKM_ENGINE_VP,
+	NVKM_ENGINE_CIPHER,
+	NVKM_ENGINE_BSP,
+	NVKM_ENGINE_MSPPP,
+	NVKM_ENGINE_CE0,
+	NVKM_ENGINE_CE1,
+	NVKM_ENGINE_CE2,
+	NVKM_ENGINE_VIC,
+	NVKM_ENGINE_MSENC,
+	NVKM_ENGINE_DISP,
+	NVKM_ENGINE_PM,
+	NVKM_ENGINE_MSVLD,
+	NVKM_ENGINE_SEC,
+	NVKM_ENGINE_MSPDEC,
+
+	NVKM_SUBDEV_NR
+};
+
+enum nvkm_device_type {
+	NVKM_DEVICE_PCI,
+	NVKM_DEVICE_AGP,
+	NVKM_DEVICE_PCIE,
+	NVKM_DEVICE_TEGRA,
+};
 
 struct nvkm_device {
-	struct nvkm_engine engine;
+	const struct nvkm_device_func *func;
+	const struct nvkm_device_quirk *quirk;
+	struct device *dev;
+	enum nvkm_device_type type;
+	u64 handle;
+	const char *name;
+	const char *cfgopt;
+	const char *dbgopt;
+
 	struct list_head head;
+	struct mutex mutex;
+	int refcount;
 
-	struct pci_dev *pdev;
-	struct platform_device *platformdev;
-	u64 handle;
+	void __iomem *pri;
 
 	struct nvkm_event event;
 
-	const char *cfgopt;
-	const char *dbgopt;
-	const char *name;
-	const char *cname;
 	u64 disable_mask;
+	u32 debug;
 
+	const struct nvkm_device_chip *chip;
 	enum {
 		NV_04    = 0x04,
 		NV_10    = 0x10,
@@ -35,67 +95,157 @@ struct nvkm_device {
 	u8  chiprev;
 	u32 crystal;
 
-	struct nvkm_oclass *oclass[NVDEV_SUBDEV_NR];
-	struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
-
 	struct {
 		struct notifier_block nb;
 	} acpi;
+
+	struct nvkm_bar *bar;
+	struct nvkm_bios *bios;
+	struct nvkm_bus *bus;
+	struct nvkm_clk *clk;
+	struct nvkm_devinit *devinit;
+	struct nvkm_fb *fb;
+	struct nvkm_fuse *fuse;
+	struct nvkm_gpio *gpio;
+	struct nvkm_i2c *i2c;
+	struct nvkm_subdev *ibus;
+	struct nvkm_instmem *imem;
+	struct nvkm_ltc *ltc;
+	struct nvkm_mc *mc;
+	struct nvkm_mmu *mmu;
+	struct nvkm_subdev *mxm;
+	struct nvkm_pci *pci;
+	struct nvkm_pmu *pmu;
+	struct nvkm_therm *therm;
+	struct nvkm_timer *timer;
+	struct nvkm_volt *volt;
+
+	struct nvkm_engine *bsp;
+	struct nvkm_engine *ce[3];
+	struct nvkm_engine *cipher;
+	struct nvkm_disp *disp;
+	struct nvkm_dma *dma;
+	struct nvkm_fifo *fifo;
+	struct nvkm_gr *gr;
+	struct nvkm_engine *ifb;
+	struct nvkm_engine *me;
+	struct nvkm_engine *mpeg;
+	struct nvkm_engine *msenc;
+	struct nvkm_engine *mspdec;
+	struct nvkm_engine *msppp;
+	struct nvkm_engine *msvld;
+	struct nvkm_pm *pm;
+	struct nvkm_engine *sec;
+	struct nvkm_sw *sw;
+	struct nvkm_engine *vic;
+	struct nvkm_engine *vp;
+};
+
+struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int index);
+struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int index);
+
+struct nvkm_device_func {
+	struct nvkm_device_pci *(*pci)(struct nvkm_device *);
+	struct nvkm_device_tegra *(*tegra)(struct nvkm_device *);
+	void *(*dtor)(struct nvkm_device *);
+	int (*preinit)(struct nvkm_device *);
+	int (*init)(struct nvkm_device *);
+	void (*fini)(struct nvkm_device *, bool suspend);
+	resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
+	resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+	bool cpu_coherent;
+};
+
+struct nvkm_device_quirk {
+	u8 tv_pin_mask;
+	u8 tv_gpio;
+	bool War00C800_0;
+};
+
+struct nvkm_device_chip {
+	const char *name;
+
+	int (*bar    )(struct nvkm_device *, int idx, struct nvkm_bar **);
+	int (*bios   )(struct nvkm_device *, int idx, struct nvkm_bios **);
+	int (*bus    )(struct nvkm_device *, int idx, struct nvkm_bus **);
+	int (*clk    )(struct nvkm_device *, int idx, struct nvkm_clk **);
+	int (*devinit)(struct nvkm_device *, int idx, struct nvkm_devinit **);
+	int (*fb     )(struct nvkm_device *, int idx, struct nvkm_fb **);
+	int (*fuse   )(struct nvkm_device *, int idx, struct nvkm_fuse **);
+	int (*gpio   )(struct nvkm_device *, int idx, struct nvkm_gpio **);
+	int (*i2c    )(struct nvkm_device *, int idx, struct nvkm_i2c **);
+	int (*ibus   )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+	int (*imem   )(struct nvkm_device *, int idx, struct nvkm_instmem **);
+	int (*ltc    )(struct nvkm_device *, int idx, struct nvkm_ltc **);
+	int (*mc     )(struct nvkm_device *, int idx, struct nvkm_mc **);
+	int (*mmu    )(struct nvkm_device *, int idx, struct nvkm_mmu **);
+	int (*mxm    )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+	int (*pci    )(struct nvkm_device *, int idx, struct nvkm_pci **);
+	int (*pmu    )(struct nvkm_device *, int idx, struct nvkm_pmu **);
+	int (*therm  )(struct nvkm_device *, int idx, struct nvkm_therm **);
+	int (*timer  )(struct nvkm_device *, int idx, struct nvkm_timer **);
+	int (*volt   )(struct nvkm_device *, int idx, struct nvkm_volt **);
+
+	int (*bsp    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*ce[3]  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*disp   )(struct nvkm_device *, int idx, struct nvkm_disp **);
+	int (*dma    )(struct nvkm_device *, int idx, struct nvkm_dma **);
+	int (*fifo   )(struct nvkm_device *, int idx, struct nvkm_fifo **);
+	int (*gr     )(struct nvkm_device *, int idx, struct nvkm_gr **);
+	int (*ifb    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*me     )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*mpeg   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msenc  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msppp  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msvld  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*pm     )(struct nvkm_device *, int idx, struct nvkm_pm **);
+	int (*sec    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*sw     )(struct nvkm_device *, int idx, struct nvkm_sw **);
+	int (*vic    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*vp     )(struct nvkm_device *, int idx, struct nvkm_engine **);
 };
 
 struct nvkm_device *nvkm_device_find(u64 name);
 int nvkm_device_list(u64 *name, int size);
 
-struct nvkm_device *nv_device(void *obj);
-
-static inline bool
-nv_device_match(struct nvkm_object *object, u16 dev, u16 ven, u16 sub)
-{
-	struct nvkm_device *device = nv_device(object);
-	return device->pdev->device == dev &&
-	       device->pdev->subsystem_vendor == ven &&
-	       device->pdev->subsystem_device == sub;
-}
-
-static inline bool
-nv_device_is_pci(struct nvkm_device *device)
-{
-	return device->pdev != NULL;
-}
-
-static inline bool
-nv_device_is_cpu_coherent(struct nvkm_device *device)
-{
-	return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
-}
-
-static inline struct device *
-nv_device_base(struct nvkm_device *device)
-{
-	return nv_device_is_pci(device) ? &device->pdev->dev :
-					  &device->platformdev->dev;
-}
-
-resource_size_t
-nv_device_resource_start(struct nvkm_device *device, unsigned int bar);
-
-resource_size_t
-nv_device_resource_len(struct nvkm_device *device, unsigned int bar);
-
-int
-nv_device_get_irq(struct nvkm_device *device, bool stall);
-
-struct platform_device;
-
-enum nv_bus_type {
-	NVKM_BUS_PCI,
-	NVKM_BUS_PLATFORM,
+/* privileged register interface accessor macros */
+#define nvkm_rd08(d,a) ioread8((d)->pri + (a))
+#define nvkm_rd16(d,a) ioread16_native((d)->pri + (a))
+#define nvkm_rd32(d,a) ioread32_native((d)->pri + (a))
+#define nvkm_wr08(d,a,v) iowrite8((v), (d)->pri + (a))
+#define nvkm_wr16(d,a,v) iowrite16_native((v), (d)->pri + (a))
+#define nvkm_wr32(d,a,v) iowrite32_native((v), (d)->pri + (a))
+#define nvkm_mask(d,a,m,v) ({                                                  \
+	struct nvkm_device *_device = (d);                                     \
+	u32 _addr = (a), _temp = nvkm_rd32(_device, _addr);                    \
+	nvkm_wr32(_device, _addr, (_temp & ~(m)) | (v));                       \
+	_temp;                                                                 \
+})
+
+void nvkm_device_del(struct nvkm_device **);
+
+struct nvkm_device_oclass {
+	int (*ctor)(struct nvkm_device *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
 };
 
-#define nvkm_device_create(p,t,n,s,c,d,u)                                   \
-	nvkm_device_create_((void *)(p), (t), (n), (s), (c), (d),           \
-			       sizeof(**u), (void **)u)
-int  nvkm_device_create_(void *, enum nv_bus_type type, u64 name,
-			    const char *sname, const char *cfg, const char *dbg,
-			    int, void **);
+extern const struct nvkm_sclass nvkm_udevice_sclass;
+
+/* device logging */
+#define nvdev_printk_(d,l,p,f,a...) do {                                       \
+	struct nvkm_device *_device = (d);                                     \
+	if (_device->debug >= (l))                                             \
+		dev_##p(_device->dev, f, ##a);                                 \
+} while(0)
+#define nvdev_printk(d,l,p,f,a...) nvdev_printk_((d), NV_DBG_##l, p, f, ##a)
+#define nvdev_fatal(d,f,a...) nvdev_printk((d), FATAL,   crit, f, ##a)
+#define nvdev_error(d,f,a...) nvdev_printk((d), ERROR,    err, f, ##a)
+#define nvdev_warn(d,f,a...)  nvdev_printk((d),  WARN, notice, f, ##a)
+#define nvdev_info(d,f,a...)  nvdev_printk((d),  INFO,   info, f, ##a)
+#define nvdev_debug(d,f,a...) nvdev_printk((d), DEBUG,   info, f, ##a)
+#define nvdev_trace(d,f,a...) nvdev_printk((d), TRACE,   info, f, ##a)
+#define nvdev_spam(d,f,a...)  nvdev_printk((d),  SPAM,    dbg, f, ##a)
 #endif
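
Privileged register access now goes through the device's pri mapping directly instead of through an object chain, and nvdev_printk() keys off the per-device debug level. A sketch of a read-modify-write cycle with the new accessors (a PMC-style offset is used purely for illustration):

static void
example_pmc_cycle(struct nvkm_device *device)
{
	/* nvkm_mask() returns the value prior to modification */
	u32 prev = nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
	nvkm_wr32(device, 0x000200, prev | 0x00001000);
	nvdev_trace(device, "cycled 0x000200\n");
}
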
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
deleted file mode 100644
index 60c5888b5df3..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __NVKM_DEVIDX_H__
-#define __NVKM_DEVIDX_H__
-enum nvkm_devidx {
-	NVDEV_ENGINE_DEVICE,
-	NVDEV_SUBDEV_VBIOS,
-
-	/* All subdevs from DEVINIT to DEVINIT_LAST will be created before
-	 * *any* of them are initialised.  This subdev category is used
-	 * for any subdevs that the VBIOS init table parsing may call out
-	 * to during POST.
-	 */
-	NVDEV_SUBDEV_DEVINIT,
-	NVDEV_SUBDEV_IBUS,
-	NVDEV_SUBDEV_GPIO,
-	NVDEV_SUBDEV_I2C,
-	NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
-
-	/* This grouping of subdevs are initialised right after they've
-	 * been created, and are allowed to assume any subdevs in the
-	 * list above them exist and have been initialised.
-	 */
-	NVDEV_SUBDEV_FUSE,
-	NVDEV_SUBDEV_MXM,
-	NVDEV_SUBDEV_MC,
-	NVDEV_SUBDEV_BUS,
-	NVDEV_SUBDEV_TIMER,
-	NVDEV_SUBDEV_FB,
-	NVDEV_SUBDEV_LTC,
-	NVDEV_SUBDEV_INSTMEM,
-	NVDEV_SUBDEV_MMU,
-	NVDEV_SUBDEV_BAR,
-	NVDEV_SUBDEV_PMU,
-	NVDEV_SUBDEV_VOLT,
-	NVDEV_SUBDEV_THERM,
-	NVDEV_SUBDEV_CLK,
-
-	NVDEV_ENGINE_FIRST,
-	NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
-	NVDEV_ENGINE_IFB,
-	NVDEV_ENGINE_FIFO,
-	NVDEV_ENGINE_SW,
-	NVDEV_ENGINE_GR,
-	NVDEV_ENGINE_MPEG,
-	NVDEV_ENGINE_ME,
-	NVDEV_ENGINE_VP,
-	NVDEV_ENGINE_CIPHER,
-	NVDEV_ENGINE_BSP,
-	NVDEV_ENGINE_MSPPP,
-	NVDEV_ENGINE_CE0,
-	NVDEV_ENGINE_CE1,
-	NVDEV_ENGINE_CE2,
-	NVDEV_ENGINE_VIC,
-	NVDEV_ENGINE_MSENC,
-	NVDEV_ENGINE_DISP,
-	NVDEV_ENGINE_PM,
-	NVDEV_ENGINE_MSVLD,
-	NVDEV_ENGINE_SEC,
-	NVDEV_ENGINE_MSPDEC,
-
-	NVDEV_SUBDEV_NR,
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
deleted file mode 100644
index 1bf2e8eb4268..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __NVKM_ENGCTX_H__
-#define __NVKM_ENGCTX_H__
-#include <core/gpuobj.h>
-
-#include <subdev/mmu.h>
-
-#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
-#define NV_ENGCTX(name,var)  NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
-
-struct nvkm_engctx {
-	struct nvkm_gpuobj gpuobj;
-	struct nvkm_vma vma;
-	struct list_head head;
-	unsigned long save;
-	u64 addr;
-};
-
-static inline struct nvkm_engctx *
-nv_engctx(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
-		nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-#define nvkm_engctx_create(p,e,c,g,s,a,f,d)                                 \
-	nvkm_engctx_create_((p), (e), (c), (g), (s), (a), (f),              \
-			       sizeof(**d), (void **)d)
-
-int  nvkm_engctx_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, struct nvkm_object *,
-			    u32 size, u32 align, u32 flags,
-			    int length, void **data);
-void nvkm_engctx_destroy(struct nvkm_engctx *);
-int  nvkm_engctx_init(struct nvkm_engctx *);
-int  nvkm_engctx_fini(struct nvkm_engctx *, bool suspend);
-
-int  _nvkm_engctx_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-void _nvkm_engctx_dtor(struct nvkm_object *);
-int  _nvkm_engctx_init(struct nvkm_object *);
-int  _nvkm_engctx_fini(struct nvkm_object *, bool suspend);
-#define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32
-#define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32
-
-struct nvkm_object *nvkm_engctx_get(struct nvkm_engine *, u64 addr);
-void nvkm_engctx_put(struct nvkm_object *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index faf0fd2f0638..48bf128456a1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -1,56 +1,49 @@
 #ifndef __NVKM_ENGINE_H__
 #define __NVKM_ENGINE_H__
+#define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
 #include <core/subdev.h>
-
-#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
-#define NV_ENGINE(name,var)  NV_ENGINE_(NVDEV_ENGINE_##name, (var))
+struct nvkm_fifo_chan;
+struct nvkm_fb_tile;
 
 struct nvkm_engine {
+	const struct nvkm_engine_func *func;
 	struct nvkm_subdev subdev;
-	struct nvkm_oclass *cclass;
-	struct nvkm_oclass *sclass;
-
-	struct list_head contexts;
 	spinlock_t lock;
 
-	void (*tile_prog)(struct nvkm_engine *, int region);
-	int  (*tlb_flush)(struct nvkm_engine *);
+	int usecount;
 };
 
-static inline struct nvkm_engine *
-nv_engine(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS)))
-		nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-static inline int
-nv_engidx(struct nvkm_engine *engine)
-{
-	return nv_subidx(&engine->subdev);
-}
-
-struct nvkm_engine *nvkm_engine(void *obj, int idx);
-
-#define nvkm_engine_create(p,e,c,d,i,f,r)                                   \
-	nvkm_engine_create_((p), (e), (c), (d), (i), (f),                   \
-			       sizeof(**r),(void **)r)
-
-#define nvkm_engine_destroy(p)                                              \
-	nvkm_subdev_destroy(&(p)->subdev)
-#define nvkm_engine_init(p)                                                 \
-	nvkm_subdev_init(&(p)->subdev)
-#define nvkm_engine_fini(p,s)                                               \
-	nvkm_subdev_fini(&(p)->subdev, (s))
-
-int nvkm_engine_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, bool, const char *,
-			   const char *, int, void **);
+struct nvkm_engine_func {
+	void *(*dtor)(struct nvkm_engine *);
+	int (*oneinit)(struct nvkm_engine *);
+	int (*init)(struct nvkm_engine *);
+	int (*fini)(struct nvkm_engine *, bool suspend);
+	void (*intr)(struct nvkm_engine *);
+	void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *);
+
+	struct {
+		int (*sclass)(struct nvkm_oclass *, int index,
+			      const struct nvkm_device_oclass **);
+	} base;
+
+	struct {
+		int (*cclass)(struct nvkm_fifo_chan *,
+			      const struct nvkm_oclass *,
+			      struct nvkm_object **);
+		int (*sclass)(struct nvkm_oclass *, int index);
+	} fifo;
+
+	const struct nvkm_object_func *cclass;
+	struct nvkm_sclass sclass[];
+};
 
-#define _nvkm_engine_dtor _nvkm_subdev_dtor
-#define _nvkm_engine_init _nvkm_subdev_init
-#define _nvkm_engine_fini _nvkm_subdev_fini
+int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *,
+		     int index, u32 pmc_enable, bool enable,
+		     struct nvkm_engine *);
+int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
+		     int index, u32 pmc_enable, bool enable,
+		     struct nvkm_engine **);
+struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
+void nvkm_engine_unref(struct nvkm_engine **);
+void nvkm_engine_tile(struct nvkm_engine *, int region);
 #endif
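
Engines are now use-counted: nvkm_engine_ref() takes a reference (initialising the engine on first use) and nvkm_engine_unref() drops it. A sketch of the expected calling pattern, assuming the usual ERR_PTR convention on failure:

static int
example_use_engine(struct nvkm_engine *engine)
{
	engine = nvkm_engine_ref(engine);
	if (IS_ERR(engine))
		return PTR_ERR(engine);	/* assumed error convention */
	/* ...engine usable here... */
	nvkm_engine_unref(&engine);
	return 0;
}
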
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index e76f76f115e9..40429a82f792 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -10,12 +10,11 @@ struct nvkm_enum {
 };
 
 const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value);
-const struct nvkm_enum *nvkm_enum_print(const struct nvkm_enum *, u32 value);
 
 struct nvkm_bitfield {
 	u32 mask;
 	const char *name;
 };
 
-void nvkm_bitfield_print(const struct nvkm_bitfield *, u32 value);
+void nvkm_snprintbf(char *, int, const struct nvkm_bitfield *, u32 value);
 #endif
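
nvkm_bitfield_print() gives way to an snprintf-style decoder, letting callers compose their own log lines. A sketch with invented bit names:

static void
example_decode_intr(struct nvkm_device *device, u32 stat)
{
	static const struct nvkm_bitfield bits[] = {
		{ 0x00000001, "FIFO" },
		{ 0x00000002, "GR" },
		{}
	};
	char msg[128];

	nvkm_snprintbf(msg, sizeof(msg), bits, stat);
	nvdev_error(device, "intr %08x [%s]\n", stat, msg);
}
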
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index e0187e7abb6e..d4f56eafb073 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,64 +1,40 @@
 #ifndef __NVKM_GPUOBJ_H__
 #define __NVKM_GPUOBJ_H__
 #include <core/object.h>
+#include <core/memory.h>
 #include <core/mm.h>
 struct nvkm_vma;
 struct nvkm_vm;
 
 #define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
-#define NVOBJ_FLAG_ZERO_FREE  0x00000002
 #define NVOBJ_FLAG_HEAP       0x00000004
 
 struct nvkm_gpuobj {
 	struct nvkm_object object;
-	struct nvkm_object *parent;
+	const struct nvkm_gpuobj_func *func;
+	struct nvkm_gpuobj *parent;
+	struct nvkm_memory *memory;
 	struct nvkm_mm_node *node;
-	struct nvkm_mm heap;
 
-	u32 flags;
 	u64 addr;
 	u32 size;
-};
+	struct nvkm_mm heap;
 
-static inline struct nvkm_gpuobj *
-nv_gpuobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
-		nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
+	void __iomem *map;
+};
 
-#define nvkm_gpuobj_create(p,e,c,v,g,s,a,f,d)                               \
-	nvkm_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f),         \
-			       sizeof(**d), (void **)d)
-#define nvkm_gpuobj_init(p) nvkm_object_init(&(p)->object)
-#define nvkm_gpuobj_fini(p,s) nvkm_object_fini(&(p)->object, (s))
-int  nvkm_gpuobj_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    struct nvkm_object *, u32 size, u32 align,
-			    u32 flags, int length, void **);
-void nvkm_gpuobj_destroy(struct nvkm_gpuobj *);
+struct nvkm_gpuobj_func {
+	void *(*acquire)(struct nvkm_gpuobj *);
+	void (*release)(struct nvkm_gpuobj *);
+	u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
+	void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+};
 
-int  nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
-		     u32 align, u32 flags, struct nvkm_gpuobj **);
-int  nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_gpuobj *,
-		     struct nvkm_gpuobj **);
-int  nvkm_gpuobj_map(struct nvkm_gpuobj *, u32 acc, struct nvkm_vma *);
-int  nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
-			struct nvkm_vma *);
+int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
+		    struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
+void nvkm_gpuobj_del(struct nvkm_gpuobj **);
+int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
+int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
+		    struct nvkm_vma *);
 void nvkm_gpuobj_unmap(struct nvkm_vma *);
-
-static inline void
-nvkm_gpuobj_ref(struct nvkm_gpuobj *obj, struct nvkm_gpuobj **ref)
-{
-	nvkm_object_ref(&obj->object, (struct nvkm_object **)ref);
-}
-
-void _nvkm_gpuobj_dtor(struct nvkm_object *);
-int  _nvkm_gpuobj_init(struct nvkm_object *);
-int  _nvkm_gpuobj_fini(struct nvkm_object *, bool);
-u32  _nvkm_gpuobj_rd32(struct nvkm_object *, u64);
-void _nvkm_gpuobj_wr32(struct nvkm_object *, u64, u32);
 #endif
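
Allocation collapses to a single constructor taking the device, with zero-fill requested by a bool rather than flag plumbing. A sketch of allocating and filling a small object (size and alignment invented); the nvkm_kmap()/nvkm_done() bracket from memory.h is assumed to apply here too via the gpuobj's own func table:

static int
example_gpuobj(struct nvkm_device *device, struct nvkm_gpuobj *parent,
	       struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(device, 0x1000, 0x100, true, parent,
				  pgpuobj);
	if (ret)
		return ret;
	nvkm_kmap(*pgpuobj);
	nvkm_wo32(*pgpuobj, 0x00, 0x00000001);
	nvkm_done(*pgpuobj);
	return 0;
}
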
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
deleted file mode 100644
index 67f384d0916c..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __NVKM_HANDLE_H__
-#define __NVKM_HANDLE_H__
-#include <core/os.h>
-struct nvkm_object;
-
-struct nvkm_handle {
-	struct nvkm_namedb *namedb;
-	struct list_head node;
-
-	struct list_head head;
-	struct list_head tree;
-	u32 name;
-	u32 priv;
-
-	u8  route;
-	u64 token;
-
-	struct nvkm_handle *parent;
-	struct nvkm_object *object;
-};
-
-int  nvkm_handle_create(struct nvkm_object *, u32 parent, u32 handle,
-			struct nvkm_object *, struct nvkm_handle **);
-void nvkm_handle_destroy(struct nvkm_handle *);
-int  nvkm_handle_init(struct nvkm_handle *);
-int  nvkm_handle_fini(struct nvkm_handle *, bool suspend);
-
-struct nvkm_object *nvkm_handle_ref(struct nvkm_object *, u32 name);
-
-struct nvkm_handle *nvkm_handle_get_class(struct nvkm_object *, u16);
-struct nvkm_handle *nvkm_handle_get_vinst(struct nvkm_object *, u64);
-struct nvkm_handle *nvkm_handle_get_cinst(struct nvkm_object *, u32);
-void nvkm_handle_put(struct nvkm_handle *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
new file mode 100644
index 000000000000..9363b839a9da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -0,0 +1,53 @@
+#ifndef __NVKM_MEMORY_H__
+#define __NVKM_MEMORY_H__
+#include <core/os.h>
+struct nvkm_device;
+struct nvkm_vma;
+struct nvkm_vm;
+
+enum nvkm_memory_target {
+	NVKM_MEM_TARGET_INST,
+	NVKM_MEM_TARGET_VRAM,
+	NVKM_MEM_TARGET_HOST,
+};
+
+struct nvkm_memory {
+	const struct nvkm_memory_func *func;
+};
+
+struct nvkm_memory_func {
+	void *(*dtor)(struct nvkm_memory *);
+	enum nvkm_memory_target (*target)(struct nvkm_memory *);
+	u64 (*addr)(struct nvkm_memory *);
+	u64 (*size)(struct nvkm_memory *);
+	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+	void __iomem *(*acquire)(struct nvkm_memory *);
+	void (*release)(struct nvkm_memory *);
+	u32 (*rd32)(struct nvkm_memory *, u64 offset);
+	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
+	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
+};
+
+void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
+int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
+		    u64 size, u32 align, bool zero, struct nvkm_memory **);
+void nvkm_memory_del(struct nvkm_memory **);
+#define nvkm_memory_target(p) (p)->func->target(p)
+#define nvkm_memory_addr(p) (p)->func->addr(p)
+#define nvkm_memory_size(p) (p)->func->size(p)
+#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
+#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+
+/* accessor macros - kmap()/done() must bracket use of the other accessor
+ * macros to guarantee correct behaviour across all chipsets
+ */
+#define nvkm_kmap(o)     (o)->func->acquire(o)
+#define nvkm_ro32(o,a)   (o)->func->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_mo32(o,a,m,d) ({                                                  \
+	u32 _addr = (a), _data = nvkm_ro32((o), _addr);                        \
+	nvkm_wo32((o), _addr, (_data & ~(m)) | (d));                           \
+	_data;                                                                 \
+})
+#define nvkm_done(o)     (o)->func->release(o)
+#endif
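
A short sketch of the bracket contract noted above; the memory object is assumed to have been allocated elsewhere via nvkm_memory_new():

static void
example_fill(struct nvkm_memory *memory)
{
	nvkm_kmap(memory);
	nvkm_wo32(memory, 0x00, 0x00000001);
	nvkm_mo32(memory, 0x04, 0x0000ffff, 0x00001234);	/* rmw */
	nvkm_done(memory);
}
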
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 096eb1a623ee..d92fd41e4056 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -27,7 +27,7 @@ struct nvkm_mm {
 static inline bool
 nvkm_mm_initialised(struct nvkm_mm *mm)
 {
-	return mm->block_size != 0;
+	return mm->heap_nodes;
 }
 
 int  nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
@@ -37,4 +37,5 @@ int  nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 int  nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 		  u32 size_min, u32 align, struct nvkm_mm_node **);
 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
+void nvkm_mm_dump(struct nvkm_mm *, const char *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h b/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
deleted file mode 100644
index 4cfe16fcde9b..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __NVKM_NAMEDB_H__
-#define __NVKM_NAMEDB_H__
-#include <core/parent.h>
-struct nvkm_handle;
-
-struct nvkm_namedb {
-	struct nvkm_parent parent;
-	rwlock_t lock;
-	struct list_head list;
-};
-
-static inline struct nvkm_namedb *
-nv_namedb(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
-		nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-#define nvkm_namedb_create(p,e,c,v,s,m,d)                                   \
-	nvkm_namedb_create_((p), (e), (c), (v), (s), (m),                   \
-			       sizeof(**d), (void **)d)
-#define nvkm_namedb_init(p)                                                 \
-	nvkm_parent_init(&(p)->parent)
-#define nvkm_namedb_fini(p,s)                                               \
-	nvkm_parent_fini(&(p)->parent, (s))
-#define nvkm_namedb_destroy(p)                                              \
-	nvkm_parent_destroy(&(p)->parent)
-
-int  nvkm_namedb_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    struct nvkm_oclass *, u64 engcls,
-			    int size, void **);
-
-int  _nvkm_namedb_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-#define _nvkm_namedb_dtor _nvkm_parent_dtor
-#define _nvkm_namedb_init _nvkm_parent_init
-#define _nvkm_namedb_fini _nvkm_parent_fini
-
-int  nvkm_namedb_insert(struct nvkm_namedb *, u32 name, struct nvkm_object *,
-			struct nvkm_handle *);
-void nvkm_namedb_remove(struct nvkm_handle *);
-
-struct nvkm_handle *nvkm_namedb_get(struct nvkm_namedb *, u32);
-struct nvkm_handle *nvkm_namedb_get_class(struct nvkm_namedb *, u16);
-struct nvkm_handle *nvkm_namedb_get_vinst(struct nvkm_namedb *, u64);
-struct nvkm_handle *nvkm_namedb_get_cinst(struct nvkm_namedb *, u32);
-void nvkm_namedb_put(struct nvkm_handle *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 6e3cd3908400..dcd048b91fac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,203 +1,88 @@
 #ifndef __NVKM_OBJECT_H__
 #define __NVKM_OBJECT_H__
 #include <core/os.h>
-#include <core/printk.h>
-
-#define NV_PARENT_CLASS 0x80000000
-#define NV_NAMEDB_CLASS 0x40000000
-#define NV_CLIENT_CLASS 0x20000000
-#define NV_SUBDEV_CLASS 0x10000000
-#define NV_ENGINE_CLASS 0x08000000
-#define NV_MEMOBJ_CLASS 0x04000000
-#define NV_GPUOBJ_CLASS 0x02000000
-#define NV_ENGCTX_CLASS 0x01000000
-#define NV_OBJECT_CLASS 0x0000ffff
+#include <core/debug.h>
+struct nvkm_event;
+struct nvkm_gpuobj;
+struct nvkm_oclass;
 
 struct nvkm_object {
-	struct nvkm_oclass *oclass;
-	struct nvkm_object *parent;
+	const struct nvkm_object_func *func;
+	struct nvkm_client *client;
 	struct nvkm_engine *engine;
-	atomic_t refcount;
-	atomic_t usecount;
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-#define NVKM_OBJECT_MAGIC 0x75ef0bad
-	struct list_head list;
-	u32 _magic;
-#endif
-};
-
-static inline struct nvkm_object *
-nv_object(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (likely(obj)) {
-		struct nvkm_object *object = obj;
-		if (unlikely(object->_magic != NVKM_OBJECT_MAGIC))
-			nv_assert("BAD CAST -> NvObject, invalid magic");
-	}
-#endif
-	return obj;
-}
-
-#define nvkm_object_create(p,e,c,s,d)                                       \
-	nvkm_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
-int  nvkm_object_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32, int size, void **);
-void nvkm_object_destroy(struct nvkm_object *);
-int  nvkm_object_init(struct nvkm_object *);
-int  nvkm_object_fini(struct nvkm_object *, bool suspend);
-
-int _nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, void *, u32,
-			 struct nvkm_object **);
-
-extern struct nvkm_ofuncs nvkm_object_ofuncs;
-
-/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
- * ".data". */
-struct nvkm_oclass {
+	s32 oclass;
 	u32 handle;
-	struct nvkm_ofuncs * const ofuncs;
-	struct nvkm_omthds * const omthds;
-	struct lock_class_key lock_class_key;
-};
-
-#define nv_oclass(o)    nv_object(o)->oclass
-#define nv_hclass(o)    nv_oclass(o)->handle
-#define nv_iclass(o,i) (nv_hclass(o) & (i))
-#define nv_mclass(o)    nv_iclass(o, NV_OBJECT_CLASS)
 
-static inline struct nvkm_object *
-nv_pclass(struct nvkm_object *parent, u32 oclass)
-{
-	while (parent && !nv_iclass(parent, oclass))
-		parent = parent->parent;
-	return parent;
-}
+	struct list_head head;
+	struct list_head tree;
+	u8  route;
+	u64 token;
+	u64 object;
+	struct rb_node node;
+};
 
-struct nvkm_omthds {
-	u32 start;
-	u32 limit;
-	int (*call)(struct nvkm_object *, u32, void *, u32);
+struct nvkm_object_func {
+	void *(*dtor)(struct nvkm_object *);
+	int (*init)(struct nvkm_object *);
+	int (*fini)(struct nvkm_object *, bool suspend);
+	int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
+	int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
+	int (*map)(struct nvkm_object *, u64 *addr, u32 *size);
+	int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
+	int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
+	int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
+	int (*wr08)(struct nvkm_object *, u64 addr, u8 data);
+	int (*wr16)(struct nvkm_object *, u64 addr, u16 data);
+	int (*wr32)(struct nvkm_object *, u64 addr, u32 data);
+	int (*bind)(struct nvkm_object *, struct nvkm_gpuobj *, int align,
+		    struct nvkm_gpuobj **);
+	int (*sclass)(struct nvkm_object *, int index, struct nvkm_oclass *);
 };
 
-struct nvkm_event;
-struct nvkm_ofuncs {
-	int  (*ctor)(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *data, u32 size,
+void nvkm_object_ctor(const struct nvkm_object_func *,
+		      const struct nvkm_oclass *, struct nvkm_object *);
+int nvkm_object_new_(const struct nvkm_object_func *,
+		     const struct nvkm_oclass *, void *data, u32 size,
 		     struct nvkm_object **);
-	void (*dtor)(struct nvkm_object *);
-	int  (*init)(struct nvkm_object *);
-	int  (*fini)(struct nvkm_object *, bool suspend);
-	int  (*mthd)(struct nvkm_object *, u32, void *, u32);
-	int  (*ntfy)(struct nvkm_object *, u32, struct nvkm_event **);
-	int  (* map)(struct nvkm_object *, u64 *, u32 *);
-	u8   (*rd08)(struct nvkm_object *, u64 offset);
-	u16  (*rd16)(struct nvkm_object *, u64 offset);
-	u32  (*rd32)(struct nvkm_object *, u64 offset);
-	void (*wr08)(struct nvkm_object *, u64 offset, u8 data);
-	void (*wr16)(struct nvkm_object *, u64 offset, u16 data);
-	void (*wr32)(struct nvkm_object *, u64 offset, u32 data);
+int nvkm_object_new(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+void nvkm_object_del(struct nvkm_object **);
+void *nvkm_object_dtor(struct nvkm_object *);
+int nvkm_object_init(struct nvkm_object *);
+int nvkm_object_fini(struct nvkm_object *, bool suspend);
+int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
+int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
+int nvkm_object_map(struct nvkm_object *, u64 *addr, u32 *size);
+int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8  *data);
+int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
+int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
+int nvkm_object_wr08(struct nvkm_object *, u64 addr, u8   data);
+int nvkm_object_wr16(struct nvkm_object *, u64 addr, u16  data);
+int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32  data);
+int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
+		     struct nvkm_gpuobj **);
+
+struct nvkm_sclass {
+	int minver;
+	int maxver;
+	s32 oclass;
+	const struct nvkm_object_func *func;
+	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
 };
 
-static inline struct nvkm_ofuncs *
-nv_ofuncs(void *obj)
-{
-	return nv_oclass(obj)->ofuncs;
-}
-
-int  nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *, u32,
-		      struct nvkm_object **);
-void nvkm_object_ref(struct nvkm_object *, struct nvkm_object **);
-int  nvkm_object_inc(struct nvkm_object *);
-int  nvkm_object_dec(struct nvkm_object *, bool suspend);
-void nvkm_object_debug(void);
-
-static inline int
-nv_exec(void *obj, u32 mthd, void *data, u32 size)
-{
-	struct nvkm_omthds *method = nv_oclass(obj)->omthds;
-
-	while (method && method->call) {
-		if (mthd >= method->start && mthd <= method->limit)
-			return method->call(obj, mthd, data, size);
-		method++;
-	}
-
-	return -EINVAL;
-}
-
-static inline int
-nv_call(void *obj, u32 mthd, u32 data)
-{
-	return nv_exec(obj, mthd, &data, sizeof(data));
-}
-
-static inline u8
-nv_ro08(void *obj, u64 addr)
-{
-	u8 data = nv_ofuncs(obj)->rd08(obj, addr);
-	nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data);
-	return data;
-}
-
-static inline u16
-nv_ro16(void *obj, u64 addr)
-{
-	u16 data = nv_ofuncs(obj)->rd16(obj, addr);
-	nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data);
-	return data;
-}
-
-static inline u32
-nv_ro32(void *obj, u64 addr)
-{
-	u32 data = nv_ofuncs(obj)->rd32(obj, addr);
-	nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data);
-	return data;
-}
-
-static inline void
-nv_wo08(void *obj, u64 addr, u8 data)
-{
-	nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data);
-	nv_ofuncs(obj)->wr08(obj, addr, data);
-}
-
-static inline void
-nv_wo16(void *obj, u64 addr, u16 data)
-{
-	nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data);
-	nv_ofuncs(obj)->wr16(obj, addr, data);
-}
-
-static inline void
-nv_wo32(void *obj, u64 addr, u32 data)
-{
-	nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data);
-	nv_ofuncs(obj)->wr32(obj, addr, data);
-}
-
-static inline u32
-nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
-{
-	u32 temp = nv_ro32(obj, addr);
-	nv_wo32(obj, addr, (temp & ~mask) | data);
-	return temp;
-}
-
-static inline int
-nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
-{
-	unsigned char c1, c2;
-
-	while (len--) {
-		c1 = nv_ro08(obj, addr++);
-		c2 = *(str++);
-		if (c1 != c2)
-			return c1 - c2;
-	}
-	return 0;
-}
+struct nvkm_oclass {
+	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const void *priv;
+	const void *engn;
+	u32 handle;
+	u8  route;
+	u64 token;
+	u64 object;
+	struct nvkm_client *client;
+	struct nvkm_object *parent;
+	struct nvkm_engine *engine;
+};
 #endif
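
The rewrite above replaces the old nv_ofuncs/nv_omthds dispatch with a single
per-object function table. For illustration, a minimal sketch of an
implementation against the new interface; every "example_" name is
hypothetical and not part of the patch:

static void *
example_object_dtor(struct nvkm_object *object)
{
	/* assumed: the pointer returned here is what the core kfree()s */
	return object;
}

static int
example_object_init(struct nvkm_object *object)
{
	return 0; /* nothing to bring up in this sketch */
}

static const struct nvkm_object_func
example_object_func = {
	.dtor = example_object_dtor,
	.init = example_object_init,
};

static int
example_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		   struct nvkm_object **pobject)
{
	/* assumed: nvkm_object_new_() allocates a plain object and runs
	 * nvkm_object_ctor() with the given func table */
	return nvkm_object_new_(&example_object_func, oclass, data, size,
				pobject);
}

A constructor of this shape is what the new struct nvkm_sclass above expects
in its ctor member.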
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
new file mode 100644
index 000000000000..bd52236cc2f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
@@ -0,0 +1,22 @@
+#ifndef __NVKM_OPROXY_H__
+#define __NVKM_OPROXY_H__
+#define nvkm_oproxy(p) container_of((p), struct nvkm_oproxy, base)
+#include <core/object.h>
+
+struct nvkm_oproxy {
+	const struct nvkm_oproxy_func *func;
+	struct nvkm_object base;
+	struct nvkm_object *object;
+};
+
+struct nvkm_oproxy_func {
+	void (*dtor[2])(struct nvkm_oproxy *);
+	int  (*init[2])(struct nvkm_oproxy *);
+	int  (*fini[2])(struct nvkm_oproxy *, bool suspend);
+};
+
+void nvkm_oproxy_ctor(const struct nvkm_oproxy_func *,
+		      const struct nvkm_oclass *, struct nvkm_oproxy *);
+int  nvkm_oproxy_new_(const struct nvkm_oproxy_func *,
+		      const struct nvkm_oclass *, struct nvkm_oproxy **);
+#endif
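
nvkm_oproxy wraps another object and forwards operations to it; the
two-element hook arrays suggest a before/after split around the forwarded
call. A hedged sketch (the [0]=before, [1]=after ordering is an assumption
drawn from the array shape, and the "example_" names are illustrative):

static int
example_oproxy_fini_post(struct nvkm_oproxy *oproxy, bool suspend)
{
	/* assumed to run after oproxy->object has been fini'd */
	return 0;
}

static const struct nvkm_oproxy_func
example_oproxy_func = {
	.fini[1] = example_oproxy_fini_post,
};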
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
index 532bfa8e3f72..80fdc146e816 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
@@ -4,6 +4,7 @@
 
 const char *nvkm_stropt(const char *optstr, const char *opt, int *len);
 bool nvkm_boolopt(const char *optstr, const char *opt, bool value);
+long nvkm_longopt(const char *optstr, const char *opt, long value);
 int  nvkm_dbgopt(const char *optstr, const char *sub);
 
 /* compares unterminated string 'str' with zero-terminated string 'cmp' */
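
nvkm_longopt() follows the pattern of nvkm_boolopt(): parse 'opt' out of
'optstr' and fall back to 'value' when it is absent. A usage sketch,
assuming the option string lives in device->cfgopt as elsewhere in nvkm
(the option name is a placeholder):

static long
example_read_option(struct nvkm_device *device)
{
	/* "NvExampleLimit" is illustrative; 0 is the default returned
	 * when the option is not set */
	return nvkm_longopt(device->cfgopt, "NvExampleLimit", 0);
}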
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h b/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
deleted file mode 100644
index 837e4fe966a5..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef __NVKM_PARENT_H__
-#define __NVKM_PARENT_H__
-#include <core/object.h>
-
-struct nvkm_sclass {
-	struct nvkm_sclass *sclass;
-	struct nvkm_engine *engine;
-	struct nvkm_oclass *oclass;
-};
-
-struct nvkm_parent {
-	struct nvkm_object object;
-
-	struct nvkm_sclass *sclass;
-	u64 engine;
-
-	int  (*context_attach)(struct nvkm_object *, struct nvkm_object *);
-	int  (*context_detach)(struct nvkm_object *, bool suspend,
-			       struct nvkm_object *);
-
-	int  (*object_attach)(struct nvkm_object *parent,
-			      struct nvkm_object *object, u32 name);
-	void (*object_detach)(struct nvkm_object *parent, int cookie);
-};
-
-static inline struct nvkm_parent *
-nv_parent(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
-		nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-#define nvkm_parent_create(p,e,c,v,s,m,d)                                   \
-	nvkm_parent_create_((p), (e), (c), (v), (s), (m),                   \
-			       sizeof(**d), (void **)d)
-#define nvkm_parent_init(p)                                                 \
-	nvkm_object_init(&(p)->object)
-#define nvkm_parent_fini(p,s)                                               \
-	nvkm_object_fini(&(p)->object, (s))
-
-int  nvkm_parent_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    struct nvkm_oclass *, u64 engcls,
-			    int size, void **);
-void nvkm_parent_destroy(struct nvkm_parent *);
-
-void _nvkm_parent_dtor(struct nvkm_object *);
-#define _nvkm_parent_init nvkm_object_init
-#define _nvkm_parent_fini nvkm_object_fini
-
-int nvkm_parent_sclass(struct nvkm_object *, u16 handle,
-		       struct nvkm_object **pengine,
-		       struct nvkm_oclass **poclass);
-int nvkm_parent_lclass(struct nvkm_object *, u32 *, int);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
new file mode 100644
index 000000000000..78d41be20b8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_DEVICE_PCI_H__
+#define __NVKM_DEVICE_PCI_H__
+#include <core/device.h>
+
+struct nvkm_device_pci {
+	struct nvkm_device device;
+	struct pci_dev *pdev;
+	bool suspend;
+};
+
+int nvkm_device_pci_new(struct pci_dev *, const char *cfg, const char *dbg,
+			bool detect, bool mmio, u64 subdev_mask,
+			struct nvkm_device **);
+#endif
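
The new PCI front-end is constructed directly from a pci_dev rather than
through the old nouveau_device_create() bus abstraction. A probe-time
sketch; the NULL cfg/dbg strings and the all-ones subdev mask are
illustrative, not prescribed by the patch:

static int
example_pci_probe(struct pci_dev *pdev)
{
	struct nvkm_device *device;
	int ret;

	/* detect=true, mmio=true, all subdevs enabled (illustrative) */
	ret = nvkm_device_pci_new(pdev, NULL, NULL, true, true, ~0ULL,
				  &device);
	if (ret)
		return ret;

	/* device is a full nvkm_device; PCI-specific state hangs off the
	 * containing struct nvkm_device_pci */
	return 0;
}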
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h b/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
deleted file mode 100644
index 83648177059f..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __NVKM_PRINTK_H__
-#define __NVKM_PRINTK_H__
-#include <core/os.h>
-#include <core/debug.h>
-struct nvkm_object;
-
-void __printf(3, 4)
-nv_printk_(struct nvkm_object *, int, const char *, ...);
-
-#define nv_printk(o,l,f,a...) do {                                             \
-	if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG)                                \
-		nv_printk_(nv_object(o), NV_DBG_##l, f, ##a);                  \
-} while(0)
-
-#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
-#define nv_error(o,f,a...) nv_printk((o), ERROR, f, ##a)
-#define nv_warn(o,f,a...) nv_printk((o), WARN, f, ##a)
-#define nv_info(o,f,a...) nv_printk((o), INFO, f, ##a)
-#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
-#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
-#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
-#define nv_ioctl(o,f,a...) nv_trace(nvkm_client(o), "ioctl: "f, ##a)
-
-#define nv_assert(f,a...) do {                                                 \
-	if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG)                              \
-		nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a);                   \
-	BUG_ON(1);                                                             \
-} while(0)
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index cc132eaa10cc..5ee6298991e2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -2,19 +2,27 @@
 #define __NVKM_RAMHT_H__
 #include <core/gpuobj.h>
 
+struct nvkm_ramht_data {
+	struct nvkm_gpuobj *inst;
+	int chid;
+	u32 handle;
+};
+
 struct nvkm_ramht {
-	struct nvkm_gpuobj gpuobj;
+	struct nvkm_device *device;
+	struct nvkm_gpuobj *parent;
+	struct nvkm_gpuobj *gpuobj;
+	int size;
 	int bits;
+	struct nvkm_ramht_data data[];
 };
 
-int  nvkm_ramht_insert(struct nvkm_ramht *, int chid, u32 handle, u32 context);
+int  nvkm_ramht_new(struct nvkm_device *, u32 size, u32 align,
+		    struct nvkm_gpuobj *, struct nvkm_ramht **);
+void nvkm_ramht_del(struct nvkm_ramht **);
+int  nvkm_ramht_insert(struct nvkm_ramht *, struct nvkm_object *,
+		       int chid, int addr, u32 handle, u32 context);
 void nvkm_ramht_remove(struct nvkm_ramht *, int cookie);
-int  nvkm_ramht_new(struct nvkm_object *, struct nvkm_object *, u32 size,
-		    u32 align, struct nvkm_ramht **);
-
-static inline void
-nvkm_ramht_ref(struct nvkm_ramht *obj, struct nvkm_ramht **ref)
-{
-	nvkm_gpuobj_ref(&obj->gpuobj, (struct nvkm_gpuobj **)ref);
-}
+struct nvkm_gpuobj *
+nvkm_ramht_search(struct nvkm_ramht *, int chid, u32 handle);
 #endif
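
The RAMHT helpers now take the device directly. A round-trip sketch against
the new interface; the size, alignment, chid and handle values are
placeholders, the NULL parent gpuobj stands in for the instance-memory
object a real caller would supply, and the insert return value is assumed to
be the hash slot (cookie) or a negative error:

static int
example_ramht(struct nvkm_device *device, struct nvkm_object *object)
{
	struct nvkm_ramht *ramht;
	struct nvkm_gpuobj *inst;
	int ret, cookie;

	ret = nvkm_ramht_new(device, 0x8000, 16, NULL, &ramht);
	if (ret)
		return ret;

	/* chid 0, addr 0, handle 0xbeef0000, context 0 (all placeholders) */
	cookie = nvkm_ramht_insert(ramht, object, 0, 0, 0xbeef0000, 0);
	if (cookie < 0) {
		nvkm_ramht_del(&ramht);
		return cookie;
	}

	inst = nvkm_ramht_search(ramht, 0, 0xbeef0000);
	nvkm_ramht_remove(ramht, cookie);
	nvkm_ramht_del(&ramht);
	return inst ? 0 : -ENOENT;
}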
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 6fdc39116aac..3b5dc9c63069 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -1,119 +1,50 @@
 #ifndef __NVKM_SUBDEV_H__
 #define __NVKM_SUBDEV_H__
-#include <core/object.h>
-#include <core/devidx.h>
-
-#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub))
-#define NV_SUBDEV(name,var)  NV_SUBDEV_(NVDEV_SUBDEV_##name, (var))
+#include <core/device.h>
 
 struct nvkm_subdev {
-	struct nvkm_object object;
+	const struct nvkm_subdev_func *func;
+	struct nvkm_device *device;
+	enum nvkm_devidx index;
+	u32 pmc_enable;
 	struct mutex mutex;
-	const char *name;
-	void __iomem *mmio;
 	u32 debug;
-	u32 unit;
 
-	void (*intr)(struct nvkm_subdev *);
+	bool oneinit;
 };
 
-static inline struct nvkm_subdev *
-nv_subdev(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_SUBDEV_CLASS)))
-		nv_assert("BAD CAST -> NvSubDev, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-static inline int
-nv_subidx(struct nvkm_subdev *subdev)
-{
-	return nv_hclass(subdev) & 0xff;
-}
-
-struct nvkm_subdev *nvkm_subdev(void *obj, int idx);
-
-#define nvkm_subdev_create(p,e,o,v,s,f,d)                                   \
-	nvkm_subdev_create_((p), (e), (o), (v), (s), (f),                   \
-			       sizeof(**d),(void **)d)
+struct nvkm_subdev_func {
+	void *(*dtor)(struct nvkm_subdev *);
+	int (*preinit)(struct nvkm_subdev *);
+	int (*oneinit)(struct nvkm_subdev *);
+	int (*init)(struct nvkm_subdev *);
+	int (*fini)(struct nvkm_subdev *, bool suspend);
+	void (*intr)(struct nvkm_subdev *);
+};
 
-int  nvkm_subdev_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    const char *sname, const char *fname,
-			    int size, void **);
-void nvkm_subdev_destroy(struct nvkm_subdev *);
+extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
+void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
+		      int index, u32 pmc_enable, struct nvkm_subdev *);
+void nvkm_subdev_del(struct nvkm_subdev **);
+int  nvkm_subdev_preinit(struct nvkm_subdev *);
 int  nvkm_subdev_init(struct nvkm_subdev *);
 int  nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
-void nvkm_subdev_reset(struct nvkm_object *);
-
-void _nvkm_subdev_dtor(struct nvkm_object *);
-int  _nvkm_subdev_init(struct nvkm_object *);
-int  _nvkm_subdev_fini(struct nvkm_object *, bool suspend);
-
-#define s_printk(s,l,f,a...) do {                                              \
-	if ((s)->debug >= OS_DBG_##l) {                                        \
-		nv_printk((s)->base.parent, (s)->name, l, f, ##a);             \
+void nvkm_subdev_intr(struct nvkm_subdev *);
+
+/* subdev logging */
+#define nvkm_printk_(s,l,p,f,a...) do {                                        \
+	struct nvkm_subdev *_subdev = (s);                                     \
+	if (_subdev->debug >= (l)) {                                           \
+		dev_##p(_subdev->device->dev, "%s: "f,                         \
+			nvkm_subdev_name[_subdev->index], ##a);                \
 	}                                                                      \
 } while(0)
-
-static inline u8
-nv_rd08(void *obj, u32 addr)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	u8 data = ioread8(subdev->mmio + addr);
-	nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data);
-	return data;
-}
-
-static inline u16
-nv_rd16(void *obj, u32 addr)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	u16 data = ioread16_native(subdev->mmio + addr);
-	nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data);
-	return data;
-}
-
-static inline u32
-nv_rd32(void *obj, u32 addr)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	u32 data = ioread32_native(subdev->mmio + addr);
-	nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data);
-	return data;
-}
-
-static inline void
-nv_wr08(void *obj, u32 addr, u8 data)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data);
-	iowrite8(data, subdev->mmio + addr);
-}
-
-static inline void
-nv_wr16(void *obj, u32 addr, u16 data)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data);
-	iowrite16_native(data, subdev->mmio + addr);
-}
-
-static inline void
-nv_wr32(void *obj, u32 addr, u32 data)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data);
-	iowrite32_native(data, subdev->mmio + addr);
-}
-
-static inline u32
-nv_mask(void *obj, u32 addr, u32 mask, u32 data)
-{
-	u32 temp = nv_rd32(obj, addr);
-	nv_wr32(obj, addr, (temp & ~mask) | data);
-	return temp;
-}
+#define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a)
+#define nvkm_fatal(s,f,a...) nvkm_printk((s), FATAL,   crit, f, ##a)
+#define nvkm_error(s,f,a...) nvkm_printk((s), ERROR,    err, f, ##a)
+#define nvkm_warn(s,f,a...)  nvkm_printk((s),  WARN, notice, f, ##a)
+#define nvkm_info(s,f,a...)  nvkm_printk((s),  INFO,   info, f, ##a)
+#define nvkm_debug(s,f,a...) nvkm_printk((s), DEBUG,   info, f, ##a)
+#define nvkm_trace(s,f,a...) nvkm_printk((s), TRACE,   info, f, ##a)
+#define nvkm_spam(s,f,a...)  nvkm_printk((s),  SPAM,    dbg, f, ##a)
 #endif
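
Subdevs now carry an explicit function table, and logging routes through the
per-subdev debug level to the matching dev_*() printk instead of the removed
nv_printk family. A sketch of both together; all "example_" names are
hypothetical:

static int
example_subdev_init(struct nvkm_subdev *subdev)
{
	/* emitted only when subdev->debug >= NV_DBG_DEBUG */
	nvkm_debug(subdev, "initialised\n");
	return 0;
}

static const struct nvkm_subdev_func
example_subdev_func = {
	.init = example_subdev_init,
};

static void
example_subdev_ctor(struct nvkm_device *device, int index,
		    struct nvkm_subdev *subdev)
{
	/* pmc_enable 0: no PMC enable bit handled here (illustrative) */
	nvkm_subdev_ctor(&example_subdev_func, device, index, 0, subdev);
}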
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
new file mode 100644
index 000000000000..5aa2480da25f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -0,0 +1,35 @@
+#ifndef __NVKM_DEVICE_TEGRA_H__
+#define __NVKM_DEVICE_TEGRA_H__
+#include <core/device.h>
+#include <core/mm.h>
+
+struct nvkm_device_tegra {
+	struct nvkm_device device;
+	struct platform_device *pdev;
+	int irq;
+
+	struct reset_control *rst;
+	struct clk *clk;
+	struct clk *clk_pwr;
+
+	struct regulator *vdd;
+
+	struct {
+		/*
+		 * Protects accesses to mm from subsystems
+		 */
+		struct mutex mutex;
+
+		struct nvkm_mm mm;
+		struct iommu_domain *domain;
+		unsigned long pgshift;
+	} iommu;
+
+	int gpu_speedo;
+};
+
+int nvkm_device_tegra_new(struct platform_device *,
+			  const char *cfg, const char *dbg,
+			  bool detect, bool mmio, u64 subdev_mask,
+			  struct nvkm_device **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index e489beef2b92..904820558fc0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_BSP_H__
 #define __NVKM_BSP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g84_bsp_oclass;
+#include <engine/xtensa.h>
+int g84_bsp_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index e832f729e1b4..e2e22cd5305b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -1,16 +1,9 @@
 #ifndef __NVKM_CE_H__
 #define __NVKM_CE_H__
-#include <core/engine.h>
+#include <engine/falcon.h>
 
-void gt215_ce_intr(struct nvkm_subdev *);
-
-extern struct nvkm_oclass gt215_ce_oclass;
-extern struct nvkm_oclass gf100_ce0_oclass;
-extern struct nvkm_oclass gf100_ce1_oclass;
-extern struct nvkm_oclass gk104_ce0_oclass;
-extern struct nvkm_oclass gk104_ce1_oclass;
-extern struct nvkm_oclass gk104_ce2_oclass;
-extern struct nvkm_oclass gm204_ce0_oclass;
-extern struct nvkm_oclass gm204_ce1_oclass;
-extern struct nvkm_oclass gm204_ce2_oclass;
+int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gm204_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 57c29e91bad5..03fa57a7c30a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_CIPHER_H__
 #define __NVKM_CIPHER_H__
 #include <core/engine.h>
-extern struct nvkm_oclass g84_cipher_oclass;
+int g84_cipher_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
deleted file mode 100644
index 5d4805e67e76..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __NOUVEAU_SUBDEV_DEVICE_H__
-#define __NOUVEAU_SUBDEV_DEVICE_H__
-
-#include <core/device.h>
-
-struct platform_device;
-
-enum nv_bus_type {
-	NOUVEAU_BUS_PCI,
-	NOUVEAU_BUS_PLATFORM,
-};
-
-#define nouveau_device_create(p,t,n,s,c,d,u)                                   \
-	nouveau_device_create_((void *)(p), (t), (n), (s), (c), (d),           \
-			       sizeof(**u), (void **)u)
-
-int  nouveau_device_create_(void *, enum nv_bus_type type, u64 name,
-			    const char *sname, const char *cfg, const char *dbg,
-			    int, void **);
-
-int nv04_identify(struct nouveau_device *);
-int nv10_identify(struct nouveau_device *);
-int nv20_identify(struct nouveau_device *);
-int nv30_identify(struct nouveau_device *);
-int nv40_identify(struct nouveau_device *);
-int nv50_identify(struct nouveau_device *);
-int nvc0_identify(struct nouveau_device *);
-int nve0_identify(struct nouveau_device *);
-int gm100_identify(struct nouveau_device *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index a5e1ed81312f..efc74d03346b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -1,32 +1,35 @@
 #ifndef __NVKM_DISP_H__
 #define __NVKM_DISP_H__
+#define nvkm_disp(p) container_of((p), struct nvkm_disp, engine)
 #include <core/engine.h>
 #include <core/event.h>
 
 struct nvkm_disp {
-	struct nvkm_engine base;
+	const struct nvkm_disp_func *func;
+	struct nvkm_engine engine;
+
+	struct nvkm_oproxy *client;
 
 	struct list_head outp;
+	struct list_head conn;
 
 	struct nvkm_event hpd;
 	struct nvkm_event vblank;
-};
 
-static inline struct nvkm_disp *
-nvkm_disp(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_DISP);
-}
+	struct {
+		int nr;
+	} head;
+};
 
-extern struct nvkm_oclass *nv04_disp_oclass;
-extern struct nvkm_oclass *nv50_disp_oclass;
-extern struct nvkm_oclass *g84_disp_oclass;
-extern struct nvkm_oclass *gt200_disp_oclass;
-extern struct nvkm_oclass *g94_disp_oclass;
-extern struct nvkm_oclass *gt215_disp_oclass;
-extern struct nvkm_oclass *gf110_disp_oclass;
-extern struct nvkm_oclass *gk104_disp_oclass;
-extern struct nvkm_oclass *gk110_disp_oclass;
-extern struct nvkm_oclass *gm107_disp_oclass;
-extern struct nvkm_oclass *gm204_disp_oclass;
+int nv04_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int nv50_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int g84_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gt200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int g94_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gt215_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gf119_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gm204_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
new file mode 100644
index 000000000000..114bfb737a81
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -0,0 +1,32 @@
+#ifndef __NVKM_DMA_H__
+#define __NVKM_DMA_H__
+#include <core/engine.h>
+struct nvkm_client;
+
+struct nvkm_dmaobj {
+	const struct nvkm_dmaobj_func *func;
+	struct nvkm_dma *dma;
+
+	struct nvkm_object object;
+	u32 target;
+	u32 access;
+	u64 start;
+	u64 limit;
+
+	struct rb_node rb;
+	u64 handle; /*XXX HANDLE MERGE */
+};
+
+struct nvkm_dma {
+	const struct nvkm_dma_func *func;
+	struct nvkm_engine engine;
+};
+
+struct nvkm_dmaobj *
+nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object);
+
+int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+#endif
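
Each DMA object now hangs off an rb-tree keyed by the client-side handle
(the rb/handle pair above), which is what nvkm_dma_search() walks. A lookup
sketch with a placeholder handle value:

static struct nvkm_dmaobj *
example_dma_lookup(struct nvkm_dma *dma, struct nvkm_client *client)
{
	/* 0x0000c0de is a placeholder object handle */
	return nvkm_dma_search(dma, client, 0x0000c0de);
}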
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
deleted file mode 100644
index c4fce8afcf83..000000000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __NVKM_DMAOBJ_H__
-#define __NVKM_DMAOBJ_H__
-#include <core/engine.h>
-struct nvkm_gpuobj;
-
-struct nvkm_dmaobj {
-	struct nvkm_object base;
-	u32 target;
-	u32 access;
-	u64 start;
-	u64 limit;
-};
-
-struct nvkm_dmaeng {
-	struct nvkm_engine base;
-
-	/* creates a "physical" dma object from a struct nvkm_dmaobj */
-	int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		    struct nvkm_gpuobj **);
-};
-
-extern struct nvkm_oclass *nv04_dmaeng_oclass;
-extern struct nvkm_oclass *nv50_dmaeng_oclass;
-extern struct nvkm_oclass *gf100_dmaeng_oclass;
-extern struct nvkm_oclass *gf110_dmaeng_oclass;
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index bd38cf9130fc..81c0bc66a9f8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,41 +1,18 @@
 #ifndef __NVKM_FALCON_H__
 #define __NVKM_FALCON_H__
-#include <core/engctx.h>
-
-struct nvkm_falcon_chan {
-	struct nvkm_engctx base;
-};
-
-#define nvkm_falcon_context_create(p,e,c,g,s,a,f,d)                         \
-	nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_falcon_context_destroy(d)                                      \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_falcon_context_init(d)                                         \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_falcon_context_fini(d,s)                                       \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_falcon_context_ctor _nvkm_engctx_ctor
-#define _nvkm_falcon_context_dtor _nvkm_engctx_dtor
-#define _nvkm_falcon_context_init _nvkm_engctx_init
-#define _nvkm_falcon_context_fini _nvkm_engctx_fini
-#define _nvkm_falcon_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_falcon_context_wr32 _nvkm_engctx_wr32
-
-struct nvkm_falcon_data {
-	bool external;
-};
-
+#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
 #include <core/engine.h>
+struct nvkm_fifo_chan;
 
 struct nvkm_falcon {
-	struct nvkm_engine base;
+	const struct nvkm_falcon_func *func;
+	struct nvkm_engine engine;
 
 	u32 addr;
 	u8  version;
 	u8  secret;
 
-	struct nvkm_gpuobj *core;
+	struct nvkm_memory *core;
 	bool external;
 
 	struct {
@@ -51,31 +28,21 @@ struct nvkm_falcon {
 	} data;
 };
 
-#define nv_falcon(priv) (&(priv)->base)
-
-#define nvkm_falcon_create(p,e,c,b,d,i,f,r)                                 \
-	nvkm_falcon_create_((p), (e), (c), (b), (d), (i), (f),              \
-			       sizeof(**r),(void **)r)
-#define nvkm_falcon_destroy(p)                                              \
-	nvkm_engine_destroy(&(p)->base)
-#define nvkm_falcon_init(p) ({                                              \
-	struct nvkm_falcon *falcon = (p);                                   \
-	_nvkm_falcon_init(nv_object(falcon));                               \
-})
-#define nvkm_falcon_fini(p,s) ({                                            \
-	struct nvkm_falcon *falcon = (p);                                   \
-	_nvkm_falcon_fini(nv_object(falcon), (s));                          \
-})
-
-int nvkm_falcon_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, u32, bool, const char *,
-			   const char *, int, void **);
+int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		     int index, bool enable, u32 addr, struct nvkm_engine **);
 
-void nvkm_falcon_intr(struct nvkm_subdev *subdev);
-
-#define _nvkm_falcon_dtor _nvkm_engine_dtor
-int  _nvkm_falcon_init(struct nvkm_object *);
-int  _nvkm_falcon_fini(struct nvkm_object *, bool);
-u32  _nvkm_falcon_rd32(struct nvkm_object *, u64);
-void _nvkm_falcon_wr32(struct nvkm_object *, u64, u32);
+struct nvkm_falcon_func {
+	struct {
+		u32 *data;
+		u32  size;
+	} code;
+	struct {
+		u32 *data;
+		u32  size;
+	} data;
+	u32 pmc_enable;
+	void (*init)(struct nvkm_falcon *);
+	void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+	struct nvkm_sclass sclass[];
+};
 #endif
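
Engines built on the falcon microcontroller now describe themselves through
nvkm_falcon_func (embedded ucode, PMC enable bit, class list) and are
created with nvkm_falcon_new_(). A sketch; the PMC bit, class number and
MMIO base are placeholders, and the static initialisation of the flexible
sclass[] array relies on the GNU extension the kernel already uses:

static const struct nvkm_falcon_func
example_falcon_func = {
	.pmc_enable = 0x00000002,	/* placeholder PMC bit */
	.sclass = {
		{ -1, -1, 0x0000beef },	/* placeholder class */
		{}
	}
};

static int
example_falcon_new(struct nvkm_device *device, int index,
		   struct nvkm_engine **pengine)
{
	/* addr 0x084000 is a placeholder MMIO base */
	return nvkm_falcon_new_(&example_falcon_func, device, index,
				true, 0x084000, pengine);
}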
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 97cdeab8e44c..9e6644955d19 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -1,127 +1,67 @@
 #ifndef __NVKM_FIFO_H__
 #define __NVKM_FIFO_H__
-#include <core/namedb.h>
+#include <core/engine.h>
+#include <core/event.h>
+
+#define NVKM_FIFO_CHID_NR 4096
+
+struct nvkm_fifo_engn {
+	struct nvkm_object *object;
+	int refcount;
+	int usecount;
+};
 
 struct nvkm_fifo_chan {
-	struct nvkm_namedb namedb;
-	struct nvkm_dmaobj *pushdma;
-	struct nvkm_gpuobj *pushgpu;
+	const struct nvkm_fifo_chan_func *func;
+	struct nvkm_fifo *fifo;
+	u64 engines;
+	struct nvkm_object object;
+
+	struct list_head head;
+	u16 chid;
+	struct nvkm_gpuobj *inst;
+	struct nvkm_gpuobj *push;
+	struct nvkm_vm *vm;
 	void __iomem *user;
 	u64 addr;
 	u32 size;
-	u16 chid;
-	atomic_t refcnt; /* NV04_NVSW_SET_REF */
-};
-
-static inline struct nvkm_fifo_chan *
-nvkm_fifo_chan(void *obj)
-{
-	return (void *)nv_namedb(obj);
-}
-
-#define nvkm_fifo_channel_create(p,e,c,b,a,s,n,m,d)                         \
-	nvkm_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n),        \
-				     (m), sizeof(**d), (void **)d)
-#define nvkm_fifo_channel_init(p)                                           \
-	nvkm_namedb_init(&(p)->namedb)
-#define nvkm_fifo_channel_fini(p,s)                                         \
-	nvkm_namedb_fini(&(p)->namedb, (s))
-
-int  nvkm_fifo_channel_create_(struct nvkm_object *,
-				  struct nvkm_object *,
-				  struct nvkm_oclass *,
-				  int bar, u32 addr, u32 size, u32 push,
-				  u64 engmask, int len, void **);
-void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *);
 
-#define _nvkm_fifo_channel_init _nvkm_namedb_init
-#define _nvkm_fifo_channel_fini _nvkm_namedb_fini
-
-void _nvkm_fifo_channel_dtor(struct nvkm_object *);
-int  _nvkm_fifo_channel_map(struct nvkm_object *, u64 *, u32 *);
-u32  _nvkm_fifo_channel_rd32(struct nvkm_object *, u64);
-void _nvkm_fifo_channel_wr32(struct nvkm_object *, u64, u32);
-int  _nvkm_fifo_channel_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
-
-#include <core/gpuobj.h>
-
-struct nvkm_fifo_base {
-	struct nvkm_gpuobj gpuobj;
+	struct nvkm_fifo_engn engn[NVKM_SUBDEV_NR];
 };
 
-#define nvkm_fifo_context_create(p,e,c,g,s,a,f,d)                           \
-	nvkm_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
-#define nvkm_fifo_context_destroy(p)                                        \
-	nvkm_gpuobj_destroy(&(p)->gpuobj)
-#define nvkm_fifo_context_init(p)                                           \
-	nvkm_gpuobj_init(&(p)->gpuobj)
-#define nvkm_fifo_context_fini(p,s)                                         \
-	nvkm_gpuobj_fini(&(p)->gpuobj, (s))
-
-#define _nvkm_fifo_context_dtor _nvkm_gpuobj_dtor
-#define _nvkm_fifo_context_init _nvkm_gpuobj_init
-#define _nvkm_fifo_context_fini _nvkm_gpuobj_fini
-#define _nvkm_fifo_context_rd32 _nvkm_gpuobj_rd32
-#define _nvkm_fifo_context_wr32 _nvkm_gpuobj_wr32
-
-#include <core/engine.h>
-#include <core/event.h>
-
 struct nvkm_fifo {
-	struct nvkm_engine base;
+	const struct nvkm_fifo_func *func;
+	struct nvkm_engine engine;
 
-	struct nvkm_event cevent; /* channel creation event */
-	struct nvkm_event uevent; /* async user trigger */
-
-	struct nvkm_object **channel;
+	DECLARE_BITMAP(mask, NVKM_FIFO_CHID_NR);
+	int nr;
+	struct list_head chan;
 	spinlock_t lock;
-	u16 min;
-	u16 max;
 
-	int  (*chid)(struct nvkm_fifo *, struct nvkm_object *);
-	void (*pause)(struct nvkm_fifo *, unsigned long *);
-	void (*start)(struct nvkm_fifo *, unsigned long *);
+	struct nvkm_event uevent; /* async user trigger */
+	struct nvkm_event cevent; /* channel creation event */
 };
 
-static inline struct nvkm_fifo *
-nvkm_fifo(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_FIFO);
-}
-
-#define nvkm_fifo_create(o,e,c,fc,lc,d)                                     \
-	nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
-#define nvkm_fifo_init(p)                                                   \
-	nvkm_engine_init(&(p)->base)
-#define nvkm_fifo_fini(p,s)                                                 \
-	nvkm_engine_fini(&(p)->base, (s))
-
-int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, int min, int max,
-			 int size, void **);
-void nvkm_fifo_destroy(struct nvkm_fifo *);
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid);
-
-#define _nvkm_fifo_init _nvkm_engine_init
-#define _nvkm_fifo_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass *nv04_fifo_oclass;
-extern struct nvkm_oclass *nv10_fifo_oclass;
-extern struct nvkm_oclass *nv17_fifo_oclass;
-extern struct nvkm_oclass *nv40_fifo_oclass;
-extern struct nvkm_oclass *nv50_fifo_oclass;
-extern struct nvkm_oclass *g84_fifo_oclass;
-extern struct nvkm_oclass *gf100_fifo_oclass;
-extern struct nvkm_oclass *gk104_fifo_oclass;
-extern struct nvkm_oclass *gk20a_fifo_oclass;
-extern struct nvkm_oclass *gk208_fifo_oclass;
-extern struct nvkm_oclass *gm204_fifo_oclass;
-
-int  nvkm_fifo_uevent_ctor(struct nvkm_object *, void *, u32,
-			   struct nvkm_notify *);
-void nvkm_fifo_uevent(struct nvkm_fifo *);
-
-void nv04_fifo_intr(struct nvkm_subdev *);
-int  nv04_fifo_context_attach(struct nvkm_object *, struct nvkm_object *);
+void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nvkm_fifo_start(struct nvkm_fifo *, unsigned long *);
+
+void nvkm_fifo_chan_put(struct nvkm_fifo *, unsigned long flags,
+			struct nvkm_fifo_chan **);
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);
+
+int nv04_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv10_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv17_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv40_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv50_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm204_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 #endif
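
Channel lookup now appears to return with the fifo lock held, with the saved
IRQ state carried in 'flags' until the matching nvkm_fifo_chan_put(). A
sketch of the lookup-by-chid pattern (the body of the critical section is
illustrative):

static void
example_fifo_poke(struct nvkm_fifo *fifo, int chid)
{
	unsigned long flags;
	struct nvkm_fifo_chan *chan =
		nvkm_fifo_chan_chid(fifo, chid, &flags);
	if (chan) {
		/* ... use chan->inst, chan->chid, etc. ... */
		nvkm_fifo_chan_put(fifo, flags, &chan);
	}
}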
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 7cbe20280760..f126e54d2e30 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -1,88 +1,46 @@
 #ifndef __NVKM_GR_H__
 #define __NVKM_GR_H__
-#include <core/engctx.h>
-
-struct nvkm_gr_chan {
-	struct nvkm_engctx base;
-};
-
-#define nvkm_gr_context_create(p,e,c,g,s,a,f,d)                          \
-	nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_gr_context_destroy(d)                                       \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_gr_context_init(d)                                          \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_gr_context_fini(d,s)                                        \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_gr_context_dtor _nvkm_engctx_dtor
-#define _nvkm_gr_context_init _nvkm_engctx_init
-#define _nvkm_gr_context_fini _nvkm_engctx_fini
-#define _nvkm_gr_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_gr_context_wr32 _nvkm_engctx_wr32
-
 #include <core/engine.h>
 
 struct nvkm_gr {
-	struct nvkm_engine base;
-
-	/* Returns chipset-specific counts of units packed into an u64.
-	 */
-	u64 (*units)(struct nvkm_gr *);
+	const struct nvkm_gr_func *func;
+	struct nvkm_engine engine;
 };
 
-static inline struct nvkm_gr *
-nvkm_gr(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_GR);
-}
-
-#define nvkm_gr_create(p,e,c,y,d)                                        \
-	nvkm_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
-#define nvkm_gr_destroy(d)                                               \
-	nvkm_engine_destroy(&(d)->base)
-#define nvkm_gr_init(d)                                                  \
-	nvkm_engine_init(&(d)->base)
-#define nvkm_gr_fini(d,s)                                                \
-	nvkm_engine_fini(&(d)->base, (s))
-
-#define _nvkm_gr_dtor _nvkm_engine_dtor
-#define _nvkm_gr_init _nvkm_engine_init
-#define _nvkm_gr_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass nv04_gr_oclass;
-extern struct nvkm_oclass nv10_gr_oclass;
-extern struct nvkm_oclass nv20_gr_oclass;
-extern struct nvkm_oclass nv25_gr_oclass;
-extern struct nvkm_oclass nv2a_gr_oclass;
-extern struct nvkm_oclass nv30_gr_oclass;
-extern struct nvkm_oclass nv34_gr_oclass;
-extern struct nvkm_oclass nv35_gr_oclass;
-extern struct nvkm_oclass nv40_gr_oclass;
-extern struct nvkm_oclass nv50_gr_oclass;
-extern struct nvkm_oclass *gf100_gr_oclass;
-extern struct nvkm_oclass *gf108_gr_oclass;
-extern struct nvkm_oclass *gf104_gr_oclass;
-extern struct nvkm_oclass *gf110_gr_oclass;
-extern struct nvkm_oclass *gf117_gr_oclass;
-extern struct nvkm_oclass *gf119_gr_oclass;
-extern struct nvkm_oclass *gk104_gr_oclass;
-extern struct nvkm_oclass *gk20a_gr_oclass;
-extern struct nvkm_oclass *gk110_gr_oclass;
-extern struct nvkm_oclass *gk110b_gr_oclass;
-extern struct nvkm_oclass *gk208_gr_oclass;
-extern struct nvkm_oclass *gm107_gr_oclass;
-extern struct nvkm_oclass *gm204_gr_oclass;
-extern struct nvkm_oclass *gm206_gr_oclass;
-
-#include <core/enum.h>
-
-extern const struct nvkm_bitfield nv04_gr_nsource[];
-extern struct nvkm_ofuncs nv04_gr_ofuncs;
-bool nv04_gr_idle(void *obj);
-
-extern const struct nvkm_bitfield nv10_gr_intr_name[];
-extern const struct nvkm_bitfield nv10_gr_nstatus[];
-
-extern const struct nvkm_enum nv50_data_error_names[];
+u64 nvkm_gr_units(struct nvkm_gr *);
+int nvkm_gr_tlb_flush(struct nvkm_gr *);
+
+int nv04_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv10_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv15_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv17_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv20_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv25_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv2a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv30_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv34_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv35_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv40_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv44_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv50_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int g84_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gt200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int mcp79_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gt215_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int mcp89_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf117_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf119_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk110b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm204_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm206_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 4e500b398064..257738eff9f6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -1,62 +1,9 @@
 #ifndef __NVKM_MPEG_H__
 #define __NVKM_MPEG_H__
-#include <core/engctx.h>
-
-struct nvkm_mpeg_chan {
-	struct nvkm_engctx base;
-};
-
-#define nvkm_mpeg_context_create(p,e,c,g,s,a,f,d)                           \
-	nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_mpeg_context_destroy(d)                                        \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_mpeg_context_init(d)                                           \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_mpeg_context_fini(d,s)                                         \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_mpeg_context_dtor _nvkm_engctx_dtor
-#define _nvkm_mpeg_context_init _nvkm_engctx_init
-#define _nvkm_mpeg_context_fini _nvkm_engctx_fini
-#define _nvkm_mpeg_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_mpeg_context_wr32 _nvkm_engctx_wr32
-
 #include <core/engine.h>
-
-struct nvkm_mpeg {
-	struct nvkm_engine base;
-};
-
-#define nvkm_mpeg_create(p,e,c,d)                                           \
-	nvkm_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
-#define nvkm_mpeg_destroy(d)                                                \
-	nvkm_engine_destroy(&(d)->base)
-#define nvkm_mpeg_init(d)                                                   \
-	nvkm_engine_init(&(d)->base)
-#define nvkm_mpeg_fini(d,s)                                                 \
-	nvkm_engine_fini(&(d)->base, (s))
-
-#define _nvkm_mpeg_dtor _nvkm_engine_dtor
-#define _nvkm_mpeg_init _nvkm_engine_init
-#define _nvkm_mpeg_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass nv31_mpeg_oclass;
-extern struct nvkm_oclass nv40_mpeg_oclass;
-extern struct nvkm_oclass nv44_mpeg_oclass;
-extern struct nvkm_oclass nv50_mpeg_oclass;
-extern struct nvkm_oclass g84_mpeg_oclass;
-extern struct nvkm_ofuncs nv31_mpeg_ofuncs;
-extern struct nvkm_oclass nv31_mpeg_cclass;
-extern struct nvkm_oclass nv31_mpeg_sclass[];
-extern struct nvkm_oclass nv40_mpeg_sclass[];
-void nv31_mpeg_intr(struct nvkm_subdev *);
-void nv31_mpeg_tile_prog(struct nvkm_engine *, int);
-int  nv31_mpeg_init(struct nvkm_object *);
-
-extern struct nvkm_ofuncs nv50_mpeg_ofuncs;
-int  nv50_mpeg_context_ctor(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, void *, u32,
-			    struct nvkm_object **);
-void nv50_mpeg_intr(struct nvkm_subdev *);
-int  nv50_mpeg_init(struct nvkm_object *);
+int nv31_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv40_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv44_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv50_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int g84_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index 54b7672eed9c..08516ca82e04 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -1,7 +1,8 @@
 #ifndef __NVKM_MSPDEC_H__
 #define __NVKM_MSPDEC_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_mspdec_oclass;
-extern struct nvkm_oclass gf100_mspdec_oclass;
-extern struct nvkm_oclass gk104_mspdec_oclass;
+#include <engine/falcon.h>
+int g98_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index c6c69d0a8d01..85fd306021ac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -1,6 +1,7 @@
 #ifndef __NVKM_MSPPP_H__
 #define __NVKM_MSPPP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_msppp_oclass;
-extern struct nvkm_oclass gf100_msppp_oclass;
+#include <engine/falcon.h>
+int g98_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 1f193b7bd6c5..99757ed96f76 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -1,7 +1,9 @@
 #ifndef __NVKM_MSVLD_H__
 #define __NVKM_MSVLD_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_msvld_oclass;
-extern struct nvkm_oclass gf100_msvld_oclass;
-extern struct nvkm_oclass gk104_msvld_oclass;
+#include <engine/falcon.h>
+int g98_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int mcp89_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 93181bbf0f63..240855ad8c8d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -2,33 +2,24 @@
 #define __NVKM_PM_H__
 #include <core/engine.h>
 
-struct nvkm_perfdom;
-struct nvkm_perfctr;
 struct nvkm_pm {
-	struct nvkm_engine base;
+	const struct nvkm_pm_func *func;
+	struct nvkm_engine engine;
 
-	struct nvkm_perfctx *context;
-	void *profile_data;
+	struct nvkm_object *perfmon;
 
 	struct list_head domains;
+	struct list_head sources;
 	u32 sequence;
-
-	/*XXX: temp for daemon backend */
-	u32 pwr[8];
-	u32 last;
 };
 
-static inline struct nvkm_pm *
-nvkm_pm(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_PM);
-}
-
-extern struct nvkm_oclass *nv40_pm_oclass;
-extern struct nvkm_oclass *nv50_pm_oclass;
-extern struct nvkm_oclass *g84_pm_oclass;
-extern struct nvkm_oclass *gt215_pm_oclass;
-extern struct nvkm_oclass gf100_pm_oclass;
-extern struct nvkm_oclass gk104_pm_oclass;
-extern struct nvkm_oclass gk110_pm_oclass;
+int nv40_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int nv50_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int g84_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gt200_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gt215_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf100_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf108_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf117_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gk104_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index 44590a2a479d..7317ef4c0207 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_SEC_H__
 #define __NVKM_SEC_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_sec_oclass;
+#include <engine/falcon.h>
+int g98_sec_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index a529013c92ab..096e7dbd1e65 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -1,50 +1,18 @@
 #ifndef __NVKM_SW_H__
 #define __NVKM_SW_H__
-#include <core/engctx.h>
-
-struct nvkm_sw_chan {
-	struct nvkm_engctx base;
-
-	int (*flip)(void *);
-	void *flip_data;
-};
-
-#define nvkm_sw_context_create(p,e,c,d)                               \
-	nvkm_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
-#define nvkm_sw_context_destroy(d)                                    \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_sw_context_init(d)                                       \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_sw_context_fini(d,s)                                     \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_sw_context_dtor _nvkm_engctx_dtor
-#define _nvkm_sw_context_init _nvkm_engctx_init
-#define _nvkm_sw_context_fini _nvkm_engctx_fini
-
 #include <core/engine.h>
 
 struct nvkm_sw {
-	struct nvkm_engine base;
-};
+	const struct nvkm_sw_func *func;
+	struct nvkm_engine engine;
 
-#define nvkm_sw_create(p,e,c,d)                                       \
-	nvkm_engine_create((p), (e), (c), true, "SW", "software", (d))
-#define nvkm_sw_destroy(d)                                            \
-	nvkm_engine_destroy(&(d)->base)
-#define nvkm_sw_init(d)                                               \
-	nvkm_engine_init(&(d)->base)
-#define nvkm_sw_fini(d,s)                                             \
-	nvkm_engine_fini(&(d)->base, (s))
-
-#define _nvkm_sw_dtor _nvkm_engine_dtor
-#define _nvkm_sw_init _nvkm_engine_init
-#define _nvkm_sw_fini _nvkm_engine_fini
+	struct list_head chan;
+};
 
-extern struct nvkm_oclass *nv04_sw_oclass;
-extern struct nvkm_oclass *nv10_sw_oclass;
-extern struct nvkm_oclass *nv50_sw_oclass;
-extern struct nvkm_oclass *gf100_sw_oclass;
+bool nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data);
 
-void nv04_sw_intr(struct nvkm_subdev *);
+int nv04_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv10_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv50_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int gf100_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 7851f18c5add..616ea91e03f8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_VP_H__
 #define __NVKM_VP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g84_vp_oclass;
+#include <engine/xtensa.h>
+int g84_vp_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index 7a216cca2865..3128d21a5d1a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -1,35 +1,23 @@
 #ifndef __NVKM_XTENSA_H__
 #define __NVKM_XTENSA_H__
+#define nvkm_xtensa(p) container_of((p), struct nvkm_xtensa, engine)
 #include <core/engine.h>
-struct nvkm_gpuobj;
 
 struct nvkm_xtensa {
-	struct nvkm_engine base;
-
+	const struct nvkm_xtensa_func *func;
 	u32 addr;
-	struct nvkm_gpuobj *gpu_fw;
-	u32 fifo_val;
-	u32 unkd28;
-};
+	struct nvkm_engine engine;
 
-#define nvkm_xtensa_create(p,e,c,b,d,i,f,r)				\
-	nvkm_xtensa_create_((p), (e), (c), (b), (d), (i), (f),	\
-			       sizeof(**r),(void **)r)
+	struct nvkm_memory *gpu_fw;
+};
 
-int _nvkm_xtensa_engctx_ctor(struct nvkm_object *,
-				struct nvkm_object *,
-				struct nvkm_oclass *, void *, u32,
-				struct nvkm_object **);
+int nvkm_xtensa_new_(const struct nvkm_xtensa_func *, struct nvkm_device *,
+		     int index, bool enable, u32 addr, struct nvkm_engine **);
 
-void _nvkm_xtensa_intr(struct nvkm_subdev *);
-int nvkm_xtensa_create_(struct nvkm_object *,
-			   struct nvkm_object *,
-			   struct nvkm_oclass *, u32, bool,
-			   const char *, const char *,
-			   int, void **);
-#define _nvkm_xtensa_dtor _nvkm_engine_dtor
-int _nvkm_xtensa_init(struct nvkm_object *);
-int _nvkm_xtensa_fini(struct nvkm_object *, bool);
-u32  _nvkm_xtensa_rd32(struct nvkm_object *, u64);
-void _nvkm_xtensa_wr32(struct nvkm_object *, u64, u32);
+struct nvkm_xtensa_func {
+	u32 pmc_enable;
+	u32 fifo_val;
+	u32 unkd28;
+	struct nvkm_sclass sclass[];
+};
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index c7a007b8bc10..d3071b5a4f98 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -1,33 +1,24 @@
 #ifndef __NVKM_BAR_H__
 #define __NVKM_BAR_H__
 #include <core/subdev.h>
-struct nvkm_mem;
 struct nvkm_vma;
 
 struct nvkm_bar {
-	struct nvkm_subdev base;
+	const struct nvkm_bar_func *func;
+	struct nvkm_subdev subdev;
 
-	int  (*alloc)(struct nvkm_bar *, struct nvkm_object *,
-		      struct nvkm_mem *, struct nvkm_object **);
-
-	int  (*kmap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
-		     struct nvkm_vma *);
-	int  (*umap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
-		     struct nvkm_vma *);
-	void (*unmap)(struct nvkm_bar *, struct nvkm_vma *);
-	void (*flush)(struct nvkm_bar *);
+	spinlock_t lock;
 
 	/* whether the BAR supports being ioremapped WC or should be uncached */
 	bool iomap_uncached;
 };
 
-static inline struct nvkm_bar *
-nvkm_bar(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BAR);
-}
+void nvkm_bar_flush(struct nvkm_bar *);
+struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
+int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
 
-extern struct nvkm_oclass nv50_bar_oclass;
-extern struct nvkm_oclass gf100_bar_oclass;
-extern struct nvkm_oclass gk20a_bar_oclass;
+int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 #endif
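
The bar interface shrinks to three entry points; nvkm_bar_umap() carves a
window of the given size into the BAR's user VM (BAR1 on most chips,
assumed) and fills in the supplied vma. A sketch with placeholder values
(reading 'type' as an MMU page-type value is also an assumption):

static int
example_bar_window(struct nvkm_bar *bar, struct nvkm_vma *vma)
{
	/* 0x10000 bytes, page type 12: both placeholders */
	return nvkm_bar_umap(bar, 0x10000, 12, vma);
}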
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index cef287e0bbf2..e39a1fea930b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -3,7 +3,7 @@
 #include <core/subdev.h>
 
 struct nvkm_bios {
-	struct nvkm_subdev base;
+	struct nvkm_subdev subdev;
 	u32 size;
 	u8 *data;
 
@@ -19,14 +19,13 @@ struct nvkm_bios {
 	} version;
 };
 
-static inline struct nvkm_bios *
-nvkm_bios(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VBIOS);
-}
-
 u8  nvbios_checksum(const u8 *data, int size);
 u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
+int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
+
+#define nvbios_rd08(b,o) (b)->data[(o)]
+#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
+#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
 
-extern struct nvkm_oclass nvkm_bios_oclass;
+int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
index 4107aa546a21..3f0c7c414026 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
@@ -4,8 +4,8 @@ static inline u16
 bmp_version(struct nvkm_bios *bios)
 {
 	if (bios->bmp_offset) {
-		return nv_ro08(bios, bios->bmp_offset + 5) << 8 |
-		       nv_ro08(bios, bios->bmp_offset + 6);
+		return nvbios_rd08(bios, bios->bmp_offset + 5) << 8 |
+		       nvbios_rd08(bios, bios->bmp_offset + 6);
 	}
 
 	return 0x0000;
@@ -15,7 +15,7 @@ static inline u16
 bmp_mem_init_table(struct nvkm_bios *bios)
 {
 	if (bmp_version(bios) >= 0x0300)
-		return nv_ro16(bios, bios->bmp_offset + 24);
+		return nvbios_rd16(bios, bios->bmp_offset + 24);
 	return 0x0000;
 }
 
@@ -23,7 +23,7 @@ static inline u16
 bmp_sdr_seq_table(struct nvkm_bios *bios)
 {
 	if (bmp_version(bios) >= 0x0300)
-		return nv_ro16(bios, bios->bmp_offset + 26);
+		return nvbios_rd16(bios, bios->bmp_offset + 26);
 	return 0x0000;
 }
 
@@ -31,7 +31,7 @@ static inline u16
 bmp_ddr_seq_table(struct nvkm_bios *bios)
 {
 	if (bmp_version(bios) >= 0x0300)
-		return nv_ro16(bios, bios->bmp_offset + 28);
+		return nvbios_rd16(bios, bios->bmp_offset + 28);
 	return 0x0000;
 }
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
index 578a667eed3b..4dc1c8af840c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
@@ -1,5 +1,6 @@
 #ifndef __NVBIOS_INIT_H__
 #define __NVBIOS_INIT_H__
+
 struct nvbios_init {
 	struct nvkm_subdev *subdev;
 	struct nvkm_bios *bios;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
index 420426793880..3a9abd38aca8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
@@ -7,6 +7,11 @@ struct nvbios_ramcfg {
 	unsigned rammap_max;
 	union {
 		struct {
+			unsigned rammap_00_16_20:1;
+			unsigned rammap_00_16_40:1;
+			unsigned rammap_00_17_02:1;
+		};
+		struct {
 			unsigned rammap_10_04_02:1;
 			unsigned rammap_10_04_08:1;
 		};
@@ -32,15 +37,32 @@ struct nvbios_ramcfg {
 	unsigned ramcfg_ver;
 	unsigned ramcfg_hdr;
 	unsigned ramcfg_timing;
+	unsigned ramcfg_DLLoff;
+	unsigned ramcfg_RON;
 	union {
 		struct {
+			unsigned ramcfg_00_03_01:1;
+			unsigned ramcfg_00_03_02:1;
+			unsigned ramcfg_00_03_08:1;
+			unsigned ramcfg_00_03_10:1;
+			unsigned ramcfg_00_04_02:1;
+			unsigned ramcfg_00_04_04:1;
+			unsigned ramcfg_00_04_20:1;
+			unsigned ramcfg_00_05:8;
+			unsigned ramcfg_00_06:8;
+			unsigned ramcfg_00_07:8;
+			unsigned ramcfg_00_08:8;
+			unsigned ramcfg_00_09:8;
+			unsigned ramcfg_00_0a_0f:4;
+			unsigned ramcfg_00_0a_f0:4;
+		};
+		struct {
 			unsigned ramcfg_10_02_01:1;
 			unsigned ramcfg_10_02_02:1;
 			unsigned ramcfg_10_02_04:1;
 			unsigned ramcfg_10_02_08:1;
 			unsigned ramcfg_10_02_10:1;
 			unsigned ramcfg_10_02_20:1;
-			unsigned ramcfg_10_DLLoff:1;
 			unsigned ramcfg_10_03_0f:4;
 			unsigned ramcfg_10_04_01:1;
 			unsigned ramcfg_10_05:8;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
index 609a905ec780..8d8ee13721ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
@@ -7,6 +7,8 @@ u32 nvbios_rammapTe(struct nvkm_bios *, u8 *ver, u8 *hdr,
 
 u32 nvbios_rammapEe(struct nvkm_bios *, int idx,
 		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
+		    struct nvbios_ramcfg *p);
 u32 nvbios_rammapEp(struct nvkm_bios *, int idx,
 		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
 u32 nvbios_rammapEm(struct nvkm_bios *, u16 mhz,
@@ -15,6 +17,8 @@ u32 nvbios_rammapEm(struct nvkm_bios *, u16 mhz,
 u32 nvbios_rammapSe(struct nvkm_bios *, u32 data,
 		    u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
 		    u8 *ver, u8 *hdr);
+u32 nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
+		    struct nvbios_ramcfg *p);
 u32 nvbios_rammapSp(struct nvkm_bios *, u32 data,
 		    u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
 		    u8 *ver, u8 *hdr, struct nvbios_ramcfg *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index fba83c04849e..6a04d9c07944 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -2,49 +2,23 @@
 #define __NVKM_BUS_H__
 #include <core/subdev.h>
 
-struct nvkm_bus_intr {
-	u32 stat;
-	u32 unit;
-};
-
 struct nvkm_bus {
-	struct nvkm_subdev base;
-	int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
-	u32 hwsq_size;
+	const struct nvkm_bus_func *func;
+	struct nvkm_subdev subdev;
 };
 
-static inline struct nvkm_bus *
-nvkm_bus(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BUS);
-}
-
-#define nvkm_bus_create(p, e, o, d)                                         \
-	nvkm_subdev_create_((p), (e), (o), 0, "PBUS", "master",             \
-			       sizeof(**d), (void **)d)
-#define nvkm_bus_destroy(p)                                                 \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_bus_init(p)                                                    \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_bus_fini(p, s)                                                 \
-	nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_bus_dtor _nvkm_subdev_dtor
-#define _nvkm_bus_init _nvkm_subdev_init
-#define _nvkm_bus_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass *nv04_bus_oclass;
-extern struct nvkm_oclass *nv31_bus_oclass;
-extern struct nvkm_oclass *nv50_bus_oclass;
-extern struct nvkm_oclass *g94_bus_oclass;
-extern struct nvkm_oclass *gf100_bus_oclass;
-
 /* interface to sequencer */
 struct nvkm_hwsq;
-int  nvkm_hwsq_init(struct nvkm_bus *, struct nvkm_hwsq **);
+int  nvkm_hwsq_init(struct nvkm_subdev *, struct nvkm_hwsq **);
 int  nvkm_hwsq_fini(struct nvkm_hwsq **, bool exec);
 void nvkm_hwsq_wr32(struct nvkm_hwsq *, u32 addr, u32 data);
 void nvkm_hwsq_setf(struct nvkm_hwsq *, u8 flag, int data);
 void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
 void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
+
+int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv31_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv50_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int g94_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int gf100_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
 #endif
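
The hwsq sequencer keeps its record-then-execute model, but nvkm_hwsq_init() now takes the owning subdev instead of the bus object. A hedged sketch of the usual lifecycle; the register offset and delay are placeholders:

	static int
	example_hwsq(struct nvkm_subdev *subdev)
	{
		struct nvkm_hwsq *hwsq;
		int ret = nvkm_hwsq_init(subdev, &hwsq);
		if (ret)
			return ret;
		nvkm_hwsq_wr32(hwsq, 0x001000, 0x00000001); /* queue a register write */
		nvkm_hwsq_nsec(hwsq, 1000);                 /* queue a sequenced delay */
		return nvkm_hwsq_fini(&hwsq, true);         /* true: execute the sequence */
	}
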
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index f5d303850d8c..8708f0a4e188 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -71,9 +71,10 @@ struct nvkm_domain {
 };
 
 struct nvkm_clk {
-	struct nvkm_subdev base;
+	const struct nvkm_clk_func *func;
+	struct nvkm_subdev subdev;
 
-	struct nvkm_domain *domains;
+	const struct nvkm_domain *domains;
 	struct nvkm_pstate bstate;
 
 	struct list_head states;
@@ -94,68 +95,27 @@ struct nvkm_clk {
 
 	bool allow_reclock;
 
-	int  (*read)(struct nvkm_clk *, enum nv_clk_src);
-	int  (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
-	int  (*prog)(struct nvkm_clk *);
-	void (*tidy)(struct nvkm_clk *);
-
 	/*XXX: die, these are here *only* to support the completely
-	 *     bat-shit insane what-was-nvkm_hw.c code
+	 *     bat-shit insane what-was-nouveau_hw.c code
 	 */
 	int (*pll_calc)(struct nvkm_clk *, struct nvbios_pll *, int clk,
 			struct nvkm_pll_vals *pv);
 	int (*pll_prog)(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *pv);
 };
 
-static inline struct nvkm_clk *
-nvkm_clk(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_CLK);
-}
-
-#define nvkm_clk_create(p,e,o,i,r,s,n,d)                                  \
-	nvkm_clk_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d),  \
-			      (void **)d)
-#define nvkm_clk_destroy(p) ({                                            \
-	struct nvkm_clk *clk = (p);                                       \
-	_nvkm_clk_dtor(nv_object(clk));                                   \
-})
-#define nvkm_clk_init(p) ({                                               \
-	struct nvkm_clk *clk = (p);                                       \
-	_nvkm_clk_init(nv_object(clk));                                   \
-})
-#define nvkm_clk_fini(p,s) ({                                             \
-	struct nvkm_clk *clk = (p);                                       \
-	_nvkm_clk_fini(nv_object(clk), (s));                              \
-})
-
-int  nvkm_clk_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *,
-			   struct nvkm_domain *, struct nvkm_pstate *,
-			   int, bool, int, void **);
-void _nvkm_clk_dtor(struct nvkm_object *);
-int  _nvkm_clk_init(struct nvkm_object *);
-int  _nvkm_clk_fini(struct nvkm_object *, bool);
-
-extern struct nvkm_oclass nv04_clk_oclass;
-extern struct nvkm_oclass nv40_clk_oclass;
-extern struct nvkm_oclass *nv50_clk_oclass;
-extern struct nvkm_oclass *g84_clk_oclass;
-extern struct nvkm_oclass *mcp77_clk_oclass;
-extern struct nvkm_oclass gt215_clk_oclass;
-extern struct nvkm_oclass gf100_clk_oclass;
-extern struct nvkm_oclass gk104_clk_oclass;
-extern struct nvkm_oclass gk20a_clk_oclass;
-
-int nv04_clk_pll_set(struct nvkm_clk *, u32 type, u32 freq);
-int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
-		      struct nvkm_pll_vals *);
-int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
-int gt215_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *,
-		       int clk, struct nvkm_pll_vals *);
-
+int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
 int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
 int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
 int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
 int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+
+int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv50_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int g84_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int mcp77_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gt215_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
 #endif
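
The per-object read/calc/prog/tidy pointers move behind nvkm_clk_func, with reads exposed through nvkm_clk_read(), and every implementation is reached via the common constructor signature. A sketch; NVKM_SUBDEV_CLK and nv_clk_src_core are assumed from the core headers rather than confirmed by this hunk:

	struct nvkm_clk *clk;
	int ret = gf100_clk_new(device, NVKM_SUBDEV_CLK, &clk);
	if (ret == 0)
		ret = nvkm_clk_read(clk, nv_clk_src_core); /* current core-domain clock */
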
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index d1bbe0d62b35..6c1407fd317b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -1,32 +1,31 @@
 #ifndef __NVKM_DEVINIT_H__
 #define __NVKM_DEVINIT_H__
 #include <core/subdev.h>
+struct nvkm_devinit;
 
 struct nvkm_devinit {
-	struct nvkm_subdev base;
+	const struct nvkm_devinit_func *func;
+	struct nvkm_subdev subdev;
 	bool post;
-	void (*meminit)(struct nvkm_devinit *);
-	int  (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
-	u32  (*mmio)(struct nvkm_devinit *, u32 addr);
 };
 
-static inline struct nvkm_devinit *
-nvkm_devinit(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_DEVINIT);
-}
+u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr);
+int nvkm_devinit_pll_set(struct nvkm_devinit *, u32 type, u32 khz);
+void nvkm_devinit_meminit(struct nvkm_devinit *);
+u64 nvkm_devinit_disable(struct nvkm_devinit *);
+int nvkm_devinit_post(struct nvkm_devinit *, u64 *disable);
 
-extern struct nvkm_oclass *nv04_devinit_oclass;
-extern struct nvkm_oclass *nv05_devinit_oclass;
-extern struct nvkm_oclass *nv10_devinit_oclass;
-extern struct nvkm_oclass *nv1a_devinit_oclass;
-extern struct nvkm_oclass *nv20_devinit_oclass;
-extern struct nvkm_oclass *nv50_devinit_oclass;
-extern struct nvkm_oclass *g84_devinit_oclass;
-extern struct nvkm_oclass *g98_devinit_oclass;
-extern struct nvkm_oclass *gt215_devinit_oclass;
-extern struct nvkm_oclass *mcp89_devinit_oclass;
-extern struct nvkm_oclass *gf100_devinit_oclass;
-extern struct nvkm_oclass *gm107_devinit_oclass;
-extern struct nvkm_oclass *gm204_devinit_oclass;
+int nv04_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv05_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv10_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv1a_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv20_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv50_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int g84_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int g98_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gt215_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gm204_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 #endif
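
The meminit/pll_set/mmio hooks likewise disappear behind nvkm_devinit_func, reached through the wrappers above. nvkm_devinit_post() runs the VBIOS init tables when required and reports units the board has disabled; a hedged sketch of consuming that, assuming the mask is keyed by engine index:

	static int
	example_post(struct nvkm_device *device, bool *gr_present)
	{
		u64 disable = 0;
		int ret = nvkm_devinit_post(device->devinit, &disable);
		if (ret)
			return ret;
		*gr_present = !(disable & (1ULL << NVKM_ENGINE_GR)); /* bit layout assumed */
		return 0;
	}
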
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 16da56cf43b0..85ab72c7f821 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -18,7 +18,7 @@
 #define NV_MEM_TARGET_VM          3
 #define NV_MEM_TARGET_GART        4
 
-#define NV_MEM_TYPE_VM 0x7f
+#define NVKM_RAM_TYPE_VM 0x7f
 #define NV_MEM_COMP_VM 0x03
 
 struct nvkm_mem {
@@ -46,62 +46,47 @@ struct nvkm_fb_tile {
 };
 
 struct nvkm_fb {
-	struct nvkm_subdev base;
-
-	bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
+	const struct nvkm_fb_func *func;
+	struct nvkm_subdev subdev;
 
 	struct nvkm_ram *ram;
 
-	struct nvkm_mm vram;
-	struct nvkm_mm tags;
-
 	struct {
 		struct nvkm_fb_tile region[16];
 		int regions;
-		void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
-			     u32 pitch, u32 flags, struct nvkm_fb_tile *);
-		void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
-			     struct nvkm_fb_tile *);
-		void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-		void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
 	} tile;
 };
 
-static inline struct nvkm_fb *
-nvkm_fb(void *obj)
-{
-	/* fbram uses this before device subdev pointer is valid */
-	if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
-	    nv_subidx(obj) == NVDEV_SUBDEV_FB)
-		return obj;
-
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FB);
-}
-
-extern struct nvkm_oclass *nv04_fb_oclass;
-extern struct nvkm_oclass *nv10_fb_oclass;
-extern struct nvkm_oclass *nv1a_fb_oclass;
-extern struct nvkm_oclass *nv20_fb_oclass;
-extern struct nvkm_oclass *nv25_fb_oclass;
-extern struct nvkm_oclass *nv30_fb_oclass;
-extern struct nvkm_oclass *nv35_fb_oclass;
-extern struct nvkm_oclass *nv36_fb_oclass;
-extern struct nvkm_oclass *nv40_fb_oclass;
-extern struct nvkm_oclass *nv41_fb_oclass;
-extern struct nvkm_oclass *nv44_fb_oclass;
-extern struct nvkm_oclass *nv46_fb_oclass;
-extern struct nvkm_oclass *nv47_fb_oclass;
-extern struct nvkm_oclass *nv49_fb_oclass;
-extern struct nvkm_oclass *nv4e_fb_oclass;
-extern struct nvkm_oclass *nv50_fb_oclass;
-extern struct nvkm_oclass *g84_fb_oclass;
-extern struct nvkm_oclass *gt215_fb_oclass;
-extern struct nvkm_oclass *mcp77_fb_oclass;
-extern struct nvkm_oclass *mcp89_fb_oclass;
-extern struct nvkm_oclass *gf100_fb_oclass;
-extern struct nvkm_oclass *gk104_fb_oclass;
-extern struct nvkm_oclass *gk20a_fb_oclass;
-extern struct nvkm_oclass *gm107_fb_oclass;
+bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
+void nvkm_fb_tile_prog(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
+
+int nv04_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv10_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv1a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv20_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv25_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv30_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv35_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv36_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv40_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv41_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv44_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv46_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv47_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv49_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv4e_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv50_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int g84_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 
 #include <subdev/bios.h>
 #include <subdev/bios/ramcfg.h>
@@ -112,36 +97,35 @@ struct nvkm_ram_data {
 	u32 freq;
 };
 
+enum nvkm_ram_type {
+	NVKM_RAM_TYPE_UNKNOWN = 0,
+	NVKM_RAM_TYPE_STOLEN,
+	NVKM_RAM_TYPE_SGRAM,
+	NVKM_RAM_TYPE_SDRAM,
+	NVKM_RAM_TYPE_DDR1,
+	NVKM_RAM_TYPE_DDR2,
+	NVKM_RAM_TYPE_DDR3,
+	NVKM_RAM_TYPE_GDDR2,
+	NVKM_RAM_TYPE_GDDR3,
+	NVKM_RAM_TYPE_GDDR4,
+	NVKM_RAM_TYPE_GDDR5
+};
+
 struct nvkm_ram {
-	struct nvkm_object base;
-	enum {
-		NV_MEM_TYPE_UNKNOWN = 0,
-		NV_MEM_TYPE_STOLEN,
-		NV_MEM_TYPE_SGRAM,
-		NV_MEM_TYPE_SDRAM,
-		NV_MEM_TYPE_DDR1,
-		NV_MEM_TYPE_DDR2,
-		NV_MEM_TYPE_DDR3,
-		NV_MEM_TYPE_GDDR2,
-		NV_MEM_TYPE_GDDR3,
-		NV_MEM_TYPE_GDDR4,
-		NV_MEM_TYPE_GDDR5
-	} type;
-	u64 stolen;
+	const struct nvkm_ram_func *func;
+	struct nvkm_fb *fb;
+	enum nvkm_ram_type type;
 	u64 size;
-	u32 tags;
+
+#define NVKM_RAM_MM_SHIFT 12
+	struct nvkm_mm vram;
+	struct nvkm_mm tags;
+	u64 stolen;
 
 	int ranks;
 	int parts;
 	int part_mask;
 
-	int  (*get)(struct nvkm_fb *, u64 size, u32 align, u32 size_nc,
-		    u32 type, struct nvkm_mem **);
-	void (*put)(struct nvkm_fb *, struct nvkm_mem **);
-
-	int  (*calc)(struct nvkm_fb *, u32 freq);
-	int  (*prog)(struct nvkm_fb *);
-	void (*tidy)(struct nvkm_fb *);
 	u32 freq;
 	u32 mr[16];
 	u32 mr1_nuts;
@@ -151,4 +135,17 @@ struct nvkm_ram {
 	struct nvkm_ram_data xition;
 	struct nvkm_ram_data target;
 };
+
+struct nvkm_ram_func {
+	void *(*dtor)(struct nvkm_ram *);
+	int (*init)(struct nvkm_ram *);
+
+	int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
+		   u32 type, struct nvkm_mem **);
+	void (*put)(struct nvkm_ram *, struct nvkm_mem **);
+
+	int (*calc)(struct nvkm_ram *, u32 freq);
+	int (*prog)(struct nvkm_ram *);
+	void (*tidy)(struct nvkm_ram *);
+};
 #endif
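
nvkm_ram now owns the VRAM/tag allocators and a back-pointer to its fb, with all behaviour collected in the nvkm_ram_func vtable that closes the file. A minimal sketch of what a backend might install; the stubs stand in for a real allocator working against ram->vram:

	static int
	example_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 size_nc,
			u32 type, struct nvkm_mem **pmem)
	{
		return -ENOSYS; /* a real backend carves nodes out of ram->vram here */
	}

	static void
	example_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
	{
		*pmem = NULL; /* and returns the node to the allocator here */
	}

	static const struct nvkm_ram_func
	example_ram_func = {
		.get = example_ram_get,
		.put = example_ram_put,
	};
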
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index a1384786adc9..ae201e388487 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -1,28 +1,16 @@
 #ifndef __NVKM_FUSE_H__
 #define __NVKM_FUSE_H__
 #include <core/subdev.h>
-#include <core/device.h>
 
 struct nvkm_fuse {
-	struct nvkm_subdev base;
+	const struct nvkm_fuse_func *func;
+	struct nvkm_subdev subdev;
+	spinlock_t lock;
 };
 
-static inline struct nvkm_fuse *
-nvkm_fuse(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FUSE);
-}
+u32 nvkm_fuse_read(struct nvkm_fuse *, u32 addr);
 
-#define nvkm_fuse_create(p, e, o, d)                                        \
-	nvkm_fuse_create_((p), (e), (o), sizeof(**d), (void **)d)
-
-int  nvkm_fuse_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int, void **);
-void _nvkm_fuse_dtor(struct nvkm_object *);
-int  _nvkm_fuse_init(struct nvkm_object *);
-#define _nvkm_fuse_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv50_fuse_oclass;
-extern struct nvkm_oclass gf100_fuse_oclass;
-extern struct nvkm_oclass gm107_fuse_oclass;
+int nv50_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int gf100_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int gm107_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index ca5099a81b5a..9b9c6d2f90b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -19,26 +19,21 @@ struct nvkm_gpio_ntfy_rep {
 };
 
 struct nvkm_gpio {
-	struct nvkm_subdev base;
+	const struct nvkm_gpio_func *func;
+	struct nvkm_subdev subdev;
 
 	struct nvkm_event event;
-
-	void (*reset)(struct nvkm_gpio *, u8 func);
-	int  (*find)(struct nvkm_gpio *, int idx, u8 tag, u8 line,
-		     struct dcb_gpio_func *);
-	int  (*set)(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
-	int  (*get)(struct nvkm_gpio *, int idx, u8 tag, u8 line);
 };
 
-static inline struct nvkm_gpio *
-nvkm_gpio(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_GPIO);
-}
-
-extern struct nvkm_oclass *nv10_gpio_oclass;
-extern struct nvkm_oclass *nv50_gpio_oclass;
-extern struct nvkm_oclass *g94_gpio_oclass;
-extern struct nvkm_oclass *gf110_gpio_oclass;
-extern struct nvkm_oclass *gk104_gpio_oclass;
+void nvkm_gpio_reset(struct nvkm_gpio *, u8 func);
+int nvkm_gpio_find(struct nvkm_gpio *, int idx, u8 tag, u8 line,
+		   struct dcb_gpio_func *);
+int nvkm_gpio_set(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
+int nvkm_gpio_get(struct nvkm_gpio *, int idx, u8 tag, u8 line);
+
+int nv10_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 #endif
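
The reset/find/set/get methods become plain functions taking the gpio pointer. A hedged lookup-then-drive sketch; DCB_GPIO_FAN is assumed from the DCB gpio tables, and 0xff asks find() to match on tag alone:

	struct dcb_gpio_func func;
	if (nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN, 0xff, &func) == 0)
		nvkm_gpio_set(gpio, 0, func.func, func.line, 1);
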
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index a2e33730f05e..6b6224dbd5bb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -6,15 +6,6 @@
 #include <subdev/bios.h>
 #include <subdev/bios/i2c.h>
 
-#define NV_I2C_PORT(n)    (0x00 + (n))
-#define NV_I2C_AUX(n)     (0x10 + (n))
-#define NV_I2C_EXT(n)     (0x20 + (n))
-#define NV_I2C_DEFAULT(n) (0x80 + (n))
-
-#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
-#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
-#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
-
 struct nvkm_i2c_ntfy_req {
 #define NVKM_I2C_PLUG                                                      0x01
 #define NVKM_I2C_UNPLUG                                                    0x02
@@ -29,72 +20,79 @@ struct nvkm_i2c_ntfy_rep {
 	u8 mask;
 };
 
-struct nvkm_i2c_port {
-	struct nvkm_object base;
-	struct i2c_adapter adapter;
-	struct mutex mutex;
+struct nvkm_i2c_bus_probe {
+	struct i2c_board_info dev;
+	u8 udelay; /* set to 0 to use the standard delay */
+};
 
-	struct list_head head;
-	u8  index;
-	int aux;
+struct nvkm_i2c_bus {
+	const struct nvkm_i2c_bus_func *func;
+	struct nvkm_i2c_pad *pad;
+#define NVKM_I2C_BUS_CCB(n) /* 'n' is ccb index */                           (n)
+#define NVKM_I2C_BUS_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+#define NVKM_I2C_BUS_PRI /* ccb primary comm. port */                        -1
+#define NVKM_I2C_BUS_SEC /* ccb secondary comm. port */                      -2
+	int id;
 
-	const struct nvkm_i2c_func *func;
+	struct mutex mutex;
+	struct list_head head;
+	struct i2c_adapter i2c;
 };
 
-struct nvkm_i2c_func {
-	void (*drive_scl)(struct nvkm_i2c_port *, int);
-	void (*drive_sda)(struct nvkm_i2c_port *, int);
-	int  (*sense_scl)(struct nvkm_i2c_port *);
-	int  (*sense_sda)(struct nvkm_i2c_port *);
+int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *);
+void nvkm_i2c_bus_release(struct nvkm_i2c_bus *);
+int nvkm_i2c_bus_probe(struct nvkm_i2c_bus *, const char *,
+		       struct nvkm_i2c_bus_probe *,
+		       bool (*)(struct nvkm_i2c_bus *,
+			        struct i2c_board_info *, void *), void *);
 
-	int  (*aux)(struct nvkm_i2c_port *, bool, u8, u32, u8 *, u8);
-	int  (*pattern)(struct nvkm_i2c_port *, int pattern);
-	int  (*lnk_ctl)(struct nvkm_i2c_port *, int nr, int bw, bool enh);
-	int  (*drv_ctl)(struct nvkm_i2c_port *, int lane, int sw, int pe);
-};
+struct nvkm_i2c_aux {
+	const struct nvkm_i2c_aux_func *func;
+	struct nvkm_i2c_pad *pad;
+#define NVKM_I2C_AUX_CCB(n) /* 'n' is ccb index */                           (n)
+#define NVKM_I2C_AUX_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+	int id;
 
-struct nvkm_i2c_board_info {
-	struct i2c_board_info dev;
-	u8 udelay; /* set to 0 to use the standard delay */
+	struct mutex mutex;
+	struct list_head head;
+	struct i2c_adapter i2c;
+
+	u32 intr;
 };
 
+void nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *, bool monitor);
+int nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *);
+void nvkm_i2c_aux_release(struct nvkm_i2c_aux *);
+int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
+		      u32 addr, u8 *data, u8 size);
+int nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *, int link_nr, int link_bw,
+			 bool enhanced_framing);
+
 struct nvkm_i2c {
-	struct nvkm_subdev base;
-	struct nvkm_event event;
+	const struct nvkm_i2c_func *func;
+	struct nvkm_subdev subdev;
 
-	struct nvkm_i2c_port *(*find)(struct nvkm_i2c *, u8 index);
-	struct nvkm_i2c_port *(*find_type)(struct nvkm_i2c *, u16 type);
-	int  (*acquire_pad)(struct nvkm_i2c_port *, unsigned long timeout);
-	void (*release_pad)(struct nvkm_i2c_port *);
-	int  (*acquire)(struct nvkm_i2c_port *, unsigned long timeout);
-	void (*release)(struct nvkm_i2c_port *);
-	int  (*identify)(struct nvkm_i2c *, int index,
-			 const char *what, struct nvkm_i2c_board_info *,
-			 bool (*match)(struct nvkm_i2c_port *,
-				       struct i2c_board_info *, void *),
-			 void *);
-
-	wait_queue_head_t wait;
-	struct list_head ports;
+	struct list_head pad;
+	struct list_head bus;
+	struct list_head aux;
+
+	struct nvkm_event event;
 };
 
-static inline struct nvkm_i2c *
-nvkm_i2c(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_I2C);
-}
+struct nvkm_i2c_bus *nvkm_i2c_bus_find(struct nvkm_i2c *, int);
+struct nvkm_i2c_aux *nvkm_i2c_aux_find(struct nvkm_i2c *, int);
 
-extern struct nvkm_oclass *nv04_i2c_oclass;
-extern struct nvkm_oclass *nv4e_i2c_oclass;
-extern struct nvkm_oclass *nv50_i2c_oclass;
-extern struct nvkm_oclass *g94_i2c_oclass;
-extern struct nvkm_oclass *gf110_i2c_oclass;
-extern struct nvkm_oclass *gf117_i2c_oclass;
-extern struct nvkm_oclass *gk104_i2c_oclass;
-extern struct nvkm_oclass *gm204_i2c_oclass;
+int nv04_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv4e_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv50_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gm204_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 
 static inline int
-nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
+nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
 {
 	u8 val;
 	struct i2c_msg msgs[] = {
@@ -102,7 +100,7 @@ nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
 		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
 	};
 
-	int ret = i2c_transfer(&port->adapter, msgs, 2);
+	int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
 	if (ret != 2)
 		return -EIO;
 
@@ -110,14 +108,14 @@ nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
 }
 
 static inline int
-nv_wri2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg, u8 val)
+nvkm_wri2cr(struct i2c_adapter *adap, u8 addr, u8 reg, u8 val)
 {
 	u8 buf[2] = { reg, val };
 	struct i2c_msg msgs[] = {
 		{ .addr = addr, .flags = 0, .len = 2, .buf = buf },
 	};
 
-	int ret = i2c_transfer(&port->adapter, msgs, 1);
+	int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
 	if (ret != 1)
 		return -EIO;
 
@@ -125,11 +123,30 @@ nv_wri2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg, u8 val)
 }
 
 static inline bool
-nv_probe_i2c(struct nvkm_i2c_port *port, u8 addr)
+nvkm_probe_i2c(struct i2c_adapter *adap, u8 addr)
 {
-	return nv_rdi2cr(port, addr, 0) >= 0;
+	return nvkm_rdi2cr(adap, addr, 0) >= 0;
 }
 
-int nv_rdaux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size);
-int nv_wraux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size);
+static inline int
+nvkm_rdaux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
+{
+	int ret = nvkm_i2c_aux_acquire(aux);
+	if (ret == 0) {
+		ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, size);
+		nvkm_i2c_aux_release(aux);
+	}
+	return ret;
+}
+
+static inline int
+nvkm_wraux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
+{
+	int ret = nvkm_i2c_aux_acquire(aux);
+	if (ret == 0) {
+		ret = nvkm_i2c_aux_xfer(aux, true, 8, addr, data, size);
+		nvkm_i2c_aux_release(aux);
+	}
+	return ret;
+}
 #endif
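
Ports give way to explicit bus and aux objects, with acquire/release bracketing each transaction, and the rd/wr helpers now take a bare i2c_adapter. In nvkm_rdaux()/nvkm_wraux(), the 9 and 8 are the DP AUX native-read and native-write request types. A usage sketch probing the conventional EDID address on the primary bus:

	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
	if (bus && !nvkm_i2c_bus_acquire(bus)) {
		int byte = nvkm_rdi2cr(&bus->i2c, 0x50, 0x00); /* first EDID byte, or -EIO */
		nvkm_i2c_bus_release(bus);
	}
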
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index 2150d8af0040..9d512cd5a0a7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -2,31 +2,7 @@
 #define __NVKM_IBUS_H__
 #include <core/subdev.h>
 
-struct nvkm_ibus {
-	struct nvkm_subdev base;
-};
-
-static inline struct nvkm_ibus *
-nvkm_ibus(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_IBUS);
-}
-
-#define nvkm_ibus_create(p,e,o,d)                                           \
-	nvkm_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus",              \
-			       sizeof(**d), (void **)d)
-#define nvkm_ibus_destroy(p)                                                \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_ibus_init(p)                                                   \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_ibus_fini(p,s)                                                 \
-	nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_ibus_dtor _nvkm_subdev_dtor
-#define _nvkm_ibus_init _nvkm_subdev_init
-#define _nvkm_ibus_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass gf100_ibus_oclass;
-extern struct nvkm_oclass gk104_ibus_oclass;
-extern struct nvkm_oclass gk20a_ibus_oclass;
+int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 1bcb763cfca0..28bc202f9753 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -1,49 +1,29 @@
 #ifndef __NVKM_INSTMEM_H__
 #define __NVKM_INSTMEM_H__
 #include <core/subdev.h>
-
-struct nvkm_instobj {
-	struct nvkm_object base;
-	struct list_head head;
-	u32 *suspend;
-	u64 addr;
-	u32 size;
-};
-
-static inline struct nvkm_instobj *
-nv_memobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
-		nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
+struct nvkm_memory;
 
 struct nvkm_instmem {
-	struct nvkm_subdev base;
-	struct list_head list;
+	const struct nvkm_instmem_func *func;
+	struct nvkm_subdev subdev;
 
+	struct list_head list;
 	u32 reserved;
-	int (*alloc)(struct nvkm_instmem *, struct nvkm_object *,
-		     u32 size, u32 align, struct nvkm_object **);
+
+	struct nvkm_memory *vbios;
+	struct nvkm_ramht  *ramht;
+	struct nvkm_memory *ramro;
+	struct nvkm_memory *ramfc;
 };
 
-static inline struct nvkm_instmem *
-nvkm_instmem(void *obj)
-{
-	/* nv04/nv40 impls need to create objects in their constructor,
-	 * which is before the subdev pointer is valid
-	 */
-	if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
-	    nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
-		return obj;
+u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
+void nvkm_instmem_wr32(struct nvkm_instmem *, u32 addr, u32 data);
+int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
+		     struct nvkm_memory **);
 
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_INSTMEM);
-}
 
-extern struct nvkm_oclass *nv04_instmem_oclass;
-extern struct nvkm_oclass *nv40_instmem_oclass;
-extern struct nvkm_oclass *nv50_instmem_oclass;
-extern struct nvkm_oclass *gk20a_instmem_oclass;
+int nv04_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv40_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv50_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int gk20a_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
 #endif
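
Per-object alloc() is replaced by nvkm_instobj_new(), which hands back a generic nvkm_memory rather than an instobj-specific type. A minimal sketch; the size and alignment are placeholders:

	static int
	example_alloc(struct nvkm_instmem *imem, struct nvkm_memory **pmemory)
	{
		/* 0x1000 bytes, 0x100-byte alignment, zero-filled */
		return nvkm_instobj_new(imem, 0x1000, 0x100, true, pmemory);
	}
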
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index cd5d29fc0565..c773b5e958b4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -1,31 +1,36 @@
 #ifndef __NVKM_LTC_H__
 #define __NVKM_LTC_H__
 #include <core/subdev.h>
-struct nvkm_mm_node;
+#include <core/mm.h>
 
 #define NVKM_LTC_MAX_ZBC_CNT 16
 
 struct nvkm_ltc {
-	struct nvkm_subdev base;
+	const struct nvkm_ltc_func *func;
+	struct nvkm_subdev subdev;
 
-	int  (*tags_alloc)(struct nvkm_ltc *, u32 count,
-			   struct nvkm_mm_node **);
-	void (*tags_free)(struct nvkm_ltc *, struct nvkm_mm_node **);
-	void (*tags_clear)(struct nvkm_ltc *, u32 first, u32 count);
+	u32 ltc_nr;
+	u32 lts_nr;
+
+	u32 num_tags;
+	u32 tag_base;
+	struct nvkm_mm tags;
+	struct nvkm_mm_node *tag_ram;
 
 	int zbc_min;
 	int zbc_max;
-	int (*zbc_color_get)(struct nvkm_ltc *, int index, const u32[4]);
-	int (*zbc_depth_get)(struct nvkm_ltc *, int index, const u32);
+	u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
+	u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
 };
 
-static inline struct nvkm_ltc *
-nvkm_ltc(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_LTC);
-}
+int nvkm_ltc_tags_alloc(struct nvkm_ltc *, u32 count, struct nvkm_mm_node **);
+void nvkm_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
+void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
+
+int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
+int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
 
-extern struct nvkm_oclass *gf100_ltc_oclass;
-extern struct nvkm_oclass *gk104_ltc_oclass;
-extern struct nvkm_oclass *gm107_ltc_oclass;
+int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 #endif
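
Compression-tag management moves out of the vtable into the nvkm_ltc_tags_* functions; the mm node returned by alloc carries the range used to clear and later free the tags. A hedged lifecycle sketch, assuming the usual offset/length fields of nvkm_mm_node:

	struct nvkm_mm_node *tags = NULL;
	if (nvkm_ltc_tags_alloc(ltc, count, &tags) == 0) {
		nvkm_ltc_tags_clear(ltc, tags->offset, tags->length);
		/* ... use the compressed region ... */
		nvkm_ltc_tags_free(ltc, &tags);
	}
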
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 055bea7702a1..4de05e718f83 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -3,26 +3,19 @@
 #include <core/subdev.h>
 
 struct nvkm_mc {
-	struct nvkm_subdev base;
-	bool use_msi;
-	unsigned int irq;
-	void (*unk260)(struct nvkm_mc *, u32);
+	const struct nvkm_mc_func *func;
+	struct nvkm_subdev subdev;
 };
 
-static inline struct nvkm_mc *
-nvkm_mc(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MC);
-}
+void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
+void nvkm_mc_intr_unarm(struct nvkm_mc *);
+void nvkm_mc_intr_rearm(struct nvkm_mc *);
+void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
 
-extern struct nvkm_oclass *nv04_mc_oclass;
-extern struct nvkm_oclass *nv40_mc_oclass;
-extern struct nvkm_oclass *nv44_mc_oclass;
-extern struct nvkm_oclass *nv4c_mc_oclass;
-extern struct nvkm_oclass *nv50_mc_oclass;
-extern struct nvkm_oclass *g94_mc_oclass;
-extern struct nvkm_oclass *g98_mc_oclass;
-extern struct nvkm_oclass *gf100_mc_oclass;
-extern struct nvkm_oclass *gf106_mc_oclass;
-extern struct nvkm_oclass *gk20a_mc_oclass;
+int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 #endif
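
With use_msi and the irq number gone from the struct, the interrupt path is expressed as unarm/dispatch/rearm helpers. The top-level handler presumably follows the usual pattern:

	bool handled = false;
	nvkm_mc_intr_unarm(mc);     /* mask delivery while sources are inspected */
	nvkm_mc_intr(mc, &handled); /* dispatch anything pending */
	nvkm_mc_intr_rearm(mc);     /* unmask again */
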
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 3a5368776c31..dcd3deff27a4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -6,7 +6,7 @@ struct nvkm_device;
 struct nvkm_mem;
 
 struct nvkm_vm_pgt {
-	struct nvkm_gpuobj *obj[2];
+	struct nvkm_memory *mem[2];
 	u32 refcount[2];
 };
 
@@ -26,74 +26,23 @@ struct nvkm_vma {
 
 struct nvkm_vm {
 	struct nvkm_mmu *mmu;
+
+	struct mutex mutex;
 	struct nvkm_mm mm;
 	struct kref refcount;
 
 	struct list_head pgd_list;
-	atomic_t engref[NVDEV_SUBDEV_NR];
+	atomic_t engref[NVKM_SUBDEV_NR];
 
 	struct nvkm_vm_pgt *pgt;
 	u32 fpde;
 	u32 lpde;
 };
 
-struct nvkm_mmu {
-	struct nvkm_subdev base;
-
-	u64 limit;
-	u8  dma_bits;
-	u32 pgt_bits;
-	u8  spg_shift;
-	u8  lpg_shift;
-
-	int  (*create)(struct nvkm_mmu *, u64 offset, u64 length,
-		       u64 mm_offset, struct nvkm_vm **);
-
-	void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
-			struct nvkm_gpuobj *pgt[2]);
-	void (*map)(struct nvkm_vma *, struct nvkm_gpuobj *,
-		    struct nvkm_mem *, u32 pte, u32 cnt,
-		    u64 phys, u64 delta);
-	void (*map_sg)(struct nvkm_vma *, struct nvkm_gpuobj *,
-		       struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
-	void (*unmap)(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt);
-	void (*flush)(struct nvkm_vm *);
-};
-
-static inline struct nvkm_mmu *
-nvkm_mmu(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MMU);
-}
-
-#define nvkm_mmu_create(p,e,o,i,f,d)                                      \
-	nvkm_subdev_create((p), (e), (o), 0, (i), (f), (d))
-#define nvkm_mmu_destroy(p)                                               \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_mmu_init(p)                                                  \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_mmu_fini(p,s)                                                \
-	nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_mmu_dtor _nvkm_subdev_dtor
-#define _nvkm_mmu_init _nvkm_subdev_init
-#define _nvkm_mmu_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv04_mmu_oclass;
-extern struct nvkm_oclass nv41_mmu_oclass;
-extern struct nvkm_oclass nv44_mmu_oclass;
-extern struct nvkm_oclass nv50_mmu_oclass;
-extern struct nvkm_oclass gf100_mmu_oclass;
-
-int  nv04_vm_create(struct nvkm_mmu *, u64, u64, u64,
-		    struct nvkm_vm **);
-void nv04_mmu_dtor(struct nvkm_object *);
-
-int  nvkm_vm_create(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
-		    u32 block, struct nvkm_vm **);
 int  nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
-		 struct nvkm_vm **);
+		 struct lock_class_key *, struct nvkm_vm **);
 int  nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
+int  nvkm_vm_boot(struct nvkm_vm *, u64 size);
 int  nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
 		 struct nvkm_vma *);
 void nvkm_vm_put(struct nvkm_vma *);
@@ -101,4 +50,19 @@ void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
 void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
 void nvkm_vm_unmap(struct nvkm_vma *);
 void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+
+struct nvkm_mmu {
+	const struct nvkm_mmu_func *func;
+	struct nvkm_subdev subdev;
+
+	u64 limit;
+	u8  dma_bits;
+	u8  lpg_shift;
+};
+
+int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 #endif
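
Address-space management keeps the get/map/put trio, now with an explicit mutex in the vm and a lock_class_key threaded through nvkm_vm_new(). A hedged map/unmap sketch; NV_MEM_ACCESS_RW is assumed from the fb header's access flags, and 12 is the small-page shift:

	static int
	example_map(struct nvkm_vm *vm, struct nvkm_mem *mem, u64 size,
		    struct nvkm_vma *vma)
	{
		int ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, vma);
		if (ret)
			return ret;
		nvkm_vm_map(vma, mem);
		return 0; /* later: nvkm_vm_unmap(vma); nvkm_vm_put(vma); */
	}
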
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index fba613477b1a..ed0250139dae 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -2,33 +2,5 @@
 #define __NVKM_MXM_H__
 #include <core/subdev.h>
 
-#define MXM_SANITISE_DCB 0x00000001
-
-struct nvkm_mxm {
-	struct nvkm_subdev base;
-	u32 action;
-	u8 *mxms;
-};
-
-static inline struct nvkm_mxm *
-nvkm_mxm(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MXM);
-}
-
-#define nvkm_mxm_create(p,e,o,d)                                            \
-	nvkm_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_mxm_init(p)                                                    \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_mxm_fini(p,s)                                                  \
-	nvkm_subdev_fini(&(p)->base, (s))
-int  nvkm_mxm_create_(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, int, void **);
-void nvkm_mxm_destroy(struct nvkm_mxm *);
-
-#define _nvkm_mxm_dtor _nvkm_subdev_dtor
-#define _nvkm_mxm_init _nvkm_subdev_init
-#define _nvkm_mxm_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv50_mxm_oclass;
+int nv50_mxm_new(struct nvkm_device *, int, struct nvkm_subdev **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
new file mode 100644
index 000000000000..5b3c054f3b55
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -0,0 +1,34 @@
+#ifndef __NVKM_PCI_H__
+#define __NVKM_PCI_H__
+#include <core/subdev.h>
+
+struct nvkm_pci {
+	const struct nvkm_pci_func *func;
+	struct nvkm_subdev subdev;
+	struct pci_dev *pdev;
+	int irq;
+
+	struct {
+		struct agp_bridge_data *bridge;
+		u32 mode;
+		u64 base;
+		u64 size;
+		int mtrr;
+		bool cma;
+		bool acquired;
+	} agp;
+
+	bool msi;
+};
+
+u32 nvkm_pci_rd32(struct nvkm_pci *, u16 addr);
+void nvkm_pci_wr08(struct nvkm_pci *, u16 addr, u8 data);
+void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
+void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
+
+int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv50_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 755942352557..e61923d5e49c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -3,7 +3,8 @@
 #include <core/subdev.h>
 
 struct nvkm_pmu {
-	struct nvkm_subdev base;
+	const struct nvkm_pmu_func *func;
+	struct nvkm_subdev subdev;
 
 	struct {
 		u32 base;
@@ -20,24 +21,20 @@ struct nvkm_pmu {
 		u32 message;
 		u32 data[2];
 	} recv;
-
-	int  (*message)(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
-	void (*pgob)(struct nvkm_pmu *, bool);
 };
 
-static inline struct nvkm_pmu *
-nvkm_pmu(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_PMU);
-}
-
-extern struct nvkm_oclass *gt215_pmu_oclass;
-extern struct nvkm_oclass *gf100_pmu_oclass;
-extern struct nvkm_oclass *gf110_pmu_oclass;
-extern struct nvkm_oclass *gk104_pmu_oclass;
-extern struct nvkm_oclass *gk110_pmu_oclass;
-extern struct nvkm_oclass *gk208_pmu_oclass;
-extern struct nvkm_oclass *gk20a_pmu_oclass;
+int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
+		  u32 message, u32 data0, u32 data1);
+void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
+
+int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gf119_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk104_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 
 /* interface to MEMX process running on PMU */
 struct nvkm_memx;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 6662829b6db1..b268b96faece 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -2,6 +2,28 @@
 #define __NVKM_THERM_H__
 #include <core/subdev.h>
 
+#include <subdev/bios.h>
+#include <subdev/bios/therm.h>
+#include <subdev/timer.h>
+
+enum nvkm_therm_thrs_direction {
+	NVKM_THERM_THRS_FALLING = 0,
+	NVKM_THERM_THRS_RISING = 1
+};
+
+enum nvkm_therm_thrs_state {
+	NVKM_THERM_THRS_LOWER = 0,
+	NVKM_THERM_THRS_HIGHER = 1
+};
+
+enum nvkm_therm_thrs {
+	NVKM_THERM_THRS_FANBOOST = 0,
+	NVKM_THERM_THRS_DOWNCLOCK = 1,
+	NVKM_THERM_THRS_CRITICAL = 2,
+	NVKM_THERM_THRS_SHUTDOWN = 3,
+	NVKM_THERM_THRS_NR
+};
+
 enum nvkm_therm_fan_mode {
 	NVKM_THERM_CTRL_NONE = 0,
 	NVKM_THERM_CTRL_MANUAL = 1,
@@ -24,56 +46,54 @@ enum nvkm_therm_attr_type {
 };
 
 struct nvkm_therm {
-	struct nvkm_subdev base;
+	const struct nvkm_therm_func *func;
+	struct nvkm_subdev subdev;
 
-	int (*pwm_ctrl)(struct nvkm_therm *, int line, bool);
-	int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *);
-	int (*pwm_set)(struct nvkm_therm *, int line, u32, u32);
-	int (*pwm_clock)(struct nvkm_therm *, int line);
+	/* automatic thermal management */
+	struct nvkm_alarm alarm;
+	spinlock_t lock;
+	struct nvbios_therm_trip_point *last_trip;
+	int mode;
+	int cstate;
+	int suspend;
+
+	/* bios */
+	struct nvbios_therm_sensor bios_sensor;
+
+	/* fan priv */
+	struct nvkm_fan *fan;
+
+	/* alarms priv */
+	struct {
+		spinlock_t alarm_program_lock;
+		struct nvkm_alarm therm_poll_alarm;
+		enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
+	} sensor;
+
+	/* what should be done if the card overheats */
+	struct {
+		void (*downclock)(struct nvkm_therm *, bool active);
+		void (*pause)(struct nvkm_therm *, bool active);
+	} emergency;
+
+	/* ic */
+	struct i2c_client *ic;
 
 	int (*fan_get)(struct nvkm_therm *);
 	int (*fan_set)(struct nvkm_therm *, int);
-	int (*fan_sense)(struct nvkm_therm *);
-
-	int (*temp_get)(struct nvkm_therm *);
 
 	int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
 	int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
 };
 
-static inline struct nvkm_therm *
-nvkm_therm(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_THERM);
-}
-
-#define nvkm_therm_create(p,e,o,d)                                          \
-	nvkm_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_therm_destroy(p) ({                                            \
-	struct nvkm_therm *therm = (p);                                     \
-        _nvkm_therm_dtor(nv_object(therm));                                 \
-})
-#define nvkm_therm_init(p) ({                                               \
-	struct nvkm_therm *therm = (p);                                     \
-        _nvkm_therm_init(nv_object(therm));                                 \
-})
-#define nvkm_therm_fini(p,s) ({                                             \
-	struct nvkm_therm *therm = (p);                                     \
-        _nvkm_therm_init(nv_object(therm), (s));                            \
-})
-
-int  nvkm_therm_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, int, void **);
-void _nvkm_therm_dtor(struct nvkm_object *);
-int  _nvkm_therm_init(struct nvkm_object *);
-int  _nvkm_therm_fini(struct nvkm_object *, bool);
-
-int  nvkm_therm_cstate(struct nvkm_therm *, int, int);
-
-extern struct nvkm_oclass nv40_therm_oclass;
-extern struct nvkm_oclass nv50_therm_oclass;
-extern struct nvkm_oclass g84_therm_oclass;
-extern struct nvkm_oclass gt215_therm_oclass;
-extern struct nvkm_oclass gf110_therm_oclass;
-extern struct nvkm_oclass gm107_therm_oclass;
+int nvkm_therm_temp_get(struct nvkm_therm *);
+int nvkm_therm_fan_sense(struct nvkm_therm *);
+int nvkm_therm_cstate(struct nvkm_therm *, int, int);
+
+int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 4ad55082ef7a..62ed0880b0e1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -9,53 +9,58 @@ struct nvkm_alarm {
 };
 
 static inline void
-nvkm_alarm_init(struct nvkm_alarm *alarm,
-		   void (*func)(struct nvkm_alarm *))
+nvkm_alarm_init(struct nvkm_alarm *alarm, void (*func)(struct nvkm_alarm *))
 {
 	INIT_LIST_HEAD(&alarm->head);
 	alarm->func = func;
 }
 
-bool nvkm_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
-bool nvkm_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
-bool nvkm_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
-void nvkm_timer_alarm(void *, u32 nsec, struct nvkm_alarm *);
-void nvkm_timer_alarm_cancel(void *, struct nvkm_alarm *);
-
-#define NV_WAIT_DEFAULT 2000000000ULL
-#define nv_wait(o,a,m,v)                                                       \
-	nvkm_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
-#define nv_wait_ne(o,a,m,v)                                                    \
-	nvkm_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
-#define nv_wait_cb(o,c,d)                                                      \
-	nvkm_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
-
 struct nvkm_timer {
-	struct nvkm_subdev base;
-	u64  (*read)(struct nvkm_timer *);
-	void (*alarm)(struct nvkm_timer *, u64 time, struct nvkm_alarm *);
-	void (*alarm_cancel)(struct nvkm_timer *, struct nvkm_alarm *);
-};
+	const struct nvkm_timer_func *func;
+	struct nvkm_subdev subdev;
 
-static inline struct nvkm_timer *
-nvkm_timer(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_TIMER);
-}
+	struct list_head alarms;
+	spinlock_t lock;
+};
 
-#define nvkm_timer_create(p,e,o,d)                                          \
-	nvkm_subdev_create_((p), (e), (o), 0, "PTIMER", "timer",            \
-			       sizeof(**d), (void **)d)
-#define nvkm_timer_destroy(p)                                               \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_timer_init(p)                                                  \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_timer_fini(p,s)                                                \
-	nvkm_subdev_fini(&(p)->base, (s))
+u64 nvkm_timer_read(struct nvkm_timer *);
+void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
+void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *);
 
-int nvkm_timer_create_(struct nvkm_object *, struct nvkm_engine *,
-			  struct nvkm_oclass *, int size, void **);
+/* Delay based on GPU time (ie. PTIMER).
+ *
+ * Will return -ETIMEDOUT unless the loop was terminated with 'break',
+ * where it will return the number of nanoseconds taken instead.
+ *
+ * NVKM_DELAY can be passed for 'cond' to disable the timeout warning,
+ * which is useful for unconditional delay loops.
+ */
+#define NVKM_DELAY _warn = false;
+#define nvkm_nsec(d,n,cond...) ({                                              \
+	struct nvkm_device *_device = (d);                                     \
+	struct nvkm_timer *_tmr = _device->timer;                              \
+	u64 _nsecs = (n), _time0 = nvkm_timer_read(_tmr);                      \
+	s64 _taken = 0;                                                        \
+	bool _warn = true;                                                     \
+                                                                               \
+	do {                                                                   \
+		cond                                                           \
+	} while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs);    \
+                                                                               \
+	if (_taken >= _nsecs) {                                                \
+		if (_warn) {                                                   \
+			dev_warn(_device->dev, "timeout at %s:%d/%s()!\n",     \
+				 __FILE__, __LINE__, __func__);                \
+		}                                                              \
+		_taken = -ETIMEDOUT;                                           \
+	}                                                                      \
+	_taken;                                                                \
+})
+#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
+#define nvkm_msec(d,m,cond...) nvkm_usec((d), (m) * 1000, ##cond)
 
-extern struct nvkm_oclass nv04_timer_oclass;
-extern struct nvkm_oclass gk20a_timer_oclass;
+int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int gk20a_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
 #endif
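
The nv_wait() family built on nvkm_timer_wait_* is replaced by the nvkm_nsec/usec/msec loops documented above, which evaluate 'cond' until it breaks out or PTIMER says time is up. A sketch of the common poll-for-a-bit idiom; the register and bit are placeholders, and nvkm_rd32 is the device accessor assumed from the core headers:

	s64 taken = nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x000100) & 0x00000001)
			break;
	);
	if (taken < 0)
		return taken; /* -ETIMEDOUT, warning already printed */
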
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
index fee09ad818e4..ce5636fe2a66 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
@@ -1,30 +1,28 @@
 #ifndef __NOUVEAU_VGA_H__
 #define __NOUVEAU_VGA_H__
-
-#include <core/os.h>
+#include <core/subdev.h>
 
 /* access to various legacy io ports */
-u8   nv_rdport(void *obj, int head, u16 port);
-void nv_wrport(void *obj, int head, u16 port, u8 value);
+u8   nvkm_rdport(struct nvkm_device *, int head, u16 port);
+void nvkm_wrport(struct nvkm_device *, int head, u16 port, u8 value);
 
 /* VGA Sequencer */
-u8   nv_rdvgas(void *obj, int head, u8 index);
-void nv_wrvgas(void *obj, int head, u8 index, u8 value);
+u8   nvkm_rdvgas(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgas(struct nvkm_device *, int head, u8 index, u8 value);
 
 /* VGA Graphics */
-u8   nv_rdvgag(void *obj, int head, u8 index);
-void nv_wrvgag(void *obj, int head, u8 index, u8 value);
+u8   nvkm_rdvgag(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgag(struct nvkm_device *, int head, u8 index, u8 value);
 
 /* VGA CRTC */
-u8   nv_rdvgac(void *obj, int head, u8 index);
-void nv_wrvgac(void *obj, int head, u8 index, u8 value);
+u8   nvkm_rdvgac(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgac(struct nvkm_device *, int head, u8 index, u8 value);
 
 /* VGA indexed port access dispatcher */
-u8   nv_rdvgai(void *obj, int head, u16 port, u8 index);
-void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value);
-
-bool nv_lockvgac(void *obj, bool lock);
-u8   nv_rdvgaowner(void *obj);
-void nv_wrvgaowner(void *obj, u8);
+u8   nvkm_rdvgai(struct nvkm_device *, int head, u16 port, u8 index);
+void nvkm_wrvgai(struct nvkm_device *, int head, u16 port, u8 index, u8 value);
 
+bool nvkm_lockvgac(struct nvkm_device *, bool lock);
+u8   nvkm_rdvgaowner(struct nvkm_device *);
+void nvkm_wrvgaowner(struct nvkm_device *, u8);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index e3d7243fbb1d..5c8a3f1196de 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -2,19 +2,9 @@
 #define __NVKM_VOLT_H__
 #include <core/subdev.h>
 
-struct nvkm_voltage {
-	u32 uv;
-	u8  id;
-};
-
 struct nvkm_volt {
-	struct nvkm_subdev base;
-
-	int (*vid_get)(struct nvkm_volt *);
-	int (*get)(struct nvkm_volt *);
-	int (*vid_set)(struct nvkm_volt *, u8 vid);
-	int (*set)(struct nvkm_volt *, u32 uv);
-	int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+	const struct nvkm_volt_func *func;
+	struct nvkm_subdev subdev;
 
 	u8 vid_mask;
 	u8 vid_nr;
@@ -24,35 +14,9 @@ struct nvkm_volt {
 	} vid[256];
 };
 
-static inline struct nvkm_volt *
-nvkm_volt(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VOLT);
-}
-
-#define nvkm_volt_create(p, e, o, d)                                        \
-	nvkm_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_volt_destroy(p) ({                                             \
-	struct nvkm_volt *v = (p);                                          \
-	_nvkm_volt_dtor(nv_object(v));                                      \
-})
-#define nvkm_volt_init(p) ({                                                \
-	struct nvkm_volt *v = (p);                                          \
-	_nvkm_volt_init(nv_object(v));                                      \
-})
-#define nvkm_volt_fini(p,s)                                                 \
-	nvkm_subdev_fini((p), (s))
-
-int  nvkm_volt_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int, void **);
-void _nvkm_volt_dtor(struct nvkm_object *);
-int  _nvkm_volt_init(struct nvkm_object *);
-#define _nvkm_volt_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv40_volt_oclass;
-extern struct nvkm_oclass gk20a_volt_oclass;
+int nvkm_volt_get(struct nvkm_volt *);
+int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
 
-int nvkm_voltgpio_init(struct nvkm_volt *);
-int nvkm_voltgpio_get(struct nvkm_volt *);
-int nvkm_voltgpio_set(struct nvkm_volt *, u8);
+int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d8b0891a141c..d336c2247d6a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -51,7 +51,7 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
 			 * device (ie. the one that belongs to the fd it
 			 * opened)
 			 */
-			if (nvif_device_init(&cli->base.base, NULL,
+			if (nvif_device_init(&cli->base.object,
 					     NOUVEAU_ABI16_DEVICE, NV_DEVICE,
 					     &args, sizeof(args),
 					     &abi16->device) == 0)
@@ -69,28 +69,28 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
 int
 nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
+	struct nouveau_cli *cli = (void *)abi16->device.object.client;
 	mutex_unlock(&cli->mutex);
 	return ret;
 }
 
-u16
+s32
 nouveau_abi16_swclass(struct nouveau_drm *drm)
 {
 	switch (drm->device.info.family) {
 	case NV_DEVICE_INFO_V0_TNT:
-		return 0x006e;
+		return NVIF_IOCTL_NEW_V0_SW_NV04;
 	case NV_DEVICE_INFO_V0_CELSIUS:
 	case NV_DEVICE_INFO_V0_KELVIN:
 	case NV_DEVICE_INFO_V0_RANKINE:
 	case NV_DEVICE_INFO_V0_CURIE:
-		return 0x016e;
+		return NVIF_IOCTL_NEW_V0_SW_NV10;
 	case NV_DEVICE_INFO_V0_TESLA:
-		return 0x506e;
+		return NVIF_IOCTL_NEW_V0_SW_NV50;
 	case NV_DEVICE_INFO_V0_FERMI:
 	case NV_DEVICE_INFO_V0_KEPLER:
 	case NV_DEVICE_INFO_V0_MAXWELL:
-		return 0x906e;
+		return NVIF_IOCTL_NEW_V0_SW_GF100;
 	}
 
 	return 0x0000;
@@ -100,6 +100,7 @@ static void
 nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
 			struct nouveau_abi16_ntfy *ntfy)
 {
+	nvif_object_fini(&ntfy->object);
 	nvkm_mm_free(&chan->heap, &ntfy->node);
 	list_del(&ntfy->head);
 	kfree(ntfy);
@@ -132,7 +133,8 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
 
 	/* destroy channel object, all children will be killed too */
 	if (chan->chan) {
-		abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff));
+		abi16->handles &= ~(1ULL << (chan->chan->user.handle & 0xffff));
+		nouveau_channel_idle(chan->chan);
 		nouveau_channel_del(&chan->chan);
 	}
 
@@ -143,7 +145,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
 void
 nouveau_abi16_fini(struct nouveau_abi16 *abi16)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
+	struct nouveau_cli *cli = (void *)abi16->device.object.client;
 	struct nouveau_abi16_chan *chan, *temp;
 
 	/* cleanup channels */
@@ -164,7 +166,6 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvif_device *device = &drm->device;
-	struct nvkm_timer *ptimer = nvxx_timer(device);
 	struct nvkm_gr *gr = nvxx_gr(device);
 	struct drm_nouveau_getparam *getparam = data;
 
@@ -173,19 +174,19 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = device->info.chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
-		if (nv_device_is_pci(nvxx_device(device)))
+		if (nvxx_device(device)->func->pci)
 			getparam->value = dev->pdev->vendor;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
-		if (nv_device_is_pci(nvxx_device(device)))
+		if (nvxx_device(device)->func->pci)
 			getparam->value = dev->pdev->device;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_BUS_TYPE:
-		if (!nv_device_is_pci(nvxx_device(device)))
+		if (!nvxx_device(device)->func->pci)
 			getparam->value = 3;
 		else
 		if (drm_pci_device_is_agp(dev))
@@ -206,7 +207,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = 0; /* deprecated */
 		break;
 	case NOUVEAU_GETPARAM_PTIMER_TIME:
-		getparam->value = ptimer->read(ptimer);
+		getparam->value = nvif_device_time(device);
 		break;
 	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
 		getparam->value = 1;
@@ -215,10 +216,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = 1;
 		break;
 	case NOUVEAU_GETPARAM_GRAPH_UNITS:
-		getparam->value = gr->units ? gr->units(gr) : 0;
+		getparam->value = nvkm_gr_units(gr);
 		break;
 	default:
-		NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param);
+		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
 		return -EINVAL;
 	}
 
@@ -337,7 +338,7 @@ nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
 	struct nouveau_abi16_chan *chan;
 
 	list_for_each_entry(chan, &abi16->channels, head) {
-		if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel))
+		if (chan->chan->user.handle == NOUVEAU_ABI16_CHAN(channel))
 			return chan;
 	}
 
@@ -365,40 +366,91 @@ int
 nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_grobj_alloc *init = data;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_new_v0 new;
-	} args = {
-		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
-		.ioctl.type = NVIF_IOCTL_V0_NEW,
-		.ioctl.path_nr = 3,
-		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
-		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
-		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
-		.new.route = NVDRM_OBJECT_ABI16,
-		.new.handle = init->handle,
-		.new.oclass = init->class,
-	};
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_abi16_chan *chan;
+	struct nouveau_abi16_ntfy *ntfy;
 	struct nvif_client *client;
-	int ret;
+	struct nvif_sclass *sclass;
+	s32 oclass = 0;
+	int ret, i;
 
 	if (unlikely(!abi16))
 		return -ENOMEM;
 
 	if (init->handle == ~0)
 		return nouveau_abi16_put(abi16, -EINVAL);
-	client = nvif_client(nvif_object(&abi16->device));
+	client = abi16->device.object.client;
+
+	chan = nouveau_abi16_chan(abi16, init->channel);
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOENT);
+
+	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
+	if (ret < 0)
+		return nouveau_abi16_put(abi16, ret);
 
-	/* compatibility with userspace that assumes 506e for all chipsets */
-	if (init->class == 0x506e) {
-		init->class = nouveau_abi16_swclass(drm);
-		if (init->class == 0x906e)
-			return nouveau_abi16_put(abi16, 0);
+	if ((init->class & 0x00ff) == 0x006e) {
+		/* nvsw: compatibility with older 0x*6e class identifier */
+		for (i = 0; !oclass && i < ret; i++) {
+			switch (sclass[i].oclass) {
+			case NVIF_IOCTL_NEW_V0_SW_NV04:
+			case NVIF_IOCTL_NEW_V0_SW_NV10:
+			case NVIF_IOCTL_NEW_V0_SW_NV50:
+			case NVIF_IOCTL_NEW_V0_SW_GF100:
+				oclass = sclass[i].oclass;
+				break;
+			default:
+				break;
+			}
+		}
+	} else
+	if ((init->class & 0x00ff) == 0x00b1) {
+		/* msvld: compatibility with incorrect version exposure */
+		for (i = 0; i < ret; i++) {
+			if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
+				oclass = sclass[i].oclass;
+				break;
+			}
+		}
+	} else
+	if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
+		/* mspdec: compatibility with incorrect version exposure */
+		for (i = 0; i < ret; i++) {
+			if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
+				oclass = sclass[i].oclass;
+				break;
+			}
+		}
+	} else
+	if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
+		/* msppp: compatibility with incorrect version exposure */
+		for (i = 0; i < ret; i++) {
+			if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
+				oclass = sclass[i].oclass;
+				break;
+			}
+		}
+	} else {
+		oclass = init->class;
 	}
 
-	ret = nvif_client_ioctl(client, &args, sizeof(args));
+	nvif_object_sclass_put(&sclass);
+	if (!oclass)
+		return nouveau_abi16_put(abi16, -EINVAL);
+
+	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
+	if (!ntfy)
+		return nouveau_abi16_put(abi16, -ENOMEM);
+
+	list_add(&ntfy->head, &chan->notifiers);
+
+	client->route = NVDRM_OBJECT_ABI16;
+	ret = nvif_object_init(&chan->chan->user, init->handle, oclass,
+			       NULL, 0, &ntfy->object);
+	client->route = NVDRM_OBJECT_NVIF;
+
+	if (ret)
+		nouveau_abi16_ntfy_fini(chan, ntfy);
 	return nouveau_abi16_put(abi16, ret);
 }
 
@@ -406,27 +458,13 @@ int
 nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_notifierobj_alloc *info = data;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_new_v0 new;
-		struct nv_dma_v0 ctxdma;
-	} args = {
-		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
-		.ioctl.type = NVIF_IOCTL_V0_NEW,
-		.ioctl.path_nr = 3,
-		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
-		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
-		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
-		.new.route = NVDRM_OBJECT_ABI16,
-		.new.handle = info->handle,
-		.new.oclass = NV_DMA_IN_MEMORY,
-	};
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
 	struct nouveau_abi16_chan *chan;
 	struct nouveau_abi16_ntfy *ntfy;
 	struct nvif_device *device = &abi16->device;
 	struct nvif_client *client;
+	struct nv_dma_v0 args = {};
 	int ret;
 
 	if (unlikely(!abi16))
@@ -435,7 +473,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 	/* completely unnecessary for these chipsets... */
 	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
 		return nouveau_abi16_put(abi16, -EINVAL);
-	client = nvif_client(nvif_object(&abi16->device));
+	client = abi16->device.object.client;
 
 	chan = nouveau_abi16_chan(abi16, info->channel);
 	if (!chan)
@@ -446,41 +484,43 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 		return nouveau_abi16_put(abi16, -ENOMEM);
 
 	list_add(&ntfy->head, &chan->notifiers);
-	ntfy->handle = info->handle;
 
 	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
 			   &ntfy->node);
 	if (ret)
 		goto done;
 
-	args.ctxdma.start = ntfy->node->offset;
-	args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
+	args.start = ntfy->node->offset;
+	args.limit = ntfy->node->offset + ntfy->node->length - 1;
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
-		args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
-		args.ctxdma.start += chan->ntfy_vma.offset;
-		args.ctxdma.limit += chan->ntfy_vma.offset;
+		args.target = NV_DMA_V0_TARGET_VM;
+		args.access = NV_DMA_V0_ACCESS_VM;
+		args.start += chan->ntfy_vma.offset;
+		args.limit += chan->ntfy_vma.offset;
 	} else
-	if (drm->agp.stat == ENABLED) {
-		args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
-		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
-		args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
-		args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
-		client->super = true;
+	if (drm->agp.bridge) {
+		args.target = NV_DMA_V0_TARGET_AGP;
+		args.access = NV_DMA_V0_ACCESS_RDWR;
+		args.start += drm->agp.base + chan->ntfy->bo.offset;
+		args.limit += drm->agp.base + chan->ntfy->bo.offset;
 	} else {
-		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
-		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
-		args.ctxdma.start += chan->ntfy->bo.offset;
-		args.ctxdma.limit += chan->ntfy->bo.offset;
+		args.target = NV_DMA_V0_TARGET_VM;
+		args.access = NV_DMA_V0_ACCESS_RDWR;
+		args.start += chan->ntfy->bo.offset;
+		args.limit += chan->ntfy->bo.offset;
 	}
 
-	ret = nvif_client_ioctl(client, &args, sizeof(args));
+	client->route = NVDRM_OBJECT_ABI16;
+	client->super = true;
+	ret = nvif_object_init(&chan->chan->user, info->handle,
+			       NV_DMA_IN_MEMORY, &args, sizeof(args),
+			       &ntfy->object);
 	client->super = false;
+	client->route = NVDRM_OBJECT_NVIF;
 	if (ret)
 		goto done;
 
 	info->offset = ntfy->node->offset;
-
 done:
 	if (ret)
 		nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -491,47 +531,28 @@ int
 nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_gpuobj_free *fini = data;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_del del;
-	} args = {
-		.ioctl.owner = NVDRM_OBJECT_ABI16,
-		.ioctl.type = NVIF_IOCTL_V0_DEL,
-		.ioctl.path_nr = 4,
-		.ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
-		.ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
-		.ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
-		.ioctl.path[0] = fini->handle,
-	};
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
 	struct nouveau_abi16_chan *chan;
 	struct nouveau_abi16_ntfy *ntfy;
-	struct nvif_client *client;
-	int ret;
+	int ret = -ENOENT;
 
 	if (unlikely(!abi16))
 		return -ENOMEM;
 
 	chan = nouveau_abi16_chan(abi16, fini->channel);
 	if (!chan)
-		return nouveau_abi16_put(abi16, -ENOENT);
-	client = nvif_client(nvif_object(&abi16->device));
+		return nouveau_abi16_put(abi16, -EINVAL);
 
 	/* synchronize with the user channel and destroy the gpu object */
 	nouveau_channel_idle(chan->chan);
 
-	ret = nvif_client_ioctl(client, &args, sizeof(args));
-	if (ret)
-		return nouveau_abi16_put(abi16, ret);
-
-	/* cleanup extra state if this object was a notifier */
 	list_for_each_entry(ntfy, &chan->notifiers, head) {
-		if (ntfy->handle == fini->handle) {
-			nvkm_mm_free(&chan->heap, &ntfy->node);
-			list_del(&ntfy->head);
+		if (ntfy->object.handle == fini->handle) {
+			nouveau_abi16_ntfy_fini(chan, ntfy);
+			ret = 0;
 			break;
 		}
 	}
 
-	return nouveau_abi16_put(abi16, 0);
+	return nouveau_abi16_put(abi16, ret);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 86eb1caf4957..6584557afa40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -13,9 +13,9 @@ int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
 int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
 
 struct nouveau_abi16_ntfy {
+	struct nvif_object object;
 	struct list_head head;
 	struct nvkm_mm_node *node;
-	u32 handle;
 };
 
 struct nouveau_abi16_chan {
@@ -37,7 +37,7 @@ struct nouveau_drm;
 struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
 int  nouveau_abi16_put(struct nouveau_abi16 *, int);
 void nouveau_abi16_fini(struct nouveau_abi16 *);
-u16  nouveau_abi16_swclass(struct nouveau_drm *);
+s32  nouveau_abi16_swclass(struct nouveau_drm *);
 
 #define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
 #define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 622424692b3b..df2d9818aba3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -372,12 +372,12 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
 	return len;
 }
 
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+bool nouveau_acpi_rom_supported(struct device *dev)
 {
 	acpi_status status;
 	acpi_handle dhandle, rom_handle;
 
-	dhandle = ACPI_HANDLE(&pdev->dev);
+	dhandle = ACPI_HANDLE(dev);
 	if (!dhandle)
 		return false;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 74acf0f87785..2f03653aff86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -10,7 +10,7 @@ void nouveau_register_dsm_handler(void);
 void nouveau_unregister_dsm_handler(void);
 void nouveau_switcheroo_optimus_dsm(void);
 int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
 void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
 #else
 static inline bool nouveau_is_optimus(void) { return false; };
@@ -18,7 +18,7 @@ static inline bool nouveau_is_v1_dsm(void) { return false; };
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
 static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
 static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
 static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
deleted file mode 100644
index 0b5970955604..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ /dev/null
@@ -1,195 +0,0 @@
-#include <linux/module.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_agp.h"
-#include "nouveau_reg.h"
-
-#if __OS_HAS_AGP
-MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
-static int nouveau_agpmode = -1;
-module_param_named(agpmode, nouveau_agpmode, int, 0400);
-
-struct nouveau_agpmode_quirk {
-	u16 hostbridge_vendor;
-	u16 hostbridge_device;
-	u16 chip_vendor;
-	u16 chip_device;
-	int mode;
-};
-
-static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
-	/* VIA Apollo PRO133x / GeForce FX 5600 Ultra, max agpmode 2, fdo #20341 */
-	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
-
-	{},
-};
-
-static unsigned long
-get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
-{
-	struct nvif_device *device = &drm->device;
-	struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
-	int agpmode = nouveau_agpmode;
-	unsigned long mode = info->mode;
-
-	/*
-	 * FW seems to be broken on nv18, it makes the card lock up
-	 * randomly.
-	 */
-	if (device->info.chipset == 0x18)
-		mode &= ~PCI_AGP_COMMAND_FW;
-
-	/*
-	 * Go through the quirks list and adjust the agpmode accordingly.
-	 */
-	while (agpmode == -1 && quirk->hostbridge_vendor) {
-		if (info->id_vendor == quirk->hostbridge_vendor &&
-		    info->id_device == quirk->hostbridge_device &&
-		    nvxx_device(device)->pdev->vendor == quirk->chip_vendor &&
-		    nvxx_device(device)->pdev->device == quirk->chip_device) {
-			agpmode = quirk->mode;
-			NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
-				agpmode);
-			break;
-		}
-		++quirk;
-	}
-
-	/*
-	 * AGP mode set in the command line.
-	 */
-	if (agpmode > 0) {
-		bool agpv3 = mode & 0x8;
-		int rate = agpv3 ? agpmode / 4 : agpmode;
-
-		mode = (mode & ~0x7) | (rate & 0x7);
-	}
-
-	return mode;
-}
-
-static bool
-nouveau_agp_enabled(struct nouveau_drm *drm)
-{
-	struct drm_device *dev = drm->dev;
-
-	if (!dev->pdev || !drm_pci_device_is_agp(dev) || !dev->agp)
-		return false;
-
-	if (drm->agp.stat == UNKNOWN) {
-		if (!nouveau_agpmode)
-			return false;
-#ifdef __powerpc__
-		/* Disable AGP by default on all PowerPC machines for
-		 * now -- At least some UniNorth-2 AGP bridges are
-		 * known to be broken: DMA from the host to the card
-		 * works just fine, but writeback from the card to the
-		 * host goes straight to memory untranslated bypassing
-		 * the GATT somehow, making them quite painful to deal
-		 * with...
-		 */
-		if (nouveau_agpmode == -1)
-			return false;
-#endif
-		return true;
-	}
-
-	return (drm->agp.stat == ENABLED);
-}
-#endif
-
-void
-nouveau_agp_reset(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
-	struct nvif_device *device = &drm->device;
-	struct drm_device *dev = drm->dev;
-	u32 save[2];
-	int ret;
-
-	if (!nouveau_agp_enabled(drm))
-		return;
-
-	/* First of all, disable fast writes, otherwise if it's
-	 * already enabled in the AGP bridge and we disable the card's
-	 * AGP controller we might be locking ourselves out of it. */
-	if ((nvif_rd32(device, NV04_PBUS_PCI_NV_19) |
-	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
-		struct drm_agp_info info;
-		struct drm_agp_mode mode;
-
-		ret = drm_agp_info(dev, &info);
-		if (ret)
-			return;
-
-		mode.mode  = get_agp_mode(drm, &info);
-		mode.mode &= ~PCI_AGP_COMMAND_FW;
-
-		ret = drm_agp_enable(dev, mode);
-		if (ret)
-			return;
-	}
-
-
-	/* clear busmaster bit, and disable AGP */
-	save[0] = nvif_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
-	nvif_wr32(device, NV04_PBUS_PCI_NV_19, 0);
-
-	/* reset PGRAPH, PFIFO and PTIMER */
-	save[1] = nvif_mask(device, 0x000200, 0x00011100, 0x00000000);
-	nvif_mask(device, 0x000200, 0x00011100, save[1]);
-
-	/* and restore busmaster bit (gives effect of resetting AGP) */
-	nvif_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
-#endif
-}
-
-void
-nouveau_agp_init(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
-	struct drm_device *dev = drm->dev;
-	struct drm_agp_info info;
-	struct drm_agp_mode mode;
-	int ret;
-
-	if (!nouveau_agp_enabled(drm))
-		return;
-	drm->agp.stat = DISABLE;
-
-	ret = drm_agp_acquire(dev);
-	if (ret) {
-		NV_ERROR(drm, "unable to acquire AGP: %d\n", ret);
-		return;
-	}
-
-	ret = drm_agp_info(dev, &info);
-	if (ret) {
-		NV_ERROR(drm, "unable to get AGP info: %d\n", ret);
-		return;
-	}
-
-	/* see agp.h for the AGPSTAT_* modes available */
-	mode.mode = get_agp_mode(drm, &info);
-
-	ret = drm_agp_enable(dev, mode);
-	if (ret) {
-		NV_ERROR(drm, "unable to enable AGP: %d\n", ret);
-		return;
-	}
-
-	drm->agp.stat = ENABLED;
-	drm->agp.base = info.aperture_base;
-	drm->agp.size = info.aperture_size;
-#endif
-}
-
-void
-nouveau_agp_fini(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
-	struct drm_device *dev = drm->dev;
-	if (dev->agp && dev->agp->acquired)
-		drm_agp_release(dev);
-#endif
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.h b/drivers/gpu/drm/nouveau/nouveau_agp.h
deleted file mode 100644
index b55c08652963..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_agp.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __NOUVEAU_AGP_H__
-#define __NOUVEAU_AGP_H__
-
-struct nouveau_drm;
-
-void nouveau_agp_reset(struct nouveau_drm *);
-void nouveau_agp_init(struct nouveau_drm *);
-void nouveau_agp_fini(struct nouveau_drm *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index e566c5b53651..89eb46040b13 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -40,7 +40,7 @@ static int
 nv40_get_intensity(struct backlight_device *bd)
 {
 	struct nouveau_drm *drm = bl_get_data(bd);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
 				   NV40_PMC_BACKLIGHT_MASK) >> 16;
 
@@ -51,7 +51,7 @@ static int
 nv40_set_intensity(struct backlight_device *bd)
 {
 	struct nouveau_drm *drm = bl_get_data(bd);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int val = bd->props.brightness;
 	int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
 
@@ -71,7 +71,7 @@ static int
 nv40_backlight_init(struct drm_connector *connector)
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct backlight_properties props;
 	struct backlight_device *bd;
 
@@ -97,7 +97,7 @@ nv50_get_intensity(struct backlight_device *bd)
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div = 1025;
 	u32 val;
@@ -112,7 +112,7 @@ nv50_set_intensity(struct backlight_device *bd)
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div = 1025;
 	u32 val = (bd->props.brightness * div) / 100;
@@ -133,7 +133,7 @@ nva3_get_intensity(struct backlight_device *bd)
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div, val;
 
@@ -151,7 +151,7 @@ nva3_set_intensity(struct backlight_device *bd)
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div, val;
 
@@ -177,7 +177,7 @@ static int
 nv50_backlight_init(struct drm_connector *connector)
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct nouveau_encoder *nv_encoder;
 	struct backlight_properties props;
 	struct backlight_device *bd;
@@ -193,9 +193,9 @@ nv50_backlight_init(struct drm_connector *connector)
 	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
 		return 0;
 
-	if (device->info.chipset <= 0xa0 ||
-	    device->info.chipset == 0xaa ||
-	    device->info.chipset == 0xac)
+	if (drm->device.info.chipset <= 0xa0 ||
+	    drm->device.info.chipset == 0xaa ||
+	    drm->device.info.chipset == 0xac)
 		ops = &nv50_bl_ops;
 	else
 		ops = &nva3_bl_ops;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0190b69bbe25..4dca65a63b92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -215,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
 	 */
 
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct nvbios *bios = &drm->vbios;
 	uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
 	uint32_t sel_clk_binding, sel_clk;
@@ -318,7 +318,8 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
 static int
 get_fp_strap(struct drm_device *dev, struct nvbios *bios)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvif_object *device = &drm->device.object;
 
 	/*
 	 * The fp strap is normally dictated by the "User Strap" in
@@ -332,7 +333,7 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
 	if (bios->major_version < 5 && bios->data[0x48] & 0x4)
 		return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
 
-	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
+	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 		return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
 	else
 		return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
@@ -634,7 +635,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
 	 */
 
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct nvbios *bios = &drm->vbios;
 	int cv = bios->chip_version;
 	uint16_t clktable = 0, scriptptr;
@@ -1481,22 +1482,20 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 			entry->dpconf.link_bw = 540000;
 			break;
 		}
-		entry->dpconf.link_nr = (conf & 0x0f000000) >> 24;
-		if (dcb->version < 0x41) {
-			switch (entry->dpconf.link_nr) {
-			case 0xf:
-				entry->dpconf.link_nr = 4;
-				break;
-			case 0x3:
-				entry->dpconf.link_nr = 2;
-				break;
-			default:
-				entry->dpconf.link_nr = 1;
-				break;
-			}
+		switch ((conf & 0x0f000000) >> 24) {
+		case 0xf:
+		case 0x4:
+			entry->dpconf.link_nr = 4;
+			break;
+		case 0x3:
+		case 0x2:
+			entry->dpconf.link_nr = 2;
+			break;
+		default:
+			entry->dpconf.link_nr = 1;
+			break;
 		}
 		link = entry->dpconf.sor.link;
-		entry->i2c_index += NV_I2C_AUX(0);
 		break;
 	case DCB_OUTPUT_TMDS:
 		if (dcb->version >= 0x40) {
@@ -1892,11 +1891,12 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 	idx = -1;
 	while ((conn = olddcb_conn(dev, ++idx))) {
 		if (conn[0] != 0xff) {
-			NV_INFO(drm, "DCB conn %02d: ", idx);
 			if (olddcb_conntab(dev)[3] < 4)
-				pr_cont("%04x\n", ROM16(conn[0]));
+				NV_INFO(drm, "DCB conn %02d: %04x\n",
+					idx, ROM16(conn[0]));
 			else
-				pr_cont("%08x\n", ROM32(conn[0]));
+				NV_INFO(drm, "DCB conn %02d: %08x\n",
+					idx, ROM32(conn[0]));
 		}
 	}
 	dcb_fake_connectors(bios);
@@ -1915,7 +1915,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
 	 */
 
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	uint8_t bytes_to_write;
 	uint16_t hwsq_entry_offset;
 	int i;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6edcce1658b7..15057b39491c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -48,24 +48,19 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
-	struct nvkm_fb_tile *tile = &pfb->tile.region[i];
-	struct nvkm_engine *engine;
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_fb_tile *tile = &fb->tile.region[i];
 
 	nouveau_fence_unref(&reg->fence);
 
 	if (tile->pitch)
-		pfb->tile.fini(pfb, i, tile);
+		nvkm_fb_tile_fini(fb, i, tile);
 
 	if (pitch)
-		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
-
-	pfb->tile.prog(pfb, i, tile);
+		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
 
-	if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_GR)))
-		engine->tile_prog(engine, i);
-	if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_MPEG)))
-		engine->tile_prog(engine, i);
+	nvkm_fb_tile_prog(fb, i, tile);
 }
 
 static struct nouveau_drm_tile *
@@ -105,18 +100,18 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 		   u32 size, u32 pitch, u32 flags)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_fb *fb = nvxx_fb(&drm->device);
 	struct nouveau_drm_tile *tile, *found = NULL;
 	int i;
 
-	for (i = 0; i < pfb->tile.regions; i++) {
+	for (i = 0; i < fb->tile.regions; i++) {
 		tile = nv10_bo_get_tile_region(dev, i);
 
 		if (pitch && !found) {
 			found = tile;
 			continue;
 
-		} else if (tile && pfb->tile.region[i].pitch) {
+		} else if (tile && fb->tile.region[i].pitch) {
 			/* Kill an unused tile region. */
 			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
 		}
@@ -214,7 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 
-	if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
+	if (!nvxx_device(&drm->device)->func->cpu_coherent)
 		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
 	nvbo->page_shift = 12;
@@ -471,8 +466,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_device(nv_device_base(device),
-			ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+					   PAGE_SIZE, DMA_TO_DEVICE);
 }
 
 void
@@ -491,8 +486,8 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_cpu(nv_device_base(device),
-			ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+					PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
 int
@@ -581,10 +576,9 @@ nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 {
 #if __OS_HAS_AGP
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct drm_device *dev = drm->dev;
 
-	if (drm->agp.stat == ENABLED) {
-		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+	if (drm->agp.bridge) {
+		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
 					 page_flags, dummy_read);
 	}
 #endif
@@ -636,12 +630,12 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 			man->func = &nouveau_gart_manager;
 		else
-		if (drm->agp.stat != ENABLED)
+		if (!drm->agp.bridge)
 			man->func = &nv04_gart_manager;
 		else
 			man->func = &ttm_bo_manager_func;
 
-		if (drm->agp.stat == ENABLED) {
+		if (drm->agp.bridge) {
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED |
 				TTM_PL_FLAG_WC;
@@ -1064,7 +1058,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
-	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_fence *fence;
 	int ret;
 
@@ -1104,7 +1098,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 	static const struct {
 		const char *name;
 		int engine;
-		u32 oclass;
+		s32 oclass;
 		int (*exec)(struct nouveau_channel *,
 			    struct ttm_buffer_object *,
 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
@@ -1137,7 +1131,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 		if (chan == NULL)
 			continue;
 
-		ret = nvif_object_init(chan->object, NULL,
+		ret = nvif_object_init(&chan->user,
 				       mthd->oclass | (mthd->engine << 16),
 				       mthd->oclass, NULL, 0,
 				       &drm->ttm.copy);
@@ -1356,6 +1350,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nvkm_device *device = nvxx_device(&drm->device);
 	struct nvkm_mem *node = mem->mm_node;
 	int ret;
 
@@ -1372,10 +1367,10 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		return 0;
 	case TTM_PL_TT:
 #if __OS_HAS_AGP
-		if (drm->agp.stat == ENABLED) {
+		if (drm->agp.bridge) {
 			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = drm->agp.base;
-			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
+			mem->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
@@ -1384,16 +1379,20 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		/* fallthrough, tiled memory */
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
-		mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+		mem->bus.base = device->func->resource_addr(device, 1);
 		mem->bus.is_iomem = true;
 		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 			struct nvkm_bar *bar = nvxx_bar(&drm->device);
+			int page_shift = 12;
+			if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+				page_shift = node->page_shift;
 
-			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
-					&node->bar_vma);
+			ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
+					    &node->bar_vma);
 			if (ret)
 				return ret;
 
+			nvkm_vm_map(&node->bar_vma, node);
 			mem->bus.offset = node->bar_vma.offset;
 		}
 		break;
@@ -1406,14 +1405,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
-	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nvkm_bar *bar = nvxx_bar(&drm->device);
 	struct nvkm_mem *node = mem->mm_node;
 
 	if (!node->bar_vma.node)
 		return;
 
-	bar->unmap(bar, &node->bar_vma);
+	nvkm_vm_unmap(&node->bar_vma);
+	nvkm_vm_put(&node->bar_vma);
 }
 
 static int
@@ -1421,8 +1419,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvif_device *device = &drm->device;
-	u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT;
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
 	int i, ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1488,18 +1486,18 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	drm = nouveau_bdev(ttm->bdev);
 	device = nvxx_device(&drm->device);
 	dev = drm->dev;
-	pdev = nv_device_base(device);
+	pdev = device->dev;
 
 	/*
 	 * Objects matching this condition have been marked as force_coherent,
 	 * so use the DMA API for them.
 	 */
-	if (!nv_device_is_cpu_coherent(device) &&
+	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
 	    ttm->caching_state == tt_uncached)
 		return ttm_dma_populate(ttm_dma, dev->dev);
 
 #if __OS_HAS_AGP
-	if (drm->agp.stat == ENABLED) {
+	if (drm->agp.bridge) {
 		return ttm_agp_tt_populate(ttm);
 	}
 #endif
@@ -1553,20 +1551,20 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	drm = nouveau_bdev(ttm->bdev);
 	device = nvxx_device(&drm->device);
 	dev = drm->dev;
-	pdev = nv_device_base(device);
+	pdev = device->dev;
 
 	/*
 	 * Objects matching this condition have been marked as force_coherent,
 	 * so use the DMA API for them.
 	 */
-	if (!nv_device_is_cpu_coherent(device) &&
+	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
 	    ttm->caching_state == tt_uncached) {
 		ttm_dma_unpopulate(ttm_dma, dev->dev);
 		return;
 	}
 
 #if __OS_HAS_AGP
-	if (drm->agp.stat == ENABLED) {
+	if (drm->agp.bridge) {
 		ttm_agp_tt_unpopulate(ttm);
 		return;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 0589babc506e..ff5e59db49db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -24,6 +24,7 @@
 
 #include <nvif/os.h>
 #include <nvif/class.h>
+#include <nvif/ioctl.h>
 
 /*XXX*/
 #include <core/client.h>
@@ -42,20 +43,26 @@ module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
 int
 nouveau_channel_idle(struct nouveau_channel *chan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
-	struct nouveau_fence *fence = NULL;
-	int ret;
+	if (likely(chan && chan->fence)) {
+		struct nouveau_cli *cli = (void *)chan->user.client;
+		struct nouveau_fence *fence = NULL;
+		int ret;
+
+		ret = nouveau_fence_new(chan, false, &fence);
+		if (!ret) {
+			ret = nouveau_fence_wait(fence, false, false);
+			nouveau_fence_unref(&fence);
+		}
 
-	ret = nouveau_fence_new(chan, false, &fence);
-	if (!ret) {
-		ret = nouveau_fence_wait(fence, false, false);
-		nouveau_fence_unref(&fence);
+		if (ret) {
+			NV_PRINTK(err, cli, "failed to idle channel "
+					    "0x%08x [%s]\n",
+				  chan->user.handle,
+				  nvxx_client(&cli->base)->name);
+			return ret;
+		}
 	}
-
-	if (ret)
-		NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
-			  chan->object->handle, nvxx_client(&cli->base)->name);
-	return ret;
+	return 0;
 }
 
 void
@@ -63,21 +70,18 @@ nouveau_channel_del(struct nouveau_channel **pchan)
 {
 	struct nouveau_channel *chan = *pchan;
 	if (chan) {
-		if (chan->fence) {
-			nouveau_channel_idle(chan);
+		if (chan->fence)
 			nouveau_fence(chan->drm)->context_del(chan);
-		}
 		nvif_object_fini(&chan->nvsw);
 		nvif_object_fini(&chan->gart);
 		nvif_object_fini(&chan->vram);
-		nvif_object_ref(NULL, &chan->object);
+		nvif_object_fini(&chan->user);
 		nvif_object_fini(&chan->push.ctxdma);
 		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
 		nouveau_bo_unmap(chan->push.buffer);
 		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
 			nouveau_bo_unpin(chan->push.buffer);
 		nouveau_bo_ref(NULL, &chan->push.buffer);
-		nvif_device_ref(NULL, &chan->device);
 		kfree(chan);
 	}
 	*pchan = NULL;
@@ -87,7 +91,7 @@ static int
 nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		     u32 handle, u32 size, struct nouveau_channel **pchan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)device->object.client;
 	struct nvkm_mmu *mmu = nvxx_mmu(device);
 	struct nv_dma_v0 args = {};
 	struct nouveau_channel *chan;
@@ -98,7 +102,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 	if (!chan)
 		return -ENOMEM;
 
-	nvif_device_ref(device, &chan->device);
+	chan->device = device;
 	chan->drm = drm;
 
 	/* allocate memory for dma push buffer */
@@ -146,7 +150,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			 */
 			args.target = NV_DMA_V0_TARGET_PCI;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
-			args.start = nv_device_resource_start(nvxx_device(device), 1);
+			args.start = nvxx_device(device)->func->
+				resource_addr(nvxx_device(device), 1);
 			args.limit = args.start + device->info.ram_user - 1;
 		} else {
 			args.target = NV_DMA_V0_TARGET_VRAM;
@@ -155,7 +160,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			args.limit = device->info.ram_user - 1;
 		}
 	} else {
-		if (chan->drm->agp.stat == ENABLED) {
+		if (chan->drm->agp.bridge) {
 			args.target = NV_DMA_V0_TARGET_AGP;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = chan->drm->agp.base;
@@ -169,7 +174,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		}
 	}
 
-	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
+	ret = nvif_object_init(&device->object, NVDRM_PUSH |
 			       (handle & 0xffff), NV_DMA_FROM_MEMORY,
 			       &args, sizeof(args), &chan->push.ctxdma);
 	if (ret) {
@@ -193,8 +198,9 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 	const u16 *oclass = oclasses;
 	union {
 		struct nv50_channel_gpfifo_v0 nv50;
+		struct fermi_channel_gpfifo_v0 fermi;
 		struct kepler_channel_gpfifo_a_v0 kepler;
-	} args, *retn;
+	} args;
 	struct nouveau_channel *chan;
 	u32 size;
 	int ret;
@@ -210,26 +216,36 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 		if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
 			args.kepler.version = 0;
 			args.kepler.engine  = engine;
-			args.kepler.pushbuf = chan->push.ctxdma.handle;
 			args.kepler.ilength = 0x02000;
 			args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
+			args.kepler.vm = 0;
 			size = sizeof(args.kepler);
+		} else
+		if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
+			args.fermi.version = 0;
+			args.fermi.ilength = 0x02000;
+			args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
+			args.fermi.vm = 0;
+			size = sizeof(args.fermi);
 		} else {
 			args.nv50.version = 0;
-			args.nv50.pushbuf = chan->push.ctxdma.handle;
 			args.nv50.ilength = 0x02000;
 			args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+			args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
+			args.nv50.vm = 0;
 			size = sizeof(args.nv50);
 		}
 
-		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
-				      &args, size, &chan->object);
+		ret = nvif_object_init(&device->object, handle, *oclass++,
+				       &args, size, &chan->user);
 		if (ret == 0) {
-			retn = chan->object->data;
-			if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A)
-				chan->chid = retn->kepler.chid;
+			if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
+				chan->chid = args.kepler.chid;
+			else
+			if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
+				chan->chid = args.fermi.chid;
 			else
-				chan->chid = retn->nv50.chid;
+				chan->chid = args.nv50.chid;
 			return ret;
 		}
 	} while (*oclass);
@@ -248,7 +264,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
 					NV03_CHANNEL_DMA,
 					0 };
 	const u16 *oclass = oclasses;
-	struct nv03_channel_dma_v0 args, *retn;
+	struct nv03_channel_dma_v0 args;
 	struct nouveau_channel *chan;
 	int ret;
 
@@ -260,15 +276,14 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
 
 	/* create channel object */
 	args.version = 0;
-	args.pushbuf = chan->push.ctxdma.handle;
+	args.pushbuf = nvif_handle(&chan->push.ctxdma);
 	args.offset = chan->push.vma.offset;
 
 	do {
-		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
-				      &args, sizeof(args), &chan->object);
+		ret = nvif_object_init(&device->object, handle, *oclass++,
+				       &args, sizeof(args), &chan->user);
 		if (ret == 0) {
-			retn = chan->object->data;
-			chan->chid = retn->chid;
+			chan->chid = args.chid;
 			return ret;
 		}
 	} while (ret && *oclass);
@@ -281,13 +296,12 @@ static int
 nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 {
 	struct nvif_device *device = chan->device;
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nvkm_mmu *mmu = nvxx_mmu(device);
-	struct nvkm_sw_chan *swch;
 	struct nv_dma_v0 args = {};
 	int ret, i;
 
-	nvif_object_map(chan->object);
+	nvif_object_map(&chan->user);
 
 	/* allocate dma objects to cover all allowed vram, and gart */
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -303,9 +317,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.limit = device->info.ram_user - 1;
 		}
 
-		ret = nvif_object_init(chan->object, NULL, vram,
-				       NV_DMA_IN_MEMORY, &args,
-				       sizeof(args), &chan->vram);
+		ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
+				       &args, sizeof(args), &chan->vram);
 		if (ret)
 			return ret;
 
@@ -315,7 +328,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.start = 0;
 			args.limit = cli->vm->mmu->limit - 1;
 		} else
-		if (chan->drm->agp.stat == ENABLED) {
+		if (chan->drm->agp.bridge) {
 			args.target = NV_DMA_V0_TARGET_AGP;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = chan->drm->agp.base;
@@ -328,15 +341,14 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 			args.limit = mmu->limit - 1;
 		}
 
-		ret = nvif_object_init(chan->object, NULL, gart,
-				       NV_DMA_IN_MEMORY, &args,
-				       sizeof(args), &chan->gart);
+		ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
+				       &args, sizeof(args), &chan->gart);
 		if (ret)
 			return ret;
 	}
 
 	/* initialise dma tracking parameters */
-	switch (chan->object->oclass & 0x00ff) {
+	switch (chan->user.oclass & 0x00ff) {
 	case 0x006b:
 	case 0x006e:
 		chan->user_put = 0x40;
@@ -368,15 +380,12 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 
 	/* allocate software object class (used for fences on <= nv05) */
 	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
-		ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
+		ret = nvif_object_init(&chan->user, 0x006e,
+				       NVIF_IOCTL_NEW_V0_SW_NV04,
 				       NULL, 0, &chan->nvsw);
 		if (ret)
 			return ret;
 
-		swch = (void *)nvxx_object(&chan->nvsw)->parent;
-		swch->flip = nouveau_flip_complete;
-		swch->flip_data = chan;
-
 		ret = RING_SPACE(chan, 2);
 		if (ret)
 			return ret;
@@ -395,7 +404,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
 		    u32 handle, u32 arg0, u32 arg1,
 		    struct nouveau_channel **pchan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)device->object.client;
 	bool super;
 	int ret;
 
@@ -405,17 +414,17 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
 
 	ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
 	if (ret) {
-		NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
+		NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
 		ret = nouveau_channel_dma(drm, device, handle, pchan);
 		if (ret) {
-			NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
+			NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
 			goto done;
 		}
 	}
 
 	ret = nouveau_channel_init(*pchan, arg0, arg1);
 	if (ret) {
-		NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
+		NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
 		nouveau_channel_del(pchan);
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 8b3640f69e4f..2ed32414cb69 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -37,7 +37,7 @@ struct nouveau_channel {
 	u32 user_get;
 	u32 user_put;
 
-	struct nvif_object *object;
+	struct nvif_object user;
 };
 
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 3162040bc314..2e7cbe933533 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -125,9 +125,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
 	 * is handled by the SOR itself, and not required for LVDS DDC.
 	 */
 	if (nv_connector->type == DCB_CONNECTOR_eDP) {
-		panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+		panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
 		if (panel == 0) {
-			gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
 			msleep(300);
 		}
 	}
@@ -148,7 +148,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
 				break;
 		} else
 		if (nv_encoder->i2c) {
-			if (nv_probe_i2c(nv_encoder->i2c, 0x50))
+			if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
 				break;
 		}
 	}
@@ -157,7 +157,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
 	 * state to avoid confusing the SOR for other output types.
 	 */
 	if (!nv_encoder && panel == 0)
-		gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
 
 	return nv_encoder;
 }
@@ -241,7 +241,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = NULL;
 	struct nouveau_encoder *nv_partner;
-	struct nvkm_i2c_port *i2c;
+	struct i2c_adapter *i2c;
 	int type;
 	int ret;
 	enum drm_connector_status conn_status = connector_status_disconnected;
@@ -259,7 +259,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 
 	nv_encoder = nouveau_connector_ddc_detect(connector);
 	if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
-		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+		nv_connector->edid = drm_get_edid(connector, i2c);
 		drm_mode_connector_update_edid_property(connector,
 							nv_connector->edid);
 		if (!nv_connector->edid) {
@@ -919,7 +919,7 @@ nouveau_connector_funcs_lvds = {
 	.force = nouveau_connector_force
 };
 
-static void
+static int
 nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
 {
 	struct nouveau_encoder *nv_encoder = NULL;
@@ -930,15 +930,15 @@ nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
 	    nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		if (mode == DRM_MODE_DPMS_ON) {
 			u8 data = DP_SET_POWER_D0;
-			nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+			nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
 			usleep_range(1000, 2000);
 		} else {
 			u8 data = DP_SET_POWER_D3;
-			nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+			nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
 		}
 	}
 
-	drm_helper_connector_dpms(connector, mode);
+	return drm_helper_connector_dpms(connector, mode);
 }
 
 static const struct drm_connector_funcs
@@ -980,29 +980,29 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
 }
 
 static ssize_t
-nouveau_connector_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
 {
 	struct nouveau_connector *nv_connector =
-		container_of(aux, typeof(*nv_connector), aux);
+		container_of(obj, typeof(*nv_connector), aux);
 	struct nouveau_encoder *nv_encoder;
-	struct nvkm_i2c_port *port;
+	struct nvkm_i2c_aux *aux;
 	int ret;
 
 	nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
-	if (!nv_encoder || !(port = nv_encoder->i2c))
+	if (!nv_encoder || !(aux = nv_encoder->aux))
 		return -ENODEV;
 	if (WARN_ON(msg->size > 16))
 		return -E2BIG;
 	if (msg->size == 0)
 		return msg->size;
 
-	ret = nvkm_i2c(port)->acquire(port, 0);
+	ret = nvkm_i2c_aux_acquire(aux);
 	if (ret)
 		return ret;
 
-	ret = port->func->aux(port, false, msg->request, msg->address,
-			      msg->buffer, msg->size);
-	nvkm_i2c(port)->release(port);
+	ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
+				msg->buffer, msg->size);
+	nvkm_i2c_aux_release(aux);
 	if (ret >= 0) {
 		msg->reply = ret;
 		return msg->size;
@@ -1256,8 +1256,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		break;
 	}
 
-	ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug,
-				true, NV04_DISP_NTFY_CONN,
+	ret = nvif_notify_init(&disp->disp, nouveau_connector_hotplug, true,
+			       NV04_DISP_NTFY_CONN,
 			       &(struct nvif_notify_conn_req_v0) {
 				.mask = NVIF_NOTIFY_CONN_V0_ANY,
 				.conn = index,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8670d90cdc11..cc6c228e11c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -185,7 +185,7 @@ nouveau_display_vblank_init(struct drm_device *dev)
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		ret = nvif_notify_init(&disp->disp, NULL,
+		ret = nvif_notify_init(&disp->disp,
 				       nouveau_display_vblank_handler, false,
 				       NV04_DISP_NTFY_VBLANK,
 				       &(struct nvif_notify_head_req_v0) {
@@ -358,6 +358,7 @@ int
 nouveau_display_init(struct drm_device *dev)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_connector *connector;
 	int ret;
 
@@ -374,6 +375,8 @@ nouveau_display_init(struct drm_device *dev)
 		nvif_notify_get(&conn->hpd);
 	}
 
+	/* enable flip completion events */
+	nvif_notify_get(&drm->flip);
 	return ret;
 }
 
@@ -381,6 +384,7 @@ void
 nouveau_display_fini(struct drm_device *dev)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_connector *connector;
 	int head;
 
@@ -388,6 +392,9 @@ nouveau_display_fini(struct drm_device *dev)
 	for (head = 0; head < dev->mode_config.num_crtc; head++)
 		drm_vblank_off(dev, head);
 
+	/* disable flip completion events */
+	nvif_notify_put(&drm->flip);
+
 	/* disable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
@@ -438,6 +445,7 @@ int
 nouveau_display_create(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvkm_device *device = nvxx_device(&drm->device);
 	struct nouveau_display *disp;
 	int ret;
 
@@ -450,7 +458,7 @@ nouveau_display_create(struct drm_device *dev)
 	drm_mode_create_dvi_i_properties(dev);
 
 	dev->mode_config.funcs = &nouveau_mode_config_funcs;
-	dev->mode_config.fb_base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+	dev->mode_config.fb_base = device->func->resource_addr(device, 1);
 
 	dev->mode_config.min_width = 0;
 	dev->mode_config.min_height = 0;
@@ -494,7 +502,7 @@ nouveau_display_create(struct drm_device *dev)
 		int i;
 
 		for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
-			ret = nvif_object_init(nvif_object(&drm->device), NULL,
+			ret = nvif_object_init(&drm->device.object,
 					       NVDRM_DISPLAY, oclass[i],
 					       NULL, 0, &disp->disp);
 		}
@@ -711,7 +719,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	chan = drm->channel;
 	if (!chan)
 		return -ENODEV;
-	cli = (void *)nvif_client(&chan->device->base);
+	cli = (void *)chan->user.client;
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
@@ -847,10 +855,10 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 }
 
 int
-nouveau_flip_complete(void *data)
+nouveau_flip_complete(struct nvif_notify *notify)
 {
-	struct nouveau_channel *chan = data;
-	struct nouveau_drm *drm = chan->drm;
+	struct nouveau_drm *drm = container_of(notify, typeof(*drm), flip);
+	struct nouveau_channel *chan = drm->channel;
 	struct nouveau_page_flip_state state;
 
 	if (!nouveau_finish_page_flip(chan, &state)) {
@@ -861,7 +869,7 @@ nouveau_flip_complete(void *data)
 		}
 	}
 
-	return 0;
+	return NVIF_NOTIFY_KEEP;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 6d9245aa81a6..d168c63533c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -52,9 +52,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
 {
 	uint64_t val;
 
-	val = nvif_rd32(chan, chan->user_get);
+	val = nvif_rd32(&chan->user, chan->user_get);
         if (chan->user_get_hi)
-                val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32;
+                val |= (uint64_t)nvif_rd32(&chan->user, chan->user_get_hi) << 32;
 
 	/* reset counter as long as GET is still advancing, this is
 	 * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -82,7 +82,7 @@ void
 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	      int delta, int length)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_bo *pb = chan->push.buffer;
 	struct nvkm_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
@@ -103,7 +103,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	/* Flush writes. */
 	nouveau_bo_rd32(pb, 0);
 
-	nvif_wr32(chan, 0x8c, chan->dma.ib_put);
+	nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
 	chan->dma.ib_free--;
 }
 
@@ -113,7 +113,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
 	uint32_t cnt = 0, prev_get = 0;
 
 	while (chan->dma.ib_free < count) {
-		uint32_t get = nvif_rd32(chan, 0x88);
+		uint32_t get = nvif_rd32(&chan->user, 0x88);
 		if (get != prev_get) {
 			prev_get = get;
 			cnt = 0;
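
READ_GET() above now goes through the channel's user object for both halves of the GET pointer. The widening itself is easy to demonstrate standalone; rd32() below is a stand-in for nvif_rd32(&chan->user, ...) returning canned values.

#include <stdint.h>
#include <stdio.h>

/* stand-in for nvif_rd32(&chan->user, reg) */
static uint32_t rd32(uint32_t reg)
{
	return reg == 0x44 ? 0x00001000u : 0x00000002u;
}

int main(void)
{
	uint32_t user_get = 0x44, user_get_hi = 0x48;
	uint64_t val = rd32(user_get);

	if (user_get_hi)	/* only channels with a hi register */
		val |= (uint64_t)rd32(user_get_hi) << 32;
	printf("GET = 0x%016llx\n", (unsigned long long)val);
	return 0;
}
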
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8da0a272c45a..aff3a9d0a1fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -140,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
 #define WRITE_PUT(val) do {                                                    \
 	mb();                                                   \
 	nouveau_bo_rd32(chan->push.buffer, 0);                                 \
-	nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
+	nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
 } while (0)
 
 static inline void
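
WRITE_PUT() keeps the do { ... } while (0) wrapper, which is what lets a multi-statement macro expand as a single statement inside an if/else. A runnable illustration of the idiom, with the barrier and register write stubbed out:

#include <stdio.h>

static void mb(void) { __sync_synchronize(); }	/* stand-in barrier */
static void wr32(unsigned r, unsigned v) { printf("wr32 0x%02x <- 0x%x\n", r, v); }

#define WRITE_PUT(val) do {     \
	mb();                   \
	wr32(0x40, (val) << 2); \
} while (0)

int main(void)
{
	int resumed = 1;

	if (resumed)
		WRITE_PUT(3);	/* expands safely as one statement */
	else
		WRITE_PUT(0);
	return 0;
}
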
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index c3ef30b3a5ec..e17e15ec7d43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -31,8 +31,7 @@
 #include "nouveau_crtc.h"
 
 static void
-nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_port *auxch,
-		     u8 *dpcd)
+nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	u8 buf[3];
@@ -40,11 +39,11 @@ nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_port *auxch,
 	if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
 		return;
 
-	if (!nv_rdaux(auxch, DP_SINK_OUI, buf, 3))
+	if (!nvkm_rdaux(aux, DP_SINK_OUI, buf, 3))
 		NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n",
 			     buf[0], buf[1], buf[2]);
 
-	if (!nv_rdaux(auxch, DP_BRANCH_OUI, buf, 3))
+	if (!nvkm_rdaux(aux, DP_BRANCH_OUI, buf, 3))
 		NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n",
 			     buf[0], buf[1], buf[2]);
 
@@ -55,15 +54,15 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
 {
 	struct drm_device *dev = nv_encoder->base.base.dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_i2c_port *auxch;
+	struct nvkm_i2c_aux *aux;
 	u8 *dpcd = nv_encoder->dp.dpcd;
 	int ret;
 
-	auxch = nv_encoder->i2c;
-	if (!auxch)
+	aux = nv_encoder->aux;
+	if (!aux)
 		return -ENODEV;
 
-	ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
+	ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, 8);
 	if (ret)
 		return ret;
 
@@ -84,6 +83,6 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
 	NV_DEBUG(drm, "maximum: %dx%d\n",
 		     nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
 
-	nouveau_dp_probe_oui(dev, auxch, dpcd);
+	nouveau_dp_probe_oui(dev, aux, dpcd);
 	return 0;
 }
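
For reference, the values behind the "maximum:" debug line come straight out of the DPCD bytes fetched by the nvkm_rdaux() call above; the offsets follow the DisplayPort spec. An illustrative decode, with values invented for a DP 1.2 sink rather than read from hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* first DPCD bytes: revision, max link rate code, max lane count */
	uint8_t dpcd[8] = { 0x12, 0x0a, 0x84 };
	int link_nr = dpcd[2] & 0x1f;	/* DP_MAX_LANE_COUNT, bits 4:0 */
	int link_bw = dpcd[1] * 27000;	/* rate code -> link rate in kHz */

	printf("maximum: %dx%d\n", link_nr, link_bw);
	return 0;
}
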
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 477cbb12809b..ccefb645fd55 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -32,15 +32,15 @@
 #include "drmP.h"
 #include "drm_crtc_helper.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/option.h>
+#include <core/pci.h>
+#include <core/tegra.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
-#include "nouveau_agp.h"
 #include "nouveau_vga.h"
 #include "nouveau_sysfs.h"
 #include "nouveau_hwmon.h"
@@ -105,14 +105,18 @@ nouveau_name(struct drm_device *dev)
 }
 
 static int
-nouveau_cli_create(u64 name, const char *sname,
+nouveau_cli_create(struct drm_device *dev, const char *sname,
 		   int size, void **pcli)
 {
 	struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
+	int ret;
 	if (cli) {
-		int ret = nvif_client_init(NULL, NULL, sname, name,
-					   nouveau_config, nouveau_debug,
-					  &cli->base);
+		snprintf(cli->name, sizeof(cli->name), "%s", sname);
+		cli->dev = dev;
+
+		ret = nvif_client_init(NULL, cli->name, nouveau_name(dev),
+				       nouveau_config, nouveau_debug,
+				       &cli->base);
 		if (ret == 0) {
 			mutex_init(&cli->mutex);
 			usif_client_init(cli);
@@ -134,12 +138,17 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
 static void
 nouveau_accel_fini(struct nouveau_drm *drm)
 {
-	nouveau_channel_del(&drm->channel);
+	nouveau_channel_idle(drm->channel);
 	nvif_object_fini(&drm->ntfy);
-	nvkm_gpuobj_ref(NULL, &drm->notify);
+	nvkm_gpuobj_del(&drm->notify);
+	nvif_notify_fini(&drm->flip);
 	nvif_object_fini(&drm->nvsw);
-	nouveau_channel_del(&drm->cechan);
+	nouveau_channel_del(&drm->channel);
+
+	nouveau_channel_idle(drm->cechan);
 	nvif_object_fini(&drm->ttm.copy);
+	nouveau_channel_del(&drm->cechan);
+
 	if (drm->fence)
 		nouveau_fence(drm)->dtor(drm);
 }
@@ -148,9 +157,9 @@ static void
 nouveau_accel_init(struct nouveau_drm *drm)
 {
 	struct nvif_device *device = &drm->device;
+	struct nvif_sclass *sclass;
 	u32 arg0, arg1;
-	u32 sclass[16];
-	int ret, i;
+	int ret, i, n;
 
 	if (nouveau_noaccel)
 		return;
@@ -159,12 +168,12 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	/*XXX: this is crap, but the fence/channel stuff is a little
 	 *     backwards in some places.  this will be fixed.
 	 */
-	ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass));
+	ret = n = nvif_object_sclass_get(&device->object, &sclass);
 	if (ret < 0)
 		return;
 
-	for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) {
-		switch (sclass[i]) {
+	for (ret = -ENOSYS, i = 0; i < n; i++) {
+		switch (sclass[i].oclass) {
 		case NV03_CHANNEL_DMA:
 			ret = nv04_fence_create(drm);
 			break;
@@ -191,6 +200,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
 		}
 	}
 
+	nvif_object_sclass_put(&sclass);
 	if (ret) {
 		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
 		nouveau_accel_fini(drm);
@@ -231,10 +241,9 @@ nouveau_accel_init(struct nouveau_drm *drm)
 		return;
 	}
 
-	ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
+	ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
 			       nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
 	if (ret == 0) {
-		struct nvkm_sw_chan *swch;
 		ret = RING_SPACE(drm->channel, 2);
 		if (ret == 0) {
 			if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -246,9 +255,16 @@ nouveau_accel_init(struct nouveau_drm *drm)
 				OUT_RING  (drm->channel, 0x001f0000);
 			}
 		}
-		swch = (void *)nvxx_object(&drm->nvsw)->parent;
-		swch->flip = nouveau_flip_complete;
-		swch->flip_data = drm->channel;
+
+		ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
+				       false, NVSW_NTFY_UEVENT, NULL, 0, 0,
+				       &drm->flip);
+		if (ret == 0)
+			ret = nvif_notify_get(&drm->flip);
+		if (ret) {
+			nouveau_accel_fini(drm);
+			return;
+		}
 	}
 
 	if (ret) {
@@ -258,15 +274,15 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	}
 
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
-		ret = nvkm_gpuobj_new(nvxx_object(&drm->device), NULL, 32,
-				      0, 0, &drm->notify);
+		ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
+				      NULL, &drm->notify);
 		if (ret) {
 			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
 			nouveau_accel_fini(drm);
 			return;
 		}
 
-		ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
+		ret = nvif_object_init(&drm->channel->user, NvNotify0,
 				       NV_DMA_IN_MEMORY,
 				       &(struct nv_dma_v0) {
 						.target = NV_DMA_V0_TARGET_VRAM,
@@ -321,9 +337,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 		remove_conflicting_framebuffers(aper, "nouveaufb", boot);
 	kfree(aper);
 
-	ret = nvkm_device_create(pdev, NVKM_BUS_PCI,
-				 nouveau_pci_name(pdev), pci_name(pdev),
-				 nouveau_config, nouveau_debug, &device);
+	ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
+				  true, true, ~0ULL, &device);
 	if (ret)
 		return ret;
 
@@ -331,7 +346,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 
 	ret = drm_get_pci_dev(pdev, pent, &driver_pci);
 	if (ret) {
-		nvkm_object_ref(NULL, (struct nvkm_object **)&device);
+		nvkm_device_del(&device);
 		return ret;
 	}
 
@@ -371,12 +386,10 @@ nouveau_get_hdmi_dev(struct nouveau_drm *drm)
 static int
 nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 {
-	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_drm *drm;
 	int ret;
 
-	ret = nouveau_cli_create(nouveau_name(dev), "DRM", sizeof(*drm),
-				 (void **)&drm);
+	ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm);
 	if (ret)
 		return ret;
 
@@ -390,36 +403,10 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 
 	nouveau_get_hdmi_dev(drm);
 
-	/* make sure AGP controller is in a consistent state before we
-	 * (possibly) execute vbios init tables (see nouveau_agp.h)
-	 */
-	if (pdev && drm_pci_device_is_agp(dev) && dev->agp) {
-		const u64 enables = NV_DEVICE_V0_DISABLE_IDENTIFY |
-				    NV_DEVICE_V0_DISABLE_MMIO;
-		/* dummy device object, doesn't init anything, but allows
-		 * agp code access to registers
-		 */
-		ret = nvif_device_init(&drm->client.base.base, NULL,
-				       NVDRM_DEVICE, NV_DEVICE,
-				       &(struct nv_device_v0) {
-						.device = ~0,
-						.disable = ~enables,
-						.debug0 = ~0,
-				       }, sizeof(struct nv_device_v0),
-				       &drm->device);
-		if (ret)
-			goto fail_device;
-
-		nouveau_agp_reset(drm);
-		nvif_device_fini(&drm->device);
-	}
-
-	ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE,
-			       NV_DEVICE,
+	ret = nvif_device_init(&drm->client.base.object,
+			       NVDRM_DEVICE, NV_DEVICE,
 			       &(struct nv_device_v0) {
 					.device = ~0,
-					.disable = 0,
-					.debug0 = 0,
 			       }, sizeof(struct nv_device_v0),
 			       &drm->device);
 	if (ret)
@@ -432,14 +419,13 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	 * better fix is found - assuming there is one...
 	 */
 	if (drm->device.info.chipset == 0xc1)
-		nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000);
+		nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000);
 
 	nouveau_vga_init(drm);
-	nouveau_agp_init(drm);
 
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
-				  0x1000, &drm->client.vm);
+				  0x1000, NULL, &drm->client.vm);
 		if (ret)
 			goto fail_device;
 
@@ -486,7 +472,6 @@ fail_dispctor:
 fail_bios:
 	nouveau_ttm_fini(drm);
 fail_ttm:
-	nouveau_agp_fini(drm);
 	nouveau_vga_fini(drm);
 fail_device:
 	nvif_device_fini(&drm->device);
@@ -512,7 +497,6 @@ nouveau_drm_unload(struct drm_device *dev)
 	nouveau_bios_takedown(dev);
 
 	nouveau_ttm_fini(drm);
-	nouveau_agp_fini(drm);
 	nouveau_vga_fini(drm);
 
 	nvif_device_fini(&drm->device);
@@ -527,15 +511,14 @@ nouveau_drm_device_remove(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_client *client;
-	struct nvkm_object *device;
+	struct nvkm_device *device;
 
 	dev->irq_enabled = false;
 	client = nvxx_client(&drm->client.base);
-	device = client->device;
+	device = nvkm_device_find(client->device);
 	drm_put_dev(dev);
 
-	nvkm_object_ref(NULL, &device);
-	nvkm_object_debug();
+	nvkm_device_del(&device);
 }
 
 static void
@@ -597,7 +580,6 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
 	if (ret)
 		goto fail_client;
 
-	nouveau_agp_fini(drm);
 	return 0;
 
 fail_client:
@@ -622,13 +604,8 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli;
 
-	NV_INFO(drm, "re-enabling device...\n");
-
-	nouveau_agp_reset(drm);
-
 	NV_INFO(drm, "resuming kernel object tree...\n");
 	nvif_client_resume(&drm->client.base);
-	nouveau_agp_init(drm);
 
 	NV_INFO(drm, "resuming client object trees...\n");
 	if (drm->fence && nouveau_fence(drm)->resume)
@@ -728,7 +705,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
 		return -EBUSY;
 	}
 
-	nv_debug_level(SILENT);
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 	nouveau_switcheroo_optimus_dsm();
@@ -762,10 +738,9 @@ nouveau_pmops_runtime_resume(struct device *dev)
 	ret = nouveau_do_resume(drm_dev, true);
 	drm_kms_helper_poll_enable(drm_dev);
 	/* do magic */
-	nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
+	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
-	nv_debug_level(NORMAL);
 	return ret;
 }
 
@@ -826,8 +801,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 	get_task_comm(tmpname, current);
 	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
 
-	ret = nouveau_cli_create(nouveau_name(dev), name, sizeof(*cli),
-			(void **)&cli);
+	ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli);
 
 	if (ret)
 		goto out_suspend;
@@ -836,7 +810,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
-				  0x1000, &cli->vm);
+				  0x1000, NULL, &cli->vm);
 		if (ret) {
 			nouveau_cli_destroy(cli);
 			goto out_suspend;
@@ -945,8 +919,8 @@ nouveau_driver_fops = {
 static struct drm_driver
 driver_stub = {
 	.driver_features =
-		DRIVER_USE_AGP |
-		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
+		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
+		DRIVER_KMS_LEGACY_CONTEXT,
 
 	.load = nouveau_drm_load,
 	.unload = nouveau_drm_unload,
@@ -1056,18 +1030,16 @@ nouveau_drm_pci_driver = {
 };
 
 struct drm_device *
-nouveau_platform_device_create_(struct platform_device *pdev, int size,
-				void **pobject)
+nouveau_platform_device_create(struct platform_device *pdev,
+			       struct nvkm_device **pdevice)
 {
 	struct drm_device *drm;
 	int err;
 
-	err = nvkm_device_create_(pdev, NVKM_BUS_PLATFORM,
-				  nouveau_platform_name(pdev),
-				  dev_name(&pdev->dev), nouveau_config,
-				  nouveau_debug, size, pobject);
+	err = nvkm_device_tegra_new(pdev, nouveau_config, nouveau_debug,
+				    true, true, ~0ULL, pdevice);
 	if (err)
-		return ERR_PTR(err);
+		goto err_free;
 
 	drm = drm_dev_alloc(&driver_platform, &pdev->dev);
 	if (!drm) {
@@ -1085,7 +1057,7 @@ nouveau_platform_device_create_(struct platform_device *pdev, int size,
 	return drm;
 
 err_free:
-	nvkm_object_ref(NULL, (struct nvkm_object **)pobject);
+	nvkm_device_del(pdevice);
 
 	return ERR_PTR(err);
 }
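
The fixed-size sclass array is replaced throughout by a get/scan/put contract: nvif_object_sclass_get() allocates the class list and returns its length, and every path must release it with nvif_object_sclass_put(). A userspace approximation of that contract, using stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct nvif_sclass { int oclass; };

/* stand-in for nvif_object_sclass_get(): allocate list, return count */
static int sclass_get(struct nvif_sclass **psclass)
{
	static const int classes[] = { 0x006e, 0x826f, 0x906f };
	int n = sizeof(classes) / sizeof(classes[0]), i;

	*psclass = calloc(n, sizeof(**psclass));
	if (!*psclass)
		return -1;
	for (i = 0; i < n; i++)
		(*psclass)[i].oclass = classes[i];
	return n;
}

/* stand-in for nvif_object_sclass_put(): release and clear */
static void sclass_put(struct nvif_sclass **psclass)
{
	free(*psclass);
	*psclass = NULL;
}

int main(void)
{
	struct nvif_sclass *sclass;
	int i, n = sclass_get(&sclass);

	for (i = 0; i < n; i++)
		printf("supported class 0x%04x\n", sclass[i].oclass);
	sclass_put(&sclass);
	return 0;
}
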
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index dd726523ca99..3c902c24a8dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -9,8 +9,8 @@
 #define DRIVER_DATE		"20120801"
 
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		2
-#define DRIVER_PATCHLEVEL	2
+#define DRIVER_MINOR		3
+#define DRIVER_PATCHLEVEL	0
 
 /*
  * 1.1.1:
@@ -30,6 +30,9 @@
  *      - allow concurrent access to bo's mapped read/write.
  * 1.2.2:
  *      - add NOUVEAU_GEM_DOMAIN_COHERENT flag
+ * 1.3.0:
+ *      - NVIF ABI modified, safe because the only (current) users are test
+ *        programs that get directly linked with NVKM.
  */
 
 #include <nvif/client.h>
@@ -88,6 +91,8 @@ struct nouveau_cli {
 	void *abi16;
 	struct list_head objects;
 	struct list_head notifys;
+	char name[32];
+	struct drm_device *dev;
 };
 
 static inline struct nouveau_cli *
@@ -109,13 +114,10 @@ struct nouveau_drm {
 	struct list_head clients;
 
 	struct {
-		enum {
-			UNKNOWN = 0,
-			DISABLE = 1,
-			ENABLED = 2
-		} stat;
+		struct agp_bridge_data *bridge;
 		u32 base;
 		u32 size;
+		bool cma;
 	} agp;
 
 	/* TTM interface support */
@@ -148,6 +150,7 @@ struct nouveau_drm {
 	struct nouveau_fbdev *fbcon;
 	struct nvif_object nvsw;
 	struct nvif_object ntfy;
+	struct nvif_notify flip;
 
 	/* nv10-nv40 tiling regions */
 	struct {
@@ -180,22 +183,22 @@ nouveau_drm(struct drm_device *dev)
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
 
-#define nouveau_platform_device_create(p, u)                                   \
-	nouveau_platform_device_create_(p, sizeof(**u), (void **)u)
 struct drm_device *
-nouveau_platform_device_create_(struct platform_device *pdev,
-				int size, void **pobject);
+nouveau_platform_device_create(struct platform_device *, struct nvkm_device **);
 void nouveau_drm_device_remove(struct drm_device *dev);
 
 #define NV_PRINTK(l,c,f,a...) do {                                             \
 	struct nouveau_cli *_cli = (c);                                        \
-	nv_##l(_cli->base.base.priv, f, ##a);                                  \
+	dev_##l(_cli->dev->dev, "%s: "f, _cli->name, ##a);                     \
 } while(0)
-#define NV_FATAL(drm,f,a...) NV_PRINTK(fatal, &(drm)->client, f, ##a)
-#define NV_ERROR(drm,f,a...) NV_PRINTK(error, &(drm)->client, f, ##a)
+#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
+#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
 #define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
 #define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
-#define NV_DEBUG(drm,f,a...) NV_PRINTK(debug, &(drm)->client, f, ##a)
+#define NV_DEBUG(drm,f,a...) do {                                              \
+	if (unlikely(drm_debug & DRM_UT_DRIVER))                               \
+		NV_PRINTK(info, &(drm)->client, f, ##a);                       \
+} while(0)
 
 extern int nouveau_modeset;
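
NV_DEBUG() now keys off the DRM core's runtime debug mask instead of a nouveau-specific log level, so the messages are compiled in but skipped unless drm.debug enables DRM_UT_DRIVER. A self-contained sketch of the idiom, with drm_debug stubbed as a plain variable:

#include <stdio.h>

static unsigned int drm_debug = 0x02;	/* pretend drm.debug=0x02 was set */
#define DRM_UT_DRIVER 0x02

#define NV_DEBUG(f, a...) do {                       \
	if (drm_debug & DRM_UT_DRIVER)               \
		fprintf(stderr, "nouveau: " f, ##a); \
} while (0)

int main(void)
{
	NV_DEBUG("chipset %02x\n", 0xc1);	/* emitted: bit is set */
	drm_debug = 0;
	NV_DEBUG("suppressed\n");		/* compiled in, skipped */
	return 0;
}
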
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index c57a37e8e1eb..b37da95105b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -41,7 +41,9 @@ struct nouveau_encoder {
 
 	struct dcb_output *dcb;
 	int or;
-	struct nvkm_i2c_port *i2c;
+
+	struct i2c_adapter *i2c;
+	struct nvkm_i2c_aux *aux;
 
 	/* different to drm_encoder.crtc, this reflects what's
 	 * actually programmed on the hw, not the proposed crtc */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 6751553abe4a..2791701685dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -84,7 +84,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 
 	if (ret != -ENODEV)
 		nouveau_fbcon_gpu_lockup(info);
-	cfb_fillrect(info, rect);
+	drm_fb_helper_cfb_fillrect(info, rect);
 }
 
 static void
@@ -116,7 +116,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
 
 	if (ret != -ENODEV)
 		nouveau_fbcon_gpu_lockup(info);
-	cfb_copyarea(info, image);
+	drm_fb_helper_cfb_copyarea(info, image);
 }
 
 static void
@@ -148,7 +148,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 
 	if (ret != -ENODEV)
 		nouveau_fbcon_gpu_lockup(info);
-	cfb_imageblit(info, image);
+	drm_fb_helper_cfb_imageblit(info, image);
 }
 
 static int
@@ -197,9 +197,9 @@ static struct fb_ops nouveau_fbcon_sw_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -319,7 +319,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 	struct nouveau_channel *chan;
 	struct nouveau_bo *nvbo;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct pci_dev *pdev = dev->pdev;
 	int size, ret;
 
 	mode_cmd.width = sizes->surface_width;
@@ -365,20 +364,13 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
 	mutex_lock(&dev->struct_mutex);
 
-	info = framebuffer_alloc(0, &pdev->dev);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unlock;
 	}
 	info->skip_vt_switch = 1;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		framebuffer_release(info);
-		goto out_unlock;
-	}
-
 	info->par = fbcon;
 
 	nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
@@ -388,7 +380,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 
 	/* setup helper */
 	fbcon->helper.fb = fb;
-	fbcon->helper.fbdev = info;
 
 	strcpy(info->fix.id, "nouveaufb");
 	if (!chan)
@@ -450,15 +441,9 @@ static int
 nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 {
 	struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
-	struct fb_info *info;
 
-	if (fbcon->helper.fbdev) {
-		info = fbcon->helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbcon->helper);
+	drm_fb_helper_release_fbi(&fbcon->helper);
 
 	if (nouveau_fb->nvbo) {
 		nouveau_bo_unmap(nouveau_fb->nvbo);
@@ -496,7 +481,7 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 		console_lock();
 		if (state == FBINFO_STATE_RUNNING)
 			nouveau_fbcon_accel_restore(dev);
-		fb_set_suspend(drm->fbcon->helper.fbdev, state);
+		drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
 		if (state != FBINFO_STATE_RUNNING)
 			nouveau_fbcon_accel_save_disable(dev);
 		console_unlock();
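
All three accelerated fbcon ops above share the same shape: try the GPU path, and on anything other than -ENODEV flag a lockup and fall back to the software drm_fb_helper_cfb_*() helpers. The control flow, reduced to a runnable stub:

#include <errno.h>
#include <stdio.h>

static int hw_fillrect(void) { return -EBUSY; }	/* pretend the channel hung */
static void sw_fillrect(void) { printf("software fill\n"); }

static void fillrect(void)
{
	int ret = hw_fillrect();

	if (ret == 0)
		return;
	if (ret != -ENODEV)	/* -ENODEV means accel was never there */
		fprintf(stderr, "GPU lockup - switching to software fbcon\n");
	sw_fillrect();
}

int main(void)
{
	fillrect();
	return 0;
}
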
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c6d56bef5823..574c36b492ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -169,7 +169,7 @@ void
 nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
-	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	int ret;
 
 	INIT_LIST_HEAD(&fctx->flip);
@@ -188,13 +188,12 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
 	if (!priv->uevent)
 		return;
 
-	ret = nvif_notify_init(chan->object, NULL,
-			 nouveau_fence_wait_uevent_handler, false,
-			 G82_CHANNEL_DMA_V0_NTFY_UEVENT,
-			 &(struct nvif_notify_uevent_req) { },
-			 sizeof(struct nvif_notify_uevent_req),
-			 sizeof(struct nvif_notify_uevent_rep),
-			 &fctx->notify);
+	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
+			       false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
+			       &(struct nvif_notify_uevent_req) { },
+			       sizeof(struct nvif_notify_uevent_req),
+			       sizeof(struct nvif_notify_uevent_rep),
+			       &fctx->notify);
 
 	WARN_ON(ret);
 }
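
The request argument passed here, &(struct nvif_notify_uevent_req) { }, is a compound literal: an anonymous zero-initialised temporary whose address stays valid for the duration of the call. A stripped-down demonstration (the empty { } initialiser mirrors the patch and needs GNU C or C23):

#include <stddef.h>
#include <stdio.h>

struct nvif_notify_uevent_req { int pad; };

static void notify_init(const struct nvif_notify_uevent_req *req, size_t size)
{
	printf("request of %zu bytes, pad=%d\n", size, req->pad);
}

int main(void)
{
	/* anonymous temporary, all members zero-initialised */
	notify_init(&(struct nvif_notify_uevent_req) { },
		    sizeof(struct nvif_notify_uevent_req));
	return 0;
}
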
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index d9241d8247fb..2e3a62d38fe9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -85,7 +85,7 @@ int nv50_fence_create(struct nouveau_drm *);
 int nv84_fence_create(struct nouveau_drm *);
 int nvc0_fence_create(struct nouveau_drm *);
 
-int nouveau_flip_complete(void *chan);
+int nouveau_flip_complete(struct nvif_notify *);
 
 struct nv84_fence_chan {
 	struct nouveau_fence_chan base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index af1ee517f372..2c9981512d27 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -254,13 +254,13 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_fb *fb = nvxx_fb(&drm->device);
 	struct drm_nouveau_gem_new *req = data;
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
 
-	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
-		NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
+	if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
+		NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
 	}
 
@@ -376,7 +376,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	ww_acquire_init(&op->ticket, &reservation_ww_class);
 retry:
 	if (++trycnt > 100000) {
-		NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
+		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -387,7 +387,7 @@ retry:
 
 		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
 		if (!gem) {
-			NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
+			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
 			ret = -ENOENT;
 			break;
 		}
@@ -399,7 +399,7 @@ retry:
 		}
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
-			NV_PRINTK(error, cli, "multiple instances of buffer %d on "
+			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
 				      "validation list\n", b->handle);
 			drm_gem_object_unreference_unlocked(gem);
 			ret = -EINVAL;
@@ -420,7 +420,7 @@ retry:
 			}
 			if (unlikely(ret)) {
 				if (ret != -ERESTARTSYS)
-					NV_PRINTK(error, cli, "fail reserve\n");
+					NV_PRINTK(err, cli, "fail reserve\n");
 				break;
 			}
 		}
@@ -438,7 +438,7 @@ retry:
 		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 			list_add_tail(&nvbo->entry, &gart_list);
 		else {
-			NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
+			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
 				 b->valid_domains);
 			list_add_tail(&nvbo->entry, &both_list);
 			ret = -EINVAL;
@@ -476,21 +476,21 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 					     b->write_domains,
 					     b->valid_domains);
 		if (unlikely(ret)) {
-			NV_PRINTK(error, cli, "fail set_domain\n");
+			NV_PRINTK(err, cli, "fail set_domain\n");
 			return ret;
 		}
 
 		ret = nouveau_bo_validate(nvbo, true, false);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
-				NV_PRINTK(error, cli, "fail ttm_validate\n");
+				NV_PRINTK(err, cli, "fail ttm_validate\n");
 			return ret;
 		}
 
 		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
-				NV_PRINTK(error, cli, "fail post-validate sync\n");
+				NV_PRINTK(err, cli, "fail post-validate sync\n");
 			return ret;
 		}
 
@@ -537,14 +537,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
 		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validate_init\n");
+			NV_PRINTK(err, cli, "validate_init\n");
 		return ret;
 	}
 
 	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validating bo list\n");
+			NV_PRINTK(err, cli, "validating bo list\n");
 		validate_fini(op, NULL, NULL);
 		return ret;
 	}
@@ -600,7 +600,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 		uint32_t data;
 
 		if (unlikely(r->bo_index > req->nr_buffers)) {
-			NV_PRINTK(error, cli, "reloc bo index invalid\n");
+			NV_PRINTK(err, cli, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -610,7 +610,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 			continue;
 
 		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
-			NV_PRINTK(error, cli, "reloc container bo index invalid\n");
+			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -618,7 +618,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 
 		if (unlikely(r->reloc_bo_offset + 4 >
 			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
-			NV_PRINTK(error, cli, "reloc outside of bo\n");
+			NV_PRINTK(err, cli, "reloc outside of bo\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -627,7 +627,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
 					  &nvbo->kmap);
 			if (ret) {
-				NV_PRINTK(error, cli, "failed kmap for reloc\n");
+				NV_PRINTK(err, cli, "failed kmap for reloc\n");
 				break;
 			}
 			nvbo->validate_mapped = true;
@@ -650,7 +650,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 
 		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
 		if (ret) {
-			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
+			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
 			break;
 		}
 
@@ -681,7 +681,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		return -ENOMEM;
 
 	list_for_each_entry(temp, &abi16->channels, head) {
-		if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
+		if (temp->chan->user.handle == (NVDRM_CHAN | req->channel)) {
 			chan = temp->chan;
 			break;
 		}
@@ -696,19 +696,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		goto out_next;
 
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
-		NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
+		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
-		NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
+		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
-		NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
+		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
@@ -726,7 +726,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	/* Ensure all push buffers are on validate list */
 	for (i = 0; i < req->nr_push; i++) {
 		if (push[i].bo_index >= req->nr_buffers) {
-			NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
+			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
 			ret = -EINVAL;
 			goto out_prevalid;
 		}
@@ -737,7 +737,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validate: %d\n", ret);
+			NV_PRINTK(err, cli, "validate: %d\n", ret);
 		goto out_prevalid;
 	}
 
@@ -745,7 +745,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (do_reloc) {
 		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
 		if (ret) {
-			NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
+			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
 			goto out;
 		}
 	}
@@ -753,7 +753,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (chan->dma.ib_max) {
 		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 		if (ret) {
-			NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
+			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
 
@@ -768,7 +768,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (drm->device.info.chipset >= 0x25) {
 		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
-			NV_PRINTK(error, cli, "cal_space: %d\n", ret);
+			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
 			goto out;
 		}
 
@@ -782,7 +782,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	} else {
 		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
-			NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
+			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
 			goto out;
 		}
 
@@ -820,7 +820,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 
 	ret = nouveau_fence_new(chan, false, &fence);
 	if (ret) {
-		NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
+		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
 		goto out;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 0dbe0060f86e..491c7149d197 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -41,7 +41,7 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_therm *therm = nvxx_therm(&drm->device);
-	int temp = therm->temp_get(therm);
+	int temp = nvkm_therm_temp_get(therm);
 
 	if (temp < 0)
 		return temp;
@@ -348,7 +348,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_therm *therm = nvxx_therm(&drm->device);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
+	return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm));
 }
 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
 			  NULL, 0);
@@ -571,7 +571,7 @@ nouveau_hwmon_init(struct drm_device *dev)
 		return -ENOMEM;
 	hwmon->dev = dev;
 
-	if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
+	if (!therm || !therm->attr_get || !therm->attr_set)
 		return -ENODEV;
 
 	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
@@ -588,7 +588,7 @@ nouveau_hwmon_init(struct drm_device *dev)
 		goto error;
 
 	/* if the card has a working thermal sensor */
-	if (therm->temp_get(therm) >= 0) {
+	if (nvkm_therm_temp_get(therm) >= 0) {
 		ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
 		if (ret)
 			goto error;
@@ -606,7 +606,7 @@ nouveau_hwmon_init(struct drm_device *dev)
 	}
 
 	/* if the card can read the fan rpm */
-	if (therm->fan_sense(therm) >= 0) {
+	if (nvkm_therm_fan_sense(therm) >= 0) {
 		ret = sysfs_create_group(&hwmon_dev->kobj,
 					 &hwmon_fan_rpm_attrgroup);
 		if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index ca0ad9d1563d..55eb942847fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -72,10 +72,8 @@ nvkm_client_suspend(void *priv)
 static void
 nvkm_client_driver_fini(void *priv)
 {
-	struct nvkm_object *client = priv;
-	nvkm_client_fini(nv_client(client), false);
-	atomic_set(&client->refcount, 1);
-	nvkm_object_ref(NULL, &client);
+	struct nvkm_client *client = priv;
+	nvkm_client_del(&client);
 }
 
 static int
@@ -113,7 +111,7 @@ nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
 	struct nvkm_client *client;
 	int ret;
 
-	ret = nvkm_client_create(name, device, cfg, dbg, &client);
+	ret = nvkm_client_new(name, device, cfg, dbg, &client);
 	*ppriv = client;
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index dcfbbfaf1739..3eb665453165 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -19,239 +19,38 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/reset.h>
-#include <linux/regulator/consumer.h>
-#include <linux/iommu.h>
-#include <soc/tegra/fuse.h>
-#include <soc/tegra/pmc.h>
-
-#include "nouveau_drm.h"
 #include "nouveau_platform.h"
 
-static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
-{
-	int err;
-
-	err = regulator_enable(gpu->vdd);
-	if (err)
-		goto err_power;
-
-	err = clk_prepare_enable(gpu->clk);
-	if (err)
-		goto err_clk;
-	err = clk_prepare_enable(gpu->clk_pwr);
-	if (err)
-		goto err_clk_pwr;
-	clk_set_rate(gpu->clk_pwr, 204000000);
-	udelay(10);
-
-	reset_control_assert(gpu->rst);
-	udelay(10);
-
-	err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
-	if (err)
-		goto err_clamp;
-	udelay(10);
-
-	reset_control_deassert(gpu->rst);
-	udelay(10);
-
-	return 0;
-
-err_clamp:
-	clk_disable_unprepare(gpu->clk_pwr);
-err_clk_pwr:
-	clk_disable_unprepare(gpu->clk);
-err_clk:
-	regulator_disable(gpu->vdd);
-err_power:
-	return err;
-}
-
-static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
-{
-	int err;
-
-	reset_control_assert(gpu->rst);
-	udelay(10);
-
-	clk_disable_unprepare(gpu->clk_pwr);
-	clk_disable_unprepare(gpu->clk);
-	udelay(10);
-
-	err = regulator_disable(gpu->vdd);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-#if IS_ENABLED(CONFIG_IOMMU_API)
-
-static void nouveau_platform_probe_iommu(struct device *dev,
-					 struct nouveau_platform_gpu *gpu)
-{
-	int err;
-	unsigned long pgsize_bitmap;
-
-	mutex_init(&gpu->iommu.mutex);
-
-	if (iommu_present(&platform_bus_type)) {
-		gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
-		if (IS_ERR(gpu->iommu.domain))
-			goto error;
-
-		/*
-		 * A IOMMU is only usable if it supports page sizes smaller
-		 * or equal to the system's PAGE_SIZE, with a preference if
-		 * both are equal.
-		 */
-		pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
-		if (pgsize_bitmap & PAGE_SIZE) {
-			gpu->iommu.pgshift = PAGE_SHIFT;
-		} else {
-			gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
-			if (gpu->iommu.pgshift == 0) {
-				dev_warn(dev, "unsupported IOMMU page size\n");
-				goto free_domain;
-			}
-			gpu->iommu.pgshift -= 1;
-		}
-
-		err = iommu_attach_device(gpu->iommu.domain, dev);
-		if (err)
-			goto free_domain;
-
-		err = nvkm_mm_init(&gpu->iommu._mm, 0,
-				   (1ULL << 40) >> gpu->iommu.pgshift, 1);
-		if (err)
-			goto detach_device;
-
-		gpu->iommu.mm = &gpu->iommu._mm;
-	}
-
-	return;
-
-detach_device:
-	iommu_detach_device(gpu->iommu.domain, dev);
-
-free_domain:
-	iommu_domain_free(gpu->iommu.domain);
-
-error:
-	gpu->iommu.domain = NULL;
-	gpu->iommu.pgshift = 0;
-	dev_err(dev, "cannot initialize IOMMU MM\n");
-}
-
-static void nouveau_platform_remove_iommu(struct device *dev,
-					  struct nouveau_platform_gpu *gpu)
-{
-	if (gpu->iommu.domain) {
-		nvkm_mm_fini(&gpu->iommu._mm);
-		iommu_detach_device(gpu->iommu.domain, dev);
-		iommu_domain_free(gpu->iommu.domain);
-	}
-}
-
-#else
-
-static void nouveau_platform_probe_iommu(struct device *dev,
-					 struct nouveau_platform_gpu *gpu)
-{
-}
-
-static void nouveau_platform_remove_iommu(struct device *dev,
-					  struct nouveau_platform_gpu *gpu)
-{
-}
-
-#endif
-
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
-	struct nouveau_platform_gpu *gpu;
-	struct nouveau_platform_device *device;
+	struct nvkm_device *device;
 	struct drm_device *drm;
-	int err;
-
-	gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
-	if (!gpu)
-		return -ENOMEM;
-
-	gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(gpu->vdd))
-		return PTR_ERR(gpu->vdd);
-
-	gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
-	if (IS_ERR(gpu->rst))
-		return PTR_ERR(gpu->rst);
-
-	gpu->clk = devm_clk_get(&pdev->dev, "gpu");
-	if (IS_ERR(gpu->clk))
-		return PTR_ERR(gpu->clk);
-
-	gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
-	if (IS_ERR(gpu->clk_pwr))
-		return PTR_ERR(gpu->clk_pwr);
-
-	nouveau_platform_probe_iommu(&pdev->dev, gpu);
-
-	err = nouveau_platform_power_up(gpu);
-	if (err)
-		return err;
+	int ret;
 
 	drm = nouveau_platform_device_create(pdev, &device);
-	if (IS_ERR(drm)) {
-		err = PTR_ERR(drm);
-		goto power_down;
-	}
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
 
-	device->gpu = gpu;
-	device->gpu_speedo = tegra_sku_info.gpu_speedo_value;
-
-	err = drm_dev_register(drm, 0);
-	if (err < 0)
-		goto err_unref;
+	ret = drm_dev_register(drm, 0);
+	if (ret < 0) {
+		drm_dev_unref(drm);
+		return ret;
+	}
 
 	return 0;
-
-err_unref:
-	drm_dev_unref(drm);
-
-power_down:
-	nouveau_platform_power_down(gpu);
-	nouveau_platform_remove_iommu(&pdev->dev, gpu);
-
-	return err;
 }
 
 static int nouveau_platform_remove(struct platform_device *pdev)
 {
-	struct drm_device *drm_dev = platform_get_drvdata(pdev);
-	struct nouveau_drm *drm = nouveau_drm(drm_dev);
-	struct nvkm_device *device = nvxx_device(&drm->device);
-	struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
-	int err;
-
-	nouveau_drm_device_remove(drm_dev);
-
-	err = nouveau_platform_power_down(gpu);
-
-	nouveau_platform_remove_iommu(&pdev->dev, gpu);
-
-	return err;
+	struct drm_device *dev = platform_get_drvdata(pdev);
+	nouveau_drm_device_remove(dev);
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id nouveau_platform_match[] = {
 	{ .compatible = "nvidia,gk20a" },
+	{ .compatible = "nvidia,gm20b" },
 	{ }
 };
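
With power, clock and IOMMU handling absorbed into nvkm_device_tegra_new(), what remains of the platform driver is essentially the OF match table above. How such a sentinel-terminated table is scanned, as a standalone illustration:

#include <stdio.h>
#include <string.h>

struct of_device_id { const char *compatible; };

static const struct of_device_id match[] = {
	{ .compatible = "nvidia,gk20a" },
	{ .compatible = "nvidia,gm20b" },
	{ }	/* sentinel terminates the scan */
};

int main(void)
{
	const char *node = "nvidia,gm20b";	/* pretend DT compatible */
	const struct of_device_id *id;

	for (id = match; id->compatible; id++)
		if (!strcmp(id->compatible, node))
			printf("matched %s\n", id->compatible);
	return 0;
}
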
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 392874cf4725..f41056d0f5f4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -19,54 +19,9 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
 #ifndef __NOUVEAU_PLATFORM_H__
 #define __NOUVEAU_PLATFORM_H__
-
-#include "core/device.h"
-#include "core/mm.h"
-
-struct reset_control;
-struct clk;
-struct regulator;
-struct iommu_domain;
-struct platform_driver;
-
-struct nouveau_platform_gpu {
-	struct reset_control *rst;
-	struct clk *clk;
-	struct clk *clk_pwr;
-
-	struct regulator *vdd;
-
-	struct {
-		/*
-		 * Protects accesses to mm from subsystems
-		 */
-		struct mutex mutex;
-
-		struct nvkm_mm _mm;
-		/*
-		 * Just points to _mm. We need this to avoid embedding
-		 * struct nvkm_mm in os.h
-		 */
-		struct nvkm_mm *mm;
-		struct iommu_domain *domain;
-		unsigned long pgshift;
-	} iommu;
-};
-
-struct nouveau_platform_device {
-	struct nvkm_device device;
-
-	struct nouveau_platform_gpu *gpu;
-
-	int gpu_speedo;
-};
-
-#define nv_device_to_platform(d)                                               \
-	container_of(d, struct nouveau_platform_device, device)
+#include "nouveau_drm.h"
 
 extern struct platform_driver nouveau_platform_driver;
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 1ec8f38ae69a..d12a5faee047 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -165,7 +165,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
 	struct nvif_device *device = &drm->device;
 
 	if (sysfs && sysfs->ctrl.priv) {
-		device_remove_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+		device_remove_file(nvxx_device(device)->dev, &dev_attr_pstate);
 		nvif_object_fini(&sysfs->ctrl);
 	}
 
@@ -188,11 +188,11 @@ nouveau_sysfs_init(struct drm_device *dev)
 	if (!sysfs)
 		return -ENOMEM;
 
-	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL,
+	ret = nvif_object_init(&device->object, NVDRM_CONTROL,
 			       NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
-			      &sysfs->ctrl);
+			       &sysfs->ctrl);
 	if (ret == 0)
-		device_create_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+		device_create_file(nvxx_device(device)->dev, &dev_attr_pstate);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7464aef34674..3f0fb55cb473 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -33,8 +33,8 @@ static int
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
-	man->priv = pfb;
+	struct nvkm_fb *fb = nvxx_fb(&drm->device);
+	man->priv = fb;
 	return 0;
 }
 
@@ -64,9 +64,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
 	nvkm_mem_node_cleanup(mem->mm_node);
-	pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
+	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
 }
 
 static int
@@ -76,7 +76,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nvkm_mem *node;
 	u32 size_nc = 0;
@@ -88,9 +88,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
 		size_nc = 1 << nvbo->page_shift;
 
-	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
-			   mem->page_alignment << PAGE_SHIFT, size_nc,
-			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
+	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
+			     mem->page_alignment << PAGE_SHIFT, size_nc,
+			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
 	if (ret) {
 		mem->mm_node = NULL;
 		return (ret == -ENOSPC) ? 0 : ret;
@@ -103,38 +103,11 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	return 0;
 }
 
-static void
-nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
-	struct nvkm_fb *pfb = man->priv;
-	struct nvkm_mm *mm = &pfb->vram;
-	struct nvkm_mm_node *r;
-	u32 total = 0, free = 0;
-
-	mutex_lock(&nv_subdev(pfb)->mutex);
-	list_for_each_entry(r, &mm->nodes, nl_entry) {
-		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
-		       prefix, r->type, ((u64)r->offset << 12),
-		       (((u64)r->offset + r->length) << 12));
-
-		total += r->length;
-		if (!r->type)
-			free += r->length;
-	}
-	mutex_unlock(&nv_subdev(pfb)->mutex);
-
-	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
-	       prefix, (u64)total << 12, (u64)free << 12);
-	printk(KERN_DEBUG "%s  block: 0x%08x\n",
-	       prefix, mm->block_size << 12);
-}
-
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
 	nouveau_vram_manager_init,
 	nouveau_vram_manager_fini,
 	nouveau_vram_manager_new,
 	nouveau_vram_manager_del,
-	nouveau_vram_manager_debug
 };
 
 static int
@@ -221,7 +194,7 @@ nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
 	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
-	struct nv04_mmu_priv *priv = (void *)mmu;
+	struct nv04_mmu *priv = (void *)mmu;
 	struct nvkm_vm *vm = NULL;
 	nvkm_vm_ref(priv->vm, &vm, NULL);
 	man->priv = vm;
@@ -362,13 +335,22 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
 int
 nouveau_ttm_init(struct nouveau_drm *drm)
 {
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_pci *pci = device->pci;
 	struct drm_device *dev = drm->dev;
 	u32 bits;
 	int ret;
 
+	if (pci && pci->agp.bridge) {
+		drm->agp.bridge = pci->agp.bridge;
+		drm->agp.base = pci->agp.base;
+		drm->agp.size = pci->agp.size;
+		drm->agp.cma = pci->agp.cma;
+	}
+
 	bits = nvxx_mmu(&drm->device)->dma_bits;
-	if (nv_device_is_pci(nvxx_device(&drm->device))) {
-		if (drm->agp.stat == ENABLED ||
+	if (nvxx_device(&drm->device)->func->pci) {
+		if (drm->agp.bridge ||
 		     !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
 			bits = 32;
 
@@ -408,11 +390,11 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 		return ret;
 	}
 
-	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
-					 nv_device_resource_len(nvxx_device(&drm->device), 1));
+	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
+					 device->func->resource_size(device, 1));
 
 	/* GART init */
-	if (drm->agp.stat != ENABLED) {
+	if (!drm->agp.bridge) {
 		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
 	} else {
 		drm->gem.gart_available = drm->agp.size;
@@ -433,10 +415,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 void
 nouveau_ttm_fini(struct nouveau_drm *drm)
 {
-	mutex_lock(&drm->dev->struct_mutex);
 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
-	mutex_unlock(&drm->dev->struct_mutex);
 
 	ttm_bo_device_release(&drm->ttm.bdev);
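
Dropping the ->debug hook earlier in this file only works with the positional initialiser of nouveau_vram_manager because it was the trailing slot. Designated initialisers would make such tables independent of member order; a sketch using a trimmed stand-in for ttm_mem_type_manager_func:

#include <stdio.h>

struct mem_manager_func {
	int  (*init)(unsigned long size);
	void (*fini)(void);
};

static int  vram_init(unsigned long size) { printf("init %lu\n", size); return 0; }
static void vram_fini(void) { }

static const struct mem_manager_func vram_manager = {
	.init = vram_init,	/* named slots survive struct reshuffles */
	.fini = vram_fini,
};

int main(void)
{
	return vram_manager.init(1UL << 20);
}
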
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c7592ec8ecb8..af89c3665b2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -12,13 +12,14 @@
 static unsigned int
 nouveau_vga_set_decode(void *priv, bool state)
 {
-	struct nvif_device *device = &nouveau_drm(priv)->device;
+	struct nouveau_drm *drm = nouveau_drm(priv);
+	struct nvif_object *device = &drm->device.object;
 
-	if (device->info.family == NV_DEVICE_INFO_V0_CURIE &&
-	    device->info.chipset >= 0x4c)
+	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE &&
+	    drm->device.info.chipset >= 0x4c)
 		nvif_wr32(device, 0x088060, state);
 	else
-	if (device->info.chipset >= 0x40)
+	if (drm->device.info.chipset >= 0x40)
 		nvif_wr32(device, 0x088054, state);
 	else
 		nvif_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 495c57644ced..789dc2993b0d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -171,33 +171,33 @@ nv04_fbcon_accel_init(struct fb_info *info)
 		return -EINVAL;
 	}
 
-	ret = nvif_object_init(chan->object, NULL, 0x0062,
+	ret = nvif_object_init(&chan->user, 0x0062,
 			       device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ?
 			       0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x0019, 0x0019, NULL, 0,
 			       &nfbdev->clip);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x0043, 0x0043, NULL, 0,
 			       &nfbdev->rop);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x0044, 0x0044, NULL, 0,
 			       &nfbdev->patt);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x004a, 0x004a, NULL, 0,
 			       &nfbdev->gdi);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x005f,
+	ret = nvif_object_init(&chan->user, 0x005f,
 			       device->info.chipset >= 0x11 ? 0x009f : 0x005f,
 			       NULL, 0, &nfbdev->blit);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index c2e05e64cd6f..f3d705d67738 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -57,8 +57,10 @@ nv04_fence_sync(struct nouveau_fence *fence,
 static u32
 nv04_fence_read(struct nouveau_channel *chan)
 {
-	struct nvkm_fifo_chan *fifo = nvxx_fifo_chan(chan);;
-	return atomic_read(&fifo->refcnt);
+	struct nv04_nvsw_get_ref_v0 args = {};
+	WARN_ON(nvif_object_mthd(&chan->nvsw, NV04_NVSW_GET_REF,
+				 &args, sizeof(args)));
+	return args.ref;
 }
 
 static void
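
nv04_fence_read() now queries the reference count with a method call that fills the caller's argument struct in place, instead of peeking at fifo internals. The calling convention, mocked up as runnable userspace C with invented values:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct nv04_nvsw_get_ref_v0 { unsigned int ref; };

/* stand-in for nvif_object_mthd(): validates size, fills the struct */
static int object_mthd(int mthd, void *data, size_t size)
{
	struct nv04_nvsw_get_ref_v0 *args = data;

	(void)mthd;
	if (size != sizeof(*args))
		return -EINVAL;
	args->ref = 0x2a;	/* pretend fence sequence from the fifo */
	return 0;
}

int main(void)
{
	struct nv04_nvsw_get_ref_v0 args = {};

	if (object_mthd(0, &args, sizeof(args)))
		return 1;
	printf("ref = 0x%x\n", args.ref);
	return 0;
}
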
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 5e1ea1cdce75..2c35213da275 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -50,7 +50,7 @@ nv10_fence_sync(struct nouveau_fence *fence,
 u32
 nv10_fence_read(struct nouveau_channel *chan)
 {
-	return nvif_rd32(chan, 0x0048);
+	return nvif_rd32(&chan->user, 0x0048);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 57860cfa1de5..80b6eb8b3d02 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -33,7 +33,7 @@ int
 nv17_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base);
+	struct nouveau_cli *cli = (void *)prev->user.client;
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx = chan->fence;
 	u32 value;
@@ -89,7 +89,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv17_fence_sync;
 
-	ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY,
+	ret = nvif_object_init(&chan->user, NvSema, NV_DMA_FROM_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 981342d142ff..4ae87aed4505 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -60,35 +60,39 @@
 
 struct nv50_chan {
 	struct nvif_object user;
+	struct nvif_device *device;
 };
 
 static int
-nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
-		 void *data, u32 size, struct nv50_chan *chan)
+nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
+		 const s32 *oclass, u8 head, void *data, u32 size,
+		 struct nv50_chan *chan)
 {
 	const u32 handle = (oclass[0] << 16) | head;
-	u32 sclass[8];
-	int ret, i;
+	struct nvif_sclass *sclass;
+	int ret, i, n;
+
+	chan->device = device;
 
-	ret = nvif_object_sclass(disp, sclass, ARRAY_SIZE(sclass));
-	WARN_ON(ret > ARRAY_SIZE(sclass));
+	ret = n = nvif_object_sclass_get(disp, &sclass);
 	if (ret < 0)
 		return ret;
 
 	while (oclass[0]) {
-		for (i = 0; i < ARRAY_SIZE(sclass); i++) {
-			if (sclass[i] == oclass[0]) {
-				ret = nvif_object_init(disp, NULL, handle,
-						       oclass[0], data, size,
-						       &chan->user);
+		for (i = 0; i < n; i++) {
+			if (sclass[i].oclass == oclass[0]) {
+				ret = nvif_object_init(disp, handle, oclass[0],
+						       data, size, &chan->user);
 				if (ret == 0)
 					nvif_object_map(&chan->user);
+				nvif_object_sclass_put(&sclass);
 				return ret;
 			}
 		}
 		oclass++;
 	}
 
+	nvif_object_sclass_put(&sclass);
 	return -ENOSYS;
 }
 
@@ -113,10 +117,12 @@ nv50_pioc_destroy(struct nv50_pioc *pioc)
 }
 
 static int
-nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
-		 void *data, u32 size, struct nv50_pioc *pioc)
+nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
+		 const s32 *oclass, u8 head, void *data, u32 size,
+		 struct nv50_pioc *pioc)
 {
-	return nv50_chan_create(disp, oclass, head, data, size, &pioc->base);
+	return nv50_chan_create(device, disp, oclass, head, data, size,
+				&pioc->base);
 }
 
 /******************************************************************************
@@ -128,12 +134,13 @@ struct nv50_curs {
 };
 
 static int
-nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs)
+nv50_curs_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, struct nv50_curs *curs)
 {
 	struct nv50_disp_cursor_v0 args = {
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK104_DISP_CURSOR,
 		GF110_DISP_CURSOR,
 		GT214_DISP_CURSOR,
@@ -142,8 +149,8 @@ nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs)
 		0
 	};
 
-	return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
-			       &curs->base);
+	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
+				&curs->base);
 }
 
 /******************************************************************************
@@ -155,12 +162,13 @@ struct nv50_oimm {
 };
 
 static int
-nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm)
+nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, struct nv50_oimm *oimm)
 {
 	struct nv50_disp_cursor_v0 args = {
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK104_DISP_OVERLAY,
 		GF110_DISP_OVERLAY,
 		GT214_DISP_OVERLAY,
@@ -169,8 +177,8 @@ nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm)
 		0
 	};
 
-	return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
-			       &oimm->base);
+	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
+				&oimm->base);
 }
 
 /******************************************************************************
@@ -194,37 +202,37 @@ struct nv50_dmac {
 static void
 nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
 {
+	struct nvif_device *device = dmac->base.device;
+
 	nvif_object_fini(&dmac->vram);
 	nvif_object_fini(&dmac->sync);
 
 	nv50_chan_destroy(&dmac->base);
 
 	if (dmac->ptr) {
-		struct pci_dev *pdev = nvxx_device(nvif_device(disp))->pdev;
-		pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
+		struct device *dev = nvxx_device(device)->dev;
+		dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
 	}
 }
 
 static int
-nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
-		 void *data, u32 size, u64 syncbuf,
+nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+		 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
 		 struct nv50_dmac *dmac)
 {
-	struct nvif_device *device = nvif_device(disp);
 	struct nv50_disp_core_channel_dma_v0 *args = data;
 	struct nvif_object pushbuf;
 	int ret;
 
 	mutex_init(&dmac->lock);
 
-	dmac->ptr = pci_alloc_consistent(nvxx_device(device)->pdev,
-					 PAGE_SIZE, &dmac->handle);
+	dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
+				       &dmac->handle, GFP_KERNEL);
 	if (!dmac->ptr)
 		return -ENOMEM;
 
-	ret = nvif_object_init(nvif_object(device), NULL,
-			       args->pushbuf, NV_DMA_FROM_MEMORY,
-			       &(struct nv_dma_v0) {
+	ret = nvif_object_init(&device->object, 0xd0000000,
+			       NV_DMA_FROM_MEMORY, &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_PCI_US,
 					.access = NV_DMA_V0_ACCESS_RD,
 					.start = dmac->handle + 0x0000,
@@ -233,13 +241,15 @@ nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
 	if (ret)
 		return ret;
 
-	ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base);
+	args->pushbuf = nvif_handle(&pushbuf);
+
+	ret = nv50_chan_create(device, disp, oclass, head, data, size,
+			       &dmac->base);
 	nvif_object_fini(&pushbuf);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000,
-			       NV_DMA_IN_MEMORY,
+	ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
@@ -250,8 +260,7 @@ nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001,
-			       NV_DMA_IN_MEMORY,
+	ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
@@ -274,12 +283,13 @@ struct nv50_mast {
 };
 
 static int
-nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
+nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
+		 u64 syncbuf, struct nv50_mast *core)
 {
 	struct nv50_disp_core_channel_dma_v0 args = {
 		.pushbuf = 0xb0007d00,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GM204_DISP_CORE_CHANNEL_DMA,
 		GM107_DISP_CORE_CHANNEL_DMA,
 		GK110_DISP_CORE_CHANNEL_DMA,
@@ -293,8 +303,8 @@ nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
 		0
 	};
 
-	return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf,
-			       &core->base);
+	return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
+				syncbuf, &core->base);
 }
 
 /******************************************************************************
@@ -308,14 +318,14 @@ struct nv50_sync {
 };
 
 static int
-nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf,
-		 struct nv50_sync *base)
+nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, u64 syncbuf, struct nv50_sync *base)
 {
 	struct nv50_disp_base_channel_dma_v0 args = {
 		.pushbuf = 0xb0007c00 | head,
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK110_DISP_BASE_CHANNEL_DMA,
 		GK104_DISP_BASE_CHANNEL_DMA,
 		GF110_DISP_BASE_CHANNEL_DMA,
@@ -326,7 +336,7 @@ nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf,
 		0
 	};
 
-	return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
 				syncbuf, &base->base);
 }
 
@@ -339,14 +349,14 @@ struct nv50_ovly {
 };
 
 static int
-nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf,
-		 struct nv50_ovly *ovly)
+nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, u64 syncbuf, struct nv50_ovly *ovly)
 {
 	struct nv50_disp_overlay_channel_dma_v0 args = {
 		.pushbuf = 0xb0007e00 | head,
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK104_DISP_OVERLAY_CONTROL_DMA,
 		GF110_DISP_OVERLAY_CONTROL_DMA,
 		GT214_DISP_OVERLAY_CHANNEL_DMA,
@@ -356,7 +366,7 @@ nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf,
 		0
 	};
 
-	return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
 				syncbuf, &ovly->base);
 }
 
@@ -413,6 +423,7 @@ static u32 *
 evo_wait(void *evoc, int nr)
 {
 	struct nv50_dmac *dmac = evoc;
+	struct nvif_device *device = dmac->base.device;
 	u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
 
 	mutex_lock(&dmac->lock);
@@ -420,9 +431,12 @@ evo_wait(void *evoc, int nr)
 		dmac->ptr[put] = 0x20000000;
 
 		nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
-		if (!nvxx_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) {
+		if (nvif_msec(device, 2000,
+			if (!nvif_rd32(&dmac->base.user, 0x0004))
+				break;
+		) < 0) {
 			mutex_unlock(&dmac->lock);
-			nv_error(nvxx_object(&dmac->base.user), "channel stalled\n");
+			printk(KERN_ERR "nouveau: evo channel stalled\n");
 			return NULL;
 		}
 
@@ -480,7 +494,10 @@ evo_sync(struct drm_device *dev)
 		evo_data(push, 0x00000000);
 		evo_data(push, 0x00000000);
 		evo_kick(push, mast);
-		if (nv_wait_cb(nvxx_device(device), evo_sync_wait, disp->sync))
+		if (nvif_msec(device, 2000,
+			if (evo_sync_wait(disp->sync))
+				break;
+		) >= 0)
 			return 0;
 	}
 
@@ -535,7 +552,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
 		evo_kick(push, flip.chan);
 	}
 
-	nv_wait_cb(nvxx_device(device), nv50_display_flip_wait, &flip);
+	nvif_msec(device, 2000,
+		if (nv50_display_flip_wait(&flip))
+			break;
+	);
 }
 
 int
@@ -563,7 +583,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	if (unlikely(push == NULL))
 		return -EBUSY;
 
-	if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) {
+	if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
 		ret = RING_SPACE(chan, 8);
 		if (ret)
 			return ret;
@@ -577,7 +597,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		OUT_RING  (chan, sync->addr);
 		OUT_RING  (chan, sync->data);
 	} else
-	if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) {
+	if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
 		u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
 		ret = RING_SPACE(chan, 12);
 		if (ret)
@@ -1408,6 +1428,8 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
 static int
 nv50_crtc_create(struct drm_device *dev, int index)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvif_device *device = &drm->device;
 	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_head *head;
 	struct drm_crtc *crtc;
@@ -1452,13 +1474,13 @@ nv50_crtc_create(struct drm_device *dev, int index)
 		goto out;
 
 	/* allocate cursor resources */
-	ret = nv50_curs_create(disp->disp, index, &head->curs);
+	ret = nv50_curs_create(device, disp->disp, index, &head->curs);
 	if (ret)
 		goto out;
 
 	/* allocate page flip / sync resources */
-	ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset,
-			      &head->sync);
+	ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset,
+			       &head->sync);
 	if (ret)
 		goto out;
 
@@ -1466,12 +1488,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	head->sync.data = 0x00000000;
 
 	/* allocate overlay resources */
-	ret = nv50_oimm_create(disp->disp, index, &head->oimm);
+	ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
 	if (ret)
 		goto out;
 
-	ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset,
-			      &head->ovly);
+	ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
+			       &head->ovly);
 	if (ret)
 		goto out;
 
@@ -1678,6 +1700,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+	struct nvkm_i2c_bus *bus;
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
 	int type = DRM_MODE_ENCODER_DAC;
@@ -1687,7 +1710,10 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		return -ENOMEM;
 	nv_encoder->dcb = dcbe;
 	nv_encoder->or = ffs(dcbe->or) - 1;
-	nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
+
+	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
+	if (bus)
+		nv_encoder->i2c = &bus->i2c;
 
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
@@ -2081,9 +2107,22 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 		return -ENOMEM;
 	nv_encoder->dcb = dcbe;
 	nv_encoder->or = ffs(dcbe->or) - 1;
-	nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
 	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
 
+	if (dcbe->type == DCB_OUTPUT_DP) {
+		struct nvkm_i2c_aux *aux =
+			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+		if (aux) {
+			nv_encoder->i2c = &aux->i2c;
+			nv_encoder->aux = aux;
+		}
+	} else {
+		struct nvkm_i2c_bus *bus =
+			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
+		if (bus)
+			nv_encoder->i2c = &bus->i2c;
+	}
+
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
 	encoder->possible_clones = 0;
@@ -2234,18 +2273,22 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-	struct nvkm_i2c_port *ddc = NULL;
+	struct nvkm_i2c_bus *bus = NULL;
+	struct nvkm_i2c_aux *aux = NULL;
+	struct i2c_adapter *ddc;
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
 	int type;
 
 	switch (dcbe->type) {
 	case DCB_OUTPUT_TMDS:
-		ddc  = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev));
+		bus  = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
+		ddc  = bus ? &bus->i2c : NULL;
 		type = DRM_MODE_ENCODER_TMDS;
 		break;
 	case DCB_OUTPUT_DP:
-		ddc  = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev));
+		aux  = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
+		ddc  = aux ? &aux->i2c : NULL;
 		type = DRM_MODE_ENCODER_TMDS;
 		break;
 	default:
@@ -2258,6 +2301,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 	nv_encoder->dcb = dcbe;
 	nv_encoder->or = ffs(dcbe->or) - 1;
 	nv_encoder->i2c = ddc;
+	nv_encoder->aux = aux;
 
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
@@ -2295,7 +2339,7 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
 		union {
 			struct nv50_dma_v0 nv50;
 			struct gf100_dma_v0 gf100;
-			struct gf110_dma_v0 gf110;
+			struct gf119_dma_v0 gf119;
 		};
 	} args = {};
 	struct nv50_fbdma *fbdma;
@@ -2331,15 +2375,15 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
 		args.gf100.kind = kind;
 		size += sizeof(args.gf100);
 	} else {
-		args.gf110.page = GF110_DMA_V0_PAGE_LP;
-		args.gf110.kind = kind;
-		size += sizeof(args.gf110);
+		args.gf119.page = GF119_DMA_V0_PAGE_LP;
+		args.gf119.kind = kind;
+		size += sizeof(args.gf119);
 	}
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nv50_head *head = nv50_head(crtc);
-		int ret = nvif_object_init(&head->sync.base.base.user, NULL,
-					    name, NV_DMA_IN_MEMORY, &args, size,
+		int ret = nvif_object_init(&head->sync.base.base.user, name,
+					   NV_DMA_IN_MEMORY, &args, size,
 					   &fbdma->base[head->base.index]);
 		if (ret) {
 			nv50_fbdma_fini(fbdma);
@@ -2347,9 +2391,8 @@ nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kin
 		}
 	}
 
-	ret = nvif_object_init(&mast->base.base.user, NULL, name,
-				NV_DMA_IN_MEMORY, &args, size,
-			       &fbdma->core);
+	ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY,
+			       &args, size, &fbdma->core);
 	if (ret) {
 		nv50_fbdma_fini(fbdma);
 		return ret;
@@ -2502,14 +2545,14 @@ nv50_display_create(struct drm_device *dev)
 		goto out;
 
 	/* allocate master evo channel */
-	ret = nv50_core_create(disp->disp, disp->sync->bo.offset,
+	ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
 			      &disp->mast);
 	if (ret)
 		goto out;
 
 	/* create crtc objects to represent the hw heads */
 	if (disp->disp->oclass >= GF110_DISP)
-		crtcs = nvif_rd32(device, 0x022448);
+		crtcs = nvif_rd32(&device->object, 0x022448);
 	else
 		crtcs = 2;
 
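
The nvxx_wait()/nv_wait_cb() calls above are replaced by the nvif_msec() polling loop, which evaluates its body repeatedly until the body executes "break" or the timeout expires, yielding a negative value on timeout. A minimal userspace sketch of that statement-expression idiom, assuming GCC extensions; POLL_MSEC() and now_ns() below are invented stand-ins, not the nouveau macros:

	#include <errno.h>
	#include <stdio.h>
	#include <time.h>

	static long long now_ns(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000000000LL + ts.tv_nsec;
	}

	/* Poll "body" until it breaks out or "ms" milliseconds pass; evaluates
	 * to the time left on success, or -ETIMEDOUT on expiry. */
	#define POLL_MSEC(ms, body) ({                                        \
		long long _limit = now_ns() + (ms) * 1000000LL, _left;        \
		for (;;) {                                                    \
			_left = _limit - now_ns();                            \
			if (_left < 0) {                                      \
				_left = -ETIMEDOUT;                           \
				break;                                        \
			}                                                     \
			body                                                  \
		}                                                             \
		_left;                                                        \
	})

	static int fifo_get = 1;	/* pretend channel GET pointer */

	int main(void)
	{
		if (POLL_MSEC(2000,
			if (fifo_get == 0)
				break;
			fifo_get--;	/* simulate the hardware catching up */
		) < 0)
			fprintf(stderr, "evo channel stalled\n");
		else
			printf("channel drained\n");
		return 0;
	}
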
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 901130b06072..e05499d6ed83 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -183,7 +183,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
 		return -EINVAL;
 	}
 
-	ret = nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x502d, 0x502d, NULL, 0,
 			       &nfbdev->twod);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index a82d9ea7c6fd..f0d96e5da6b4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -51,7 +51,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv17_fence_sync;
 
-	ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY,
+	ret = nvif_object_init(&chan->user, NvSema, NV_DMA_IN_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
@@ -66,7 +66,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 		u32 start = bo->bo.mem.start * PAGE_SIZE;
 		u32 limit = start + bo->bo.mem.size - 1;
 
-		ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i,
+		ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
 				       NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
 						.target = NV_DMA_V0_TARGET_VRAM,
 						.access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index a03db4368696..412c5be5a9ca 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -131,7 +131,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 int
 nv84_fence_context_new(struct nouveau_channel *chan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx;
 	int ret, i;
@@ -213,7 +213,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
 int
 nv84_fence_create(struct nouveau_drm *drm)
 {
-	struct nvkm_fifo *pfifo = nvxx_fifo(&drm->device);
+	struct nvkm_fifo *fifo = nvxx_fifo(&drm->device);
 	struct nv84_fence_priv *priv;
 	u32 domain;
 	int ret;
@@ -228,7 +228,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv84_fence_context_new;
 	priv->base.context_del = nv84_fence_context_del;
 
-	priv->base.contexts = pfifo->max + 1;
+	priv->base.contexts = fifo->nr;
 	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index fcd2e5f27bb9..c97395b4a312 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -156,7 +156,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 	struct nouveau_channel *chan = drm->channel;
 	int ret, format;
 
-	ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x902d, 0x902d, NULL, 0,
 			       &nfbdev->twod);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 80b96844221e..1ee9294eca2e 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -29,29 +29,29 @@
 int
 nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
 {
-	return client->driver->ioctl(client->base.priv, client->super, data, size, NULL);
+	return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
 }
 
 int
 nvif_client_suspend(struct nvif_client *client)
 {
-	return client->driver->suspend(client->base.priv);
+	return client->driver->suspend(client->object.priv);
 }
 
 int
 nvif_client_resume(struct nvif_client *client)
 {
-	return client->driver->resume(client->base.priv);
+	return client->driver->resume(client->object.priv);
 }
 
 void
 nvif_client_fini(struct nvif_client *client)
 {
 	if (client->driver) {
-		client->driver->fini(client->base.priv);
+		client->driver->fini(client->object.priv);
 		client->driver = NULL;
-		client->base.parent = NULL;
-		nvif_object_fini(&client->base);
+		client->object.client = NULL;
+		nvif_object_fini(&client->object);
 	}
 }
 
@@ -68,63 +68,39 @@ nvif_drivers[] = {
 };
 
 int
-nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver,
-		 const char *name, u64 device, const char *cfg, const char *dbg,
-		 struct nvif_client *client)
+nvif_client_init(const char *driver, const char *name, u64 device,
+		 const char *cfg, const char *dbg, struct nvif_client *client)
 {
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_nop_v0 nop;
+	} args = {};
 	int ret, i;
 
-	ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base);
+	ret = nvif_object_init(NULL, 0, 0, NULL, 0, &client->object);
 	if (ret)
 		return ret;
 
-	client->base.parent = &client->base;
-	client->base.handle = ~0;
-	client->object = &client->base;
+	client->object.client = client;
+	client->object.handle = ~0;
+	client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
 	client->super = true;
 
 	for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
 		if (!driver || !strcmp(client->driver->name, driver)) {
 			ret = client->driver->init(name, device, cfg, dbg,
-						  &client->base.priv);
+						  &client->object.priv);
 			if (!ret || driver)
 				break;
 		}
 	}
 
+	if (ret == 0) {
+		ret = nvif_client_ioctl(client, &args, sizeof(args));
+		client->version = args.nop.version;
+	}
+
 	if (ret)
 		nvif_client_fini(client);
 	return ret;
 }
-
-static void
-nvif_client_del(struct nvif_client *client)
-{
-	nvif_client_fini(client);
-	kfree(client);
-}
-
-int
-nvif_client_new(const char *driver, const char *name, u64 device,
-		const char *cfg, const char *dbg,
-		struct nvif_client **pclient)
-{
-	struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (client) {
-		int ret = nvif_client_init(nvif_client_del, driver, name,
-					   device, cfg, dbg, client);
-		if (ret) {
-			kfree(client);
-			client = NULL;
-		}
-		*pclient = client;
-		return ret;
-	}
-	return -ENOMEM;
-}
-
-void
-nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient)
-{
-	nvif_object_ref(&client->base, (struct nvif_object **)pclient);
-}
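
The client conversion drops the kref-based nvif_client_new()/_ref()/_del() trio in favour of caller-embedded storage: nvif_client_init() only fills in a struct the caller owns, and nvif_client_fini() releases resources without freeing the struct itself. A hedged sketch of that embedded-lifecycle pattern, with invented widget_* names standing in for the nvif calls:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct widget {
		char *name;	/* resource owned by the widget */
	};

	/* Fill in caller-owned storage; the struct itself is not allocated. */
	static int widget_init(const char *name, struct widget *w)
	{
		w->name = strdup(name);
		return w->name ? 0 : -ENOMEM;
	}

	/* Release resources but leave the storage to the caller. */
	static void widget_fini(struct widget *w)
	{
		free(w->name);
		w->name = NULL;
	}

	struct consumer {
		struct widget w;	/* embedded: lifetime tied to consumer */
	};

	int main(void)
	{
		struct consumer c;

		if (widget_init("drm", &c.w))
			return 1;
		printf("using %s\n", c.w.name);
		widget_fini(&c.w);	/* no kfree, no refcount */
		return 0;
	}
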
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 6f72244c52cd..252d8c33215b 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -24,55 +24,32 @@
 
 #include <nvif/device.h>
 
+u64
+nvif_device_time(struct nvif_device *device)
+{
+	struct nv_device_time_v0 args = {};
+	int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
+				   &args, sizeof(args));
+	WARN_ON_ONCE(ret != 0);
+	return args.time;
+}
+
 void
 nvif_device_fini(struct nvif_device *device)
 {
-	nvif_object_fini(&device->base);
+	nvif_object_fini(&device->object);
 }
 
 int
-nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *),
-		 u32 handle, u32 oclass, void *data, u32 size,
-		 struct nvif_device *device)
+nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass,
+		 void *data, u32 size, struct nvif_device *device)
 {
-	int ret = nvif_object_init(parent, (void *)dtor, handle, oclass,
-				   data, size, &device->base);
+	int ret = nvif_object_init(parent, handle, oclass, data, size,
+				   &device->object);
 	if (ret == 0) {
-		device->object = &device->base;
 		device->info.version = 0;
-		ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO,
+		ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO,
 				       &device->info, sizeof(device->info));
 	}
 	return ret;
 }
-
-static void
-nvif_device_del(struct nvif_device *device)
-{
-	nvif_device_fini(device);
-	kfree(device);
-}
-
-int
-nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass,
-		void *data, u32 size, struct nvif_device **pdevice)
-{
-	struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (device) {
-		int ret = nvif_device_init(parent, nvif_device_del, handle,
-					   oclass, data, size, device);
-		if (ret) {
-			kfree(device);
-			device = NULL;
-		}
-		*pdevice = device;
-		return ret;
-	}
-	return -ENOMEM;
-}
-
-void
-nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice)
-{
-	nvif_object_ref(&device->base, (struct nvif_object **)pdevice);
-}
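
The new nvif_device_time() is a thin wrapper around an object method call: it zero-initialises a versioned in/out argument struct, issues the method, and reads the result back. A small sketch of that versioned-args convention, where device_mthd() and TIME are invented stand-ins for nvif_object_mthd() and NV_DEVICE_V0_TIME:

	#include <stdint.h>
	#include <stdio.h>

	struct time_v0 {
		uint8_t  version;	/* in: interface version requested */
		uint8_t  pad[7];
		uint64_t time;		/* out: filled in by the method */
	};

	#define TIME 0x01

	/* Pretend object method: validate the version, fill in the reply. */
	static int device_mthd(int mthd, void *data, unsigned size)
	{
		struct time_v0 *args = data;
		if (mthd != TIME || size < sizeof(*args) || args->version != 0)
			return -1;
		args->time = 123456789;	/* stand-in for the PTIMER value */
		return 0;
	}

	static uint64_t device_time(void)
	{
		struct time_v0 args = {};	/* zeroed: version 0 requested */
		if (device_mthd(TIME, &args, sizeof(args)))
			return 0;
		return args.time;
	}

	int main(void)
	{
		printf("gpu time: %llu\n", (unsigned long long)device_time());
		return 0;
	}
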
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index 8e34748709a0..b0787ff833ef 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -124,7 +124,7 @@ nvif_notify(const void *header, u32 length, const void *data, u32 size)
 	}
 
 	if (!WARN_ON(notify == NULL)) {
-		struct nvif_client *client = nvif_client(notify->object);
+		struct nvif_client *client = notify->object->client;
 		if (!WARN_ON(notify->size != size)) {
 			atomic_inc(&notify->putcnt);
 			if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
@@ -156,7 +156,7 @@ nvif_notify_fini(struct nvif_notify *notify)
 	if (ret >= 0 && object) {
 		ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
 		if (ret == 0) {
-			nvif_object_ref(NULL, &notify->object);
+			notify->object = NULL;
 			kfree((void *)notify->data);
 		}
 	}
@@ -164,9 +164,9 @@ nvif_notify_fini(struct nvif_notify *notify)
 }
 
 int
-nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
-		 int (*func)(struct nvif_notify *), bool work, u8 event,
-		 void *data, u32 size, u32 reply, struct nvif_notify *notify)
+nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
+		 bool work, u8 event, void *data, u32 size, u32 reply,
+		 struct nvif_notify *notify)
 {
 	struct {
 		struct nvif_ioctl_v0 ioctl;
@@ -175,11 +175,9 @@ nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
 	} *args;
 	int ret = -ENOMEM;
 
-	notify->object = NULL;
-	nvif_object_ref(object, &notify->object);
+	notify->object = object;
 	notify->flags = 0;
 	atomic_set(&notify->putcnt, 1);
-	notify->dtor = dtor;
 	notify->func = func;
 	notify->data = NULL;
 	notify->size = reply;
@@ -211,38 +209,3 @@ done:
 		nvif_notify_fini(notify);
 	return ret;
 }
-
-static void
-nvif_notify_del(struct nvif_notify *notify)
-{
-	nvif_notify_fini(notify);
-	kfree(notify);
-}
-
-void
-nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
-{
-	BUG_ON(notify != NULL);
-	if (*pnotify)
-		(*pnotify)->dtor(*pnotify);
-	*pnotify = notify;
-}
-
-int
-nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *),
-		bool work, u8 type, void *data, u32 size, u32 reply,
-		struct nvif_notify **pnotify)
-{
-	struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL);
-	if (notify) {
-		int ret = nvif_notify_init(object, nvif_notify_del, func, work,
-					   type, data, size, reply, notify);
-		if (ret) {
-			kfree(notify);
-			notify = NULL;
-		}
-		*pnotify = notify;
-		return ret;
-	}
-	return -ENOMEM;
-}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 3ab4e2f8cc12..c3fb6a20f567 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -30,47 +30,71 @@
 int
 nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
 {
-	struct nvif_client *client = nvif_client(object);
+	struct nvif_client *client = object->client;
 	union {
 		struct nvif_ioctl_v0 v0;
 	} *args = data;
 
 	if (size >= sizeof(*args) && args->v0.version == 0) {
+		if (object != &client->object)
+			args->v0.object = nvif_handle(object);
+		else
+			args->v0.object = 0;
 		args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
-		args->v0.path_nr = 0;
-		while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) {
-			args->v0.path[args->v0.path_nr++] = object->handle;
-			if (object->parent == object)
-				break;
-			object = object->parent;
-		}
 	} else
 		return -ENOSYS;
 
-	return client->driver->ioctl(client->base.priv, client->super, data, size, hack);
+	return client->driver->ioctl(client->object.priv, client->super,
+				     data, size, hack);
+}
+
+void
+nvif_object_sclass_put(struct nvif_sclass **psclass)
+{
+	kfree(*psclass);
+	*psclass = NULL;
 }
 
 int
-nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count)
+nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
 {
 	struct {
 		struct nvif_ioctl_v0 ioctl;
 		struct nvif_ioctl_sclass_v0 sclass;
-	} *args;
-	u32 size = count * sizeof(args->sclass.oclass[0]);
-	int ret;
+	} *args = NULL;
+	int ret, cnt = 0, i;
+	u32 size;
 
-	if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
-		return -ENOMEM;
-	args->ioctl.version = 0;
-	args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
-	args->sclass.version = 0;
-	args->sclass.count = count;
+	while (1) {
+		size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
+		if (!(args = kmalloc(size, GFP_KERNEL)))
+			return -ENOMEM;
+		args->ioctl.version = 0;
+		args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
+		args->sclass.version = 0;
+		args->sclass.count = cnt;
+
+		ret = nvif_object_ioctl(object, args, size, NULL);
+		if (ret == 0 && args->sclass.count <= cnt)
+			break;
+		cnt = args->sclass.count;
+		kfree(args);
+		if (ret != 0)
+			return ret;
+	}
+
+	*psclass = kzalloc(sizeof(**psclass) * args->sclass.count, GFP_KERNEL);
+	if (*psclass) {
+		for (i = 0; i < args->sclass.count; i++) {
+			(*psclass)[i].oclass = args->sclass.oclass[i].oclass;
+			(*psclass)[i].minver = args->sclass.oclass[i].minver;
+			(*psclass)[i].maxver = args->sclass.oclass[i].maxver;
+		}
+		ret = args->sclass.count;
+	} else {
+		ret = -ENOMEM;
+	}
 
-	memcpy(args->sclass.oclass, oclass, size);
-	ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
-	ret = ret ? ret : args->sclass.count;
-	memcpy(oclass, args->sclass.oclass, size);
 	kfree(args);
 	return ret;
 }
@@ -145,7 +169,7 @@ void
 nvif_object_unmap(struct nvif_object *object)
 {
 	if (object->map.size) {
-		struct nvif_client *client = nvif_client(object);
+		struct nvif_client *client = object->client;
 		struct {
 			struct nvif_ioctl_v0 ioctl;
 			struct nvif_ioctl_unmap unmap;
@@ -167,7 +191,7 @@ nvif_object_unmap(struct nvif_object *object)
 int
 nvif_object_map(struct nvif_object *object)
 {
-	struct nvif_client *client = nvif_client(object);
+	struct nvif_client *client = object->client;
 	struct {
 		struct nvif_ioctl_v0 ioctl;
 		struct nvif_ioctl_map_v0 map;
@@ -186,119 +210,65 @@ nvif_object_map(struct nvif_object *object)
 	return ret;
 }
 
-struct ctor {
-	struct nvif_ioctl_v0 ioctl;
-	struct nvif_ioctl_new_v0 new;
-};
-
 void
 nvif_object_fini(struct nvif_object *object)
 {
-	struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data);
-	if (object->parent) {
-		struct {
-			struct nvif_ioctl_v0 ioctl;
-			struct nvif_ioctl_del del;
-		} args = {
-			.ioctl.type = NVIF_IOCTL_V0_DEL,
-		};
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_del del;
+	} args = {
+		.ioctl.type = NVIF_IOCTL_V0_DEL,
+	};
 
-		nvif_object_unmap(object);
-		nvif_object_ioctl(object, &args, sizeof(args), NULL);
-		if (object->data) {
-			object->size = 0;
-			object->data = NULL;
-			kfree(ctor);
-		}
-		nvif_object_ref(NULL, &object->parent);
-	}
+	if (!object->client)
+		return;
+
+	nvif_object_unmap(object);
+	nvif_object_ioctl(object, &args, sizeof(args), NULL);
+	object->client = NULL;
 }
 
 int
-nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *),
-		 u32 handle, u32 oclass, void *data, u32 size,
-		 struct nvif_object *object)
+nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
+		 void *data, u32 size, struct nvif_object *object)
 {
-	struct ctor *ctor;
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_new_v0 new;
+	} *args;
 	int ret = 0;
 
-	object->parent = NULL;
-	object->object = object;
-	nvif_object_ref(parent, &object->parent);
-	kref_init(&object->refcount);
+	object->client = NULL;
 	object->handle = handle;
 	object->oclass = oclass;
-	object->data = NULL;
-	object->size = 0;
-	object->dtor = dtor;
 	object->map.ptr = NULL;
 	object->map.size = 0;
 
-	if (object->parent) {
-		if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) {
+	if (parent) {
+		if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
 			nvif_object_fini(object);
 			return -ENOMEM;
 		}
-		object->data = ctor->new.data;
-		object->size = size;
-		memcpy(object->data, data, size);
 
-		ctor->ioctl.version = 0;
-		ctor->ioctl.type = NVIF_IOCTL_V0_NEW;
-		ctor->new.version = 0;
-		ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
-		ctor->new.token = (unsigned long)(void *)object;
-		ctor->new.handle = handle;
-		ctor->new.oclass = oclass;
+		args->ioctl.version = 0;
+		args->ioctl.type = NVIF_IOCTL_V0_NEW;
+		args->new.version = 0;
+		args->new.route = parent->client->route;
+		args->new.token = nvif_handle(object);
+		args->new.object = nvif_handle(object);
+		args->new.handle = handle;
+		args->new.oclass = oclass;
 
-		ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) +
-					object->size, &object->priv);
+		memcpy(args->new.data, data, size);
+		ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
+					&object->priv);
+		memcpy(data, args->new.data, size);
+		kfree(args);
+		if (ret == 0)
+			object->client = parent->client;
 	}
 
 	if (ret)
 		nvif_object_fini(object);
 	return ret;
 }
-
-static void
-nvif_object_del(struct nvif_object *object)
-{
-	nvif_object_fini(object);
-	kfree(object);
-}
-
-int
-nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass,
-		void *data, u32 size, struct nvif_object **pobject)
-{
-	struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL);
-	if (object) {
-		int ret = nvif_object_init(parent, nvif_object_del, handle,
-					   oclass, data, size, object);
-		if (ret) {
-			kfree(object);
-			object = NULL;
-		}
-		*pobject = object;
-		return ret;
-	}
-	return -ENOMEM;
-}
-
-static void
-nvif_object_put(struct kref *kref)
-{
-	struct nvif_object *object =
-		container_of(kref, typeof(*object), refcount);
-	object->dtor(object);
-}
-
-void
-nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject)
-{
-	if (object)
-		kref_get(&object->refcount);
-	if (*pobject)
-		kref_put(&(*pobject)->refcount, nvif_object_put);
-	*pobject = object;
-}
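
nvif_object_sclass_get() now sizes its buffer by asking first: it issues the SCLASS ioctl with a zero count, reads back how many classes actually exist, reallocates, and retries in case the count grew in between. A self-contained sketch of that query/allocate/retry loop, with fake_query() standing in for the ioctl:

	#include <stdio.h>
	#include <stdlib.h>

	/* Pretend kernel side: fills up to *count entries, then writes the
	 * real total back so the caller can size its next attempt. */
	static int fake_query(int *buf, int *count)
	{
		static const int classes[] = { 0x502d, 0x902d, 0x906f };
		int total = 3, i;
		for (i = 0; i < *count && i < total; i++)
			buf[i] = classes[i];
		*count = total;
		return 0;
	}

	int main(void)
	{
		int cnt = 0, *buf = NULL, i;

		for (;;) {
			int need = cnt;
			free(buf);
			buf = malloc(sizeof(*buf) * (need ? need : 1));
			if (!buf)
				return 1;
			if (fake_query(buf, &need))
				return 1;
			if (need <= cnt)	/* buffer was big enough */
				break;
			cnt = need;		/* grew (or first pass): retry */
		}

		for (i = 0; i < cnt; i++)
			printf("class 0x%04x\n", buf[i]);
		free(buf);
		return 0;
	}
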
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
index a2bdb2069113..7f66963f305c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -1,17 +1,14 @@
 nvkm-y := nvkm/core/client.o
-nvkm-y += nvkm/core/engctx.o
 nvkm-y += nvkm/core/engine.o
 nvkm-y += nvkm/core/enum.o
 nvkm-y += nvkm/core/event.o
 nvkm-y += nvkm/core/gpuobj.o
-nvkm-y += nvkm/core/handle.o
 nvkm-y += nvkm/core/ioctl.o
+nvkm-y += nvkm/core/memory.o
 nvkm-y += nvkm/core/mm.o
-nvkm-y += nvkm/core/namedb.o
 nvkm-y += nvkm/core/notify.o
 nvkm-y += nvkm/core/object.o
+nvkm-y += nvkm/core/oproxy.o
 nvkm-y += nvkm/core/option.o
-nvkm-y += nvkm/core/parent.o
-nvkm-y += nvkm/core/printk.o
 nvkm-y += nvkm/core/ramht.o
 nvkm-y += nvkm/core/subdev.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index 878a82f8f295..297e1e953fa6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -23,7 +23,6 @@
  */
 #include <core/client.h>
 #include <core/device.h>
-#include <core/handle.h>
 #include <core/notify.h>
 #include <core/option.h>
 
@@ -91,7 +90,7 @@ int
 nvkm_client_notify_new(struct nvkm_object *object,
 		       struct nvkm_event *event, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(object);
+	struct nvkm_client *client = object->client;
 	struct nvkm_client_notify *notify;
 	union {
 		struct nvif_notify_req_v0 v0;
@@ -111,11 +110,11 @@ nvkm_client_notify_new(struct nvkm_object *object,
 	if (!notify)
 		return -ENOMEM;
 
-	nv_ioctl(client, "notify new size %d\n", size);
+	nvif_ioctl(object, "notify new size %d\n", size);
 	if (nvif_unpack(req->v0, 0, 0, true)) {
-		nv_ioctl(client, "notify new vers %d reply %d route %02x "
-				 "token %llx\n", req->v0.version,
-			 req->v0.reply, req->v0.route, req->v0.token);
+		nvif_ioctl(object, "notify new vers %d reply %d route %02x "
+				   "token %llx\n", req->v0.version,
+			   req->v0.reply, req->v0.route, req->v0.token);
 		notify->version = req->v0.version;
 		notify->size = sizeof(notify->rep.v0);
 		notify->rep.v0.version = req->v0.version;
@@ -146,10 +145,10 @@ nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "client devlist size %d\n", size);
+	nvif_ioctl(object, "client devlist size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "client devlist vers %d count %d\n",
-			 args->v0.version, args->v0.count);
+		nvif_ioctl(object, "client devlist vers %d count %d\n",
+			   args->v0.version, args->v0.count);
 		if (size == sizeof(args->v0.device[0]) * args->v0.count) {
 			ret = nvkm_device_list(args->v0.device, args->v0.count);
 			if (ret >= 0) {
@@ -176,91 +175,134 @@ nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 	return -EINVAL;
 }
 
-static void
-nvkm_client_dtor(struct nvkm_object *object)
+static int
+nvkm_client_child_new(const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
 {
-	struct nvkm_client *client = (void *)object;
-	int i;
-	for (i = 0; i < ARRAY_SIZE(client->notify); i++)
-		nvkm_client_notify_del(client, i);
-	nvkm_object_ref(NULL, &client->device);
-	nvkm_handle_destroy(client->root);
-	nvkm_namedb_destroy(&client->namedb);
+	return oclass->base.ctor(oclass, data, size, pobject);
 }
 
-static struct nvkm_oclass
-nvkm_client_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_client_dtor,
-		.mthd = nvkm_client_mthd,
-	},
-};
-
-int
-nvkm_client_create_(const char *name, u64 devname, const char *cfg,
-		    const char *dbg, int length, void **pobject)
+static int
+nvkm_client_child_get(struct nvkm_object *object, int index,
+		      struct nvkm_oclass *oclass)
 {
-	struct nvkm_object *device;
-	struct nvkm_client *client;
-	int ret;
+	const struct nvkm_sclass *sclass;
+
+	switch (index) {
+	case 0: sclass = &nvkm_udevice_sclass; break;
+	default:
+		return -EINVAL;
+	}
 
-	device = (void *)nvkm_device_find(devname);
-	if (!device)
-		return -ENODEV;
+	oclass->ctor = nvkm_client_child_new;
+	oclass->base = *sclass;
+	return 0;
+}
+
+static const struct nvkm_object_func
+nvkm_client_object_func = {
+	.mthd = nvkm_client_mthd,
+	.sclass = nvkm_client_child_get,
+};
 
-	ret = nvkm_namedb_create_(NULL, NULL, &nvkm_client_oclass,
-				  NV_CLIENT_CLASS, NULL,
-				  (1ULL << NVDEV_ENGINE_DEVICE),
-				  length, pobject);
-	client = *pobject;
-	if (ret)
-		return ret;
+void
+nvkm_client_remove(struct nvkm_client *client, struct nvkm_object *object)
+{
+	if (!RB_EMPTY_NODE(&object->node))
+		rb_erase(&object->node, &client->objroot);
+}
 
-	ret = nvkm_handle_create(nv_object(client), ~0, ~0, nv_object(client),
-				 &client->root);
-	if (ret)
-		return ret;
+bool
+nvkm_client_insert(struct nvkm_client *client, struct nvkm_object *object)
+{
+	struct rb_node **ptr = &client->objroot.rb_node;
+	struct rb_node *parent = NULL;
 
-	/* prevent init/fini being called, os is in charge of this */
-	atomic_set(&nv_object(client)->usecount, 2);
+	while (*ptr) {
+		struct nvkm_object *this =
+			container_of(*ptr, typeof(*this), node);
+		parent = *ptr;
+		if (object->object < this->object)
+			ptr = &parent->rb_left;
+		else
+		if (object->object > this->object)
+			ptr = &parent->rb_right;
+		else
+			return false;
+	}
 
-	nvkm_object_ref(device, &client->device);
-	snprintf(client->name, sizeof(client->name), "%s", name);
-	client->debug = nvkm_dbgopt(dbg, "CLIENT");
-	return 0;
+	rb_link_node(&object->node, parent, ptr);
+	rb_insert_color(&object->node, &client->objroot);
+	return true;
 }
 
-int
-nvkm_client_init(struct nvkm_client *client)
+struct nvkm_object *
+nvkm_client_search(struct nvkm_client *client, u64 handle)
 {
-	int ret;
-	nv_debug(client, "init running\n");
-	ret = nvkm_handle_init(client->root);
-	nv_debug(client, "init completed with %d\n", ret);
-	return ret;
+	struct rb_node *node = client->objroot.rb_node;
+	while (node) {
+		struct nvkm_object *object =
+			container_of(node, typeof(*object), node);
+		if (handle < object->object)
+			node = node->rb_left;
+		else
+		if (handle > object->object)
+			node = node->rb_right;
+		else
+			return object;
+	}
+	return NULL;
 }
 
 int
 nvkm_client_fini(struct nvkm_client *client, bool suspend)
 {
+	struct nvkm_object *object = &client->object;
 	const char *name[2] = { "fini", "suspend" };
-	int ret, i;
-	nv_debug(client, "%s running\n", name[suspend]);
-	nv_debug(client, "%s notify\n", name[suspend]);
+	int i;
+	nvif_debug(object, "%s notify\n", name[suspend]);
 	for (i = 0; i < ARRAY_SIZE(client->notify); i++)
 		nvkm_client_notify_put(client, i);
-	nv_debug(client, "%s object\n", name[suspend]);
-	ret = nvkm_handle_fini(client->root, suspend);
-	nv_debug(client, "%s completed with %d\n", name[suspend], ret);
-	return ret;
+	return nvkm_object_fini(&client->object, suspend);
+}
+
+int
+nvkm_client_init(struct nvkm_client *client)
+{
+	return nvkm_object_init(&client->object);
+}
+
+void
+nvkm_client_del(struct nvkm_client **pclient)
+{
+	struct nvkm_client *client = *pclient;
+	int i;
+	if (client) {
+		nvkm_client_fini(client, false);
+		for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+			nvkm_client_notify_del(client, i);
+		nvkm_object_dtor(&client->object);
+		kfree(*pclient);
+		*pclient = NULL;
+	}
 }
 
-const char *
-nvkm_client_name(void *obj)
+int
+nvkm_client_new(const char *name, u64 device, const char *cfg,
+		const char *dbg, struct nvkm_client **pclient)
 {
-	const char *client_name = "unknown";
-	struct nvkm_client *client = nvkm_client(obj);
-	if (client)
-		client_name = client->name;
-	return client_name;
+	struct nvkm_oclass oclass = {};
+	struct nvkm_client *client;
+
+	if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL)))
+		return -ENOMEM;
+	oclass.client = client;
+
+	nvkm_object_ctor(&nvkm_client_object_func, &oclass, &client->object);
+	snprintf(client->name, sizeof(client->name), "%s", name);
+	client->device = device;
+	client->debug = nvkm_dbgopt(dbg, "CLIENT");
+	client->objroot = RB_ROOT;
+	client->dmaroot = RB_ROOT;
+	return 0;
 }
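
Objects are now looked up by their 64-bit handle through the client's rb-tree (objroot) instead of the old handle hierarchy. The sketch below models the same insert/search walk with a plain, unbalanced binary search tree, since the kernel's rb_* helpers are not available outside the tree; the node layout is illustrative only:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		uint64_t handle;
		struct obj *l, *r;
	};

	/* Walk down to the matching leaf slot; reject duplicate handles. */
	static int insert(struct obj **root, struct obj *obj)
	{
		while (*root) {
			if (obj->handle < (*root)->handle)
				root = &(*root)->l;
			else if (obj->handle > (*root)->handle)
				root = &(*root)->r;
			else
				return 0;	/* duplicate handle */
		}
		*root = obj;
		return 1;
	}

	static struct obj *search(struct obj *root, uint64_t handle)
	{
		while (root && root->handle != handle)
			root = handle < root->handle ? root->l : root->r;
		return root;
	}

	int main(void)
	{
		struct obj a = { .handle = 0xf0000000 };
		struct obj b = { .handle = 0xd0000000 };
		struct obj *root = NULL;

		insert(&root, &a);
		insert(&root, &b);
		printf("found: %d\n", search(root, 0xd0000000) != NULL);
		printf("dup rejected: %d\n", !insert(&root, &a));
		return 0;
	}
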
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
deleted file mode 100644
index fb2acbca75d9..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/engctx.h>
-#include <core/engine.h>
-#include <core/client.h>
-
-static inline int
-nvkm_engctx_exists(struct nvkm_object *parent,
-		   struct nvkm_engine *engine, void **pobject)
-{
-	struct nvkm_engctx *engctx;
-	struct nvkm_object *parctx;
-
-	list_for_each_entry(engctx, &engine->contexts, head) {
-		parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
-		if (parctx == parent) {
-			atomic_inc(&nv_object(engctx)->refcount);
-			*pobject = engctx;
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-int
-nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
-		    struct nvkm_oclass *oclass, struct nvkm_object *pargpu,
-		    u32 size, u32 align, u32 flags, int length, void **pobject)
-{
-	struct nvkm_client *client = nvkm_client(parent);
-	struct nvkm_engine *engine = nv_engine(engobj);
-	struct nvkm_object *engctx;
-	unsigned long save;
-	int ret;
-
-	/* check if this engine already has a context for the parent object,
-	 * and reference it instead of creating a new one
-	 */
-	spin_lock_irqsave(&engine->lock, save);
-	ret = nvkm_engctx_exists(parent, engine, pobject);
-	spin_unlock_irqrestore(&engine->lock, save);
-	if (ret)
-		return ret;
-
-	/* create the new context, supports creating both raw objects and
-	 * objects backed by instance memory
-	 */
-	if (size) {
-		ret = nvkm_gpuobj_create_(parent, engobj, oclass,
-					  NV_ENGCTX_CLASS, pargpu, size,
-					  align, flags, length, pobject);
-	} else {
-		ret = nvkm_object_create_(parent, engobj, oclass,
-					  NV_ENGCTX_CLASS, length, pobject);
-	}
-
-	engctx = *pobject;
-	if (ret)
-		return ret;
-
-	/* must take the lock again and re-check a context doesn't already
-	 * exist (in case of a race) - the lock had to be dropped before as
-	 * it's not possible to allocate the object with it held.
-	 */
-	spin_lock_irqsave(&engine->lock, save);
-	ret = nvkm_engctx_exists(parent, engine, pobject);
-	if (ret) {
-		spin_unlock_irqrestore(&engine->lock, save);
-		nvkm_object_ref(NULL, &engctx);
-		return ret;
-	}
-
-	if (client->vm)
-		atomic_inc(&client->vm->engref[nv_engidx(engine)]);
-	list_add(&nv_engctx(engctx)->head, &engine->contexts);
-	nv_engctx(engctx)->addr = ~0ULL;
-	spin_unlock_irqrestore(&engine->lock, save);
-	return 0;
-}
-
-void
-nvkm_engctx_destroy(struct nvkm_engctx *engctx)
-{
-	struct nvkm_engine *engine = engctx->gpuobj.object.engine;
-	struct nvkm_client *client = nvkm_client(engctx);
-	unsigned long save;
-
-	nvkm_gpuobj_unmap(&engctx->vma);
-	spin_lock_irqsave(&engine->lock, save);
-	list_del(&engctx->head);
-	spin_unlock_irqrestore(&engine->lock, save);
-
-	if (client->vm)
-		atomic_dec(&client->vm->engref[nv_engidx(engine)]);
-
-	if (engctx->gpuobj.size)
-		nvkm_gpuobj_destroy(&engctx->gpuobj);
-	else
-		nvkm_object_destroy(&engctx->gpuobj.object);
-}
-
-int
-nvkm_engctx_init(struct nvkm_engctx *engctx)
-{
-	struct nvkm_object *object = nv_object(engctx);
-	struct nvkm_subdev *subdev = nv_subdev(object->engine);
-	struct nvkm_object *parent;
-	struct nvkm_subdev *pardev;
-	int ret;
-
-	ret = nvkm_gpuobj_init(&engctx->gpuobj);
-	if (ret)
-		return ret;
-
-	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
-	pardev = nv_subdev(parent->engine);
-	if (nv_parent(parent)->context_attach) {
-		mutex_lock(&pardev->mutex);
-		ret = nv_parent(parent)->context_attach(parent, object);
-		mutex_unlock(&pardev->mutex);
-	}
-
-	if (ret) {
-		nv_error(parent, "failed to attach %s context, %d\n",
-			 subdev->name, ret);
-		return ret;
-	}
-
-	nv_debug(parent, "attached %s context\n", subdev->name);
-	return 0;
-}
-
-int
-nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
-{
-	struct nvkm_object *object = nv_object(engctx);
-	struct nvkm_subdev *subdev = nv_subdev(object->engine);
-	struct nvkm_object *parent;
-	struct nvkm_subdev *pardev;
-	int ret = 0;
-
-	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
-	pardev = nv_subdev(parent->engine);
-	if (nv_parent(parent)->context_detach) {
-		mutex_lock(&pardev->mutex);
-		ret = nv_parent(parent)->context_detach(parent, suspend, object);
-		mutex_unlock(&pardev->mutex);
-	}
-
-	if (ret) {
-		nv_error(parent, "failed to detach %s context, %d\n",
-			 subdev->name, ret);
-		return ret;
-	}
-
-	nv_debug(parent, "detached %s context\n", subdev->name);
-	return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
-}
-
-int
-_nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_engctx *engctx;
-	int ret;
-
-	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256,
-				 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
-	*pobject = nv_object(engctx);
-	return ret;
-}
-
-void
-_nvkm_engctx_dtor(struct nvkm_object *object)
-{
-	nvkm_engctx_destroy(nv_engctx(object));
-}
-
-int
-_nvkm_engctx_init(struct nvkm_object *object)
-{
-	return nvkm_engctx_init(nv_engctx(object));
-}
-
-int
-_nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
-{
-	return nvkm_engctx_fini(nv_engctx(object), suspend);
-}
-
-struct nvkm_object *
-nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
-{
-	struct nvkm_engctx *engctx;
-	unsigned long flags;
-
-	spin_lock_irqsave(&engine->lock, flags);
-	list_for_each_entry(engctx, &engine->contexts, head) {
-		if (engctx->addr == addr) {
-			engctx->save = flags;
-			return nv_object(engctx);
-		}
-	}
-	spin_unlock_irqrestore(&engine->lock, flags);
-	return NULL;
-}
-
-void
-nvkm_engctx_put(struct nvkm_object *object)
-{
-	if (object) {
-		struct nvkm_engine *engine = nv_engine(object->engine);
-		struct nvkm_engctx *engctx = nv_engctx(object);
-		spin_unlock_irqrestore(&engine->lock, engctx->save);
-	}
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 60820173c6aa..8a7bae7bd995 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -25,51 +25,141 @@
 #include <core/device.h>
 #include <core/option.h>
 
+#include <subdev/fb.h>
+
+void
+nvkm_engine_unref(struct nvkm_engine **pengine)
+{
+	struct nvkm_engine *engine = *pengine;
+	if (engine) {
+		mutex_lock(&engine->subdev.mutex);
+		if (--engine->usecount == 0)
+			nvkm_subdev_fini(&engine->subdev, false);
+		mutex_unlock(&engine->subdev.mutex);
+		*pengine = NULL;
+	}
+}
+
 struct nvkm_engine *
-nvkm_engine(void *obj, int idx)
+nvkm_engine_ref(struct nvkm_engine *engine)
 {
-	obj = nvkm_subdev(obj, idx);
-	if (obj && nv_iclass(obj, NV_ENGINE_CLASS))
-		return nv_engine(obj);
-	return NULL;
+	if (engine) {
+		mutex_lock(&engine->subdev.mutex);
+		if (++engine->usecount == 1) {
+			int ret = nvkm_subdev_init(&engine->subdev);
+			if (ret) {
+				engine->usecount--;
+				mutex_unlock(&engine->subdev.mutex);
+				return ERR_PTR(ret);
+			}
+		}
+		mutex_unlock(&engine->subdev.mutex);
+	}
+	return engine;
 }
 
-int
-nvkm_engine_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
-		    struct nvkm_oclass *oclass, bool enable,
-		    const char *iname, const char *fname,
-		    int length, void **pobject)
+void
+nvkm_engine_tile(struct nvkm_engine *engine, int region)
 {
-	struct nvkm_engine *engine;
-	int ret;
+	struct nvkm_fb *fb = engine->subdev.device->fb;
+	if (engine->func->tile)
+		engine->func->tile(engine, region, &fb->tile.region[region]);
+}
 
-	ret = nvkm_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
-				  iname, fname, length, pobject);
-	engine = *pobject;
-	if (ret)
-		return ret;
+static void
+nvkm_engine_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->intr)
+		engine->func->intr(engine);
+}
 
-	if (parent) {
-		struct nvkm_device *device = nv_device(parent);
-		int engidx = nv_engidx(engine);
+static int
+nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->fini)
+		return engine->func->fini(engine, suspend);
+	return 0;
+}
 
-		if (device->disable_mask & (1ULL << engidx)) {
-			if (!nvkm_boolopt(device->cfgopt, iname, false)) {
-				nv_debug(engine, "engine disabled by hw/fw\n");
-				return -ENODEV;
-			}
+static int
+nvkm_engine_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	struct nvkm_fb *fb = subdev->device->fb;
+	int ret = 0, i;
+	s64 time;
 
-			nv_warn(engine, "ignoring hw/fw engine disable\n");
-		}
+	if (!engine->usecount) {
+		nvkm_trace(subdev, "init skipped, engine has no users\n");
+		return ret;
+	}
 
-		if (!nvkm_boolopt(device->cfgopt, iname, enable)) {
-			if (!enable)
-				nv_warn(engine, "disabled, %s=1 to enable\n", iname);
-			return -ENODEV;
+	if (engine->func->oneinit && !engine->subdev.oneinit) {
+		nvkm_trace(subdev, "one-time init running...\n");
+		time = ktime_to_us(ktime_get());
+		ret = engine->func->oneinit(engine);
+		if (ret) {
+			nvkm_trace(subdev, "one-time init failed, %d\n", ret);
+			return ret;
 		}
+
+		engine->subdev.oneinit = true;
+		time = ktime_to_us(ktime_get()) - time;
+		nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
+	}
+
+	if (engine->func->init)
+		ret = engine->func->init(engine);
+
+	for (i = 0; fb && i < fb->tile.regions; i++)
+		nvkm_engine_tile(engine, i);
+	return ret;
+}
+
+static void *
+nvkm_engine_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->dtor)
+		return engine->func->dtor(engine);
+	return engine;
+}
+
+static const struct nvkm_subdev_func
+nvkm_engine_func = {
+	.dtor = nvkm_engine_dtor,
+	.init = nvkm_engine_init,
+	.fini = nvkm_engine_fini,
+	.intr = nvkm_engine_intr,
+};
+
+int
+nvkm_engine_ctor(const struct nvkm_engine_func *func,
+		 struct nvkm_device *device, int index, u32 pmc_enable,
+		 bool enable, struct nvkm_engine *engine)
+{
+	nvkm_subdev_ctor(&nvkm_engine_func, device, index,
+			 pmc_enable, &engine->subdev);
+	engine->func = func;
+
+	if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
+		nvkm_debug(&engine->subdev, "disabled\n");
+		return -ENODEV;
 	}
 
-	INIT_LIST_HEAD(&engine->contexts);
 	spin_lock_init(&engine->lock);
 	return 0;
 }
+
+int
+nvkm_engine_new_(const struct nvkm_engine_func *func,
+		 struct nvkm_device *device, int index, u32 pmc_enable,
+		 bool enable, struct nvkm_engine **pengine)
+{
+	if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_engine_ctor(func, device, index, pmc_enable,
+				enable, *pengine);
+}
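
nvkm_engine_ref()/nvkm_engine_unref() gate subdev init/fini on a usecount: the first reference brings the engine up, the last one shuts it down, with both the count and the transition serialised by one mutex. A userspace model of that lazy-init refcount using pthreads; the printf calls stand in for nvkm_subdev_init()/nvkm_subdev_fini():

	#include <pthread.h>
	#include <stdio.h>

	struct engine {
		pthread_mutex_t lock;
		int usecount;
		int running;
	};

	static struct engine eng = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	static void engine_ref(struct engine *e)
	{
		pthread_mutex_lock(&e->lock);
		if (++e->usecount == 1) {	/* first user powers it on */
			e->running = 1;
			printf("engine powered on\n");
		}
		pthread_mutex_unlock(&e->lock);
	}

	static void engine_unref(struct engine *e)
	{
		pthread_mutex_lock(&e->lock);
		if (--e->usecount == 0) {	/* last user powers it off */
			e->running = 0;
			printf("engine powered off\n");
		}
		pthread_mutex_unlock(&e->lock);
	}

	int main(void)
	{
		engine_ref(&eng);	/* init runs */
		engine_ref(&eng);	/* no re-init */
		engine_unref(&eng);
		engine_unref(&eng);	/* fini runs */
		return 0;
	}
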
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
index 4f92bfc13d6b..b9581feb24cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
@@ -38,29 +38,19 @@ nvkm_enum_find(const struct nvkm_enum *en, u32 value)
 	return NULL;
 }
 
-const struct nvkm_enum *
-nvkm_enum_print(const struct nvkm_enum *en, u32 value)
-{
-	en = nvkm_enum_find(en, value);
-	if (en)
-		pr_cont("%s", en->name);
-	else
-		pr_cont("(unknown enum 0x%08x)", value);
-	return en;
-}
-
 void
-nvkm_bitfield_print(const struct nvkm_bitfield *bf, u32 value)
+nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value)
 {
-	while (bf->name) {
+	bool space = false;
+	while (size >= 1 && bf->name) {
 		if (value & bf->mask) {
-			pr_cont(" %s", bf->name);
-			value &= ~bf->mask;
+			int this = snprintf(data, size, "%s%s",
+					    space ? " " : "", bf->name);
+			size -= this;
+			data += this;
+			space = true;
 		}
-
 		bf++;
 	}
-
-	if (value)
-		pr_cont(" (unknown bits 0x%08x)", value);
+	data[0] = '\0';
 }
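
nvkm_bitfield_print()'s pr_cont() chain becomes nvkm_snprintbf(), which decodes the set bits into a caller-supplied buffer so the result can go out in a single log call. A standalone usage sketch; the reimplementation below also clamps the snprintf() return value so a truncated entry cannot advance past the end of the buffer, and the interrupt table is invented:

	#include <stdio.h>

	struct bitfield { unsigned mask; const char *name; };

	static void snprintbf(char *data, int size,
			      const struct bitfield *bf, unsigned value)
	{
		int space = 0;
		while (size >= 1 && bf->name) {
			if (value & bf->mask) {
				int n = snprintf(data, size, "%s%s",
						 space ? " " : "", bf->name);
				if (n >= size)	/* truncated: stay in bounds */
					n = size - 1;
				size -= n;
				data += n;
				space = 1;
			}
			bf++;
		}
		data[0] = '\0';
	}

	int main(void)
	{
		static const struct bitfield intr[] = {
			{ 0x00000001, "FIFO" },
			{ 0x00000002, "GR" },
			{ 0x00000100, "DISP" },
			{}
		};
		char msg[128];

		snprintbf(msg, sizeof(msg), intr, 0x00000103);
		printf("intr [ %s ]\n", msg);	/* intr [ FIFO GR DISP ] */
		return 0;
	}
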
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index 2eba801aae6f..c3a790eb8d6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -28,240 +28,205 @@
 #include <subdev/bar.h>
 #include <subdev/mmu.h>
 
-void
-nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
+/* fast-path, where backend is able to provide direct pointer to memory */
+static u32
+nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
-	int i;
-
-	if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
-		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, 0x00000000);
-	}
-
-	if (gpuobj->node)
-		nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);
+	return ioread32_native(gpuobj->map + offset);
+}
 
-	if (gpuobj->heap.block_size)
-		nvkm_mm_fini(&gpuobj->heap);
+static void
+nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+{
+	iowrite32_native(data, gpuobj->map + offset);
+}
 
-	nvkm_object_destroy(&gpuobj->object);
+/* accessor functions for gpuobjs allocated directly from instmem */
+static u32
+nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+	return nvkm_ro32(gpuobj->memory, offset);
 }
 
-int
-nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    struct nvkm_object *pargpu, u32 size, u32 align, u32 flags,
-		    int length, void **pobject)
+static void
+nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(parent);
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nvkm_gpuobj *gpuobj;
-	struct nvkm_mm *heap = NULL;
-	int ret, i;
-	u64 addr;
+	nvkm_wo32(gpuobj->memory, offset, data);
+}
 
-	*pobject = NULL;
+static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
+static void
+nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->func = &nvkm_gpuobj_heap;
+	nvkm_done(gpuobj->memory);
+}
 
-	if (pargpu) {
-		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
-			if (nv_gpuobj(pargpu)->heap.block_size)
-				break;
-			pargpu = pargpu->parent;
-		}
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_fast = {
+	.release = nvkm_gpuobj_heap_release,
+	.rd32 = nvkm_gpuobj_rd32_fast,
+	.wr32 = nvkm_gpuobj_wr32_fast,
+};
 
-		if (unlikely(pargpu == NULL)) {
-			nv_error(parent, "no gpuobj heap\n");
-			return -EINVAL;
-		}
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_slow = {
+	.release = nvkm_gpuobj_heap_release,
+	.rd32 = nvkm_gpuobj_heap_rd32,
+	.wr32 = nvkm_gpuobj_heap_wr32,
+};
 
-		addr =  nv_gpuobj(pargpu)->addr;
-		heap = &nv_gpuobj(pargpu)->heap;
-		atomic_inc(&parent->refcount);
-	} else {
-		ret = imem->alloc(imem, parent, size, align, &parent);
-		pargpu = parent;
-		if (ret)
-			return ret;
+static void *
+nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->map = nvkm_kmap(gpuobj->memory);
+	if (likely(gpuobj->map))
+		gpuobj->func = &nvkm_gpuobj_heap_fast;
+	else
+		gpuobj->func = &nvkm_gpuobj_heap_slow;
+	return gpuobj->map;
+}
 
-		addr = nv_memobj(pargpu)->addr;
-		size = nv_memobj(pargpu)->size;
-
-		if (bar && bar->alloc) {
-			struct nvkm_instobj *iobj = (void *)parent;
-			struct nvkm_mem **mem = (void *)(iobj + 1);
-			struct nvkm_mem *node = *mem;
-			if (!bar->alloc(bar, parent, node, &pargpu)) {
-				nvkm_object_ref(NULL, &parent);
-				parent = pargpu;
-			}
-		}
-	}
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap = {
+	.acquire = nvkm_gpuobj_heap_acquire,
+};
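
The pair of function tables above gives each gpuobj a small state machine: acquire() swaps in the direct ioread/iowrite accessors when nvkm_kmap() succeeds, falls back to the indirect ones otherwise, and release() restores the acquire-only table. A minimal standalone sketch of that vtable-swap pattern, using hypothetical types rather than the nouveau API:

/* Minimal sketch of the vtable-swap pattern used above: acquire() installs
 * a fast accessor table when a direct mapping is available, and release()
 * restores the table whose only job is to re-run acquire().  Hypothetical
 * types; not part of the nouveau API. */
#include <stdio.h>

struct obj;

struct obj_func {
	void *(*acquire)(struct obj *);
	void  (*release)(struct obj *);
	unsigned (*rd32)(struct obj *, unsigned offset);
};

struct obj {
	const struct obj_func *func;
	unsigned *map;			/* direct pointer when "mapped" */
	unsigned backing[4];
};

static const struct obj_func obj_unmapped;

static unsigned obj_rd32_fast(struct obj *o, unsigned offset)
{
	return o->map[offset / 4];
}

static void obj_release(struct obj *o)
{
	o->map = NULL;
	o->func = &obj_unmapped;	/* back to the acquire-only state */
}

static const struct obj_func obj_mapped = {
	.release = obj_release,
	.rd32 = obj_rd32_fast,
};

static void *obj_acquire(struct obj *o)
{
	o->map = o->backing;		/* pretend the kmap succeeded */
	o->func = &obj_mapped;		/* swap in the fast accessors */
	return o->map;
}

static const struct obj_func obj_unmapped = {
	.acquire = obj_acquire,
};

int main(void)
{
	struct obj o = { .func = &obj_unmapped, .backing = { 42 } };

	o.func->acquire(&o);
	printf("%u\n", o.func->rd32(&o, 0));	/* prints 42 */
	o.func->release(&o);
	return 0;
}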
 
-	ret = nvkm_object_create_(parent, engine, oclass, pclass |
-				  NV_GPUOBJ_CLASS, length, pobject);
-	nvkm_object_ref(NULL, &parent);
-	gpuobj = *pobject;
-	if (ret)
-		return ret;
+/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static u32
+nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+	return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
+}
 
-	gpuobj->parent = pargpu;
-	gpuobj->flags = flags;
-	gpuobj->addr = addr;
-	gpuobj->size = size;
+static void
+nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+{
+	nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
+}
 
-	if (heap) {
-		ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1),
-				   &gpuobj->node);
-		if (ret)
-			return ret;
+static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
+static void
+nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->func = &nvkm_gpuobj_func;
+	nvkm_done(gpuobj->parent);
+}
 
-		gpuobj->addr += gpuobj->node->offset;
-	}
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_fast = {
+	.release = nvkm_gpuobj_release,
+	.rd32 = nvkm_gpuobj_rd32_fast,
+	.wr32 = nvkm_gpuobj_wr32_fast,
+};
 
-	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
-		ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
-		if (ret)
-			return ret;
-	}
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_slow = {
+	.release = nvkm_gpuobj_release,
+	.rd32 = nvkm_gpuobj_rd32,
+	.wr32 = nvkm_gpuobj_wr32,
+};
 
-	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, 0x00000000);
+static void *
+nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->map = nvkm_kmap(gpuobj->parent);
+	if (likely(gpuobj->map)) {
+		gpuobj->map  = (u8 *)gpuobj->map + gpuobj->node->offset;
+		gpuobj->func = &nvkm_gpuobj_fast;
+	} else {
+		gpuobj->func = &nvkm_gpuobj_slow;
 	}
-
-	return ret;
+	return gpuobj->map;
 }
 
-struct nvkm_gpuobj_class {
-	struct nvkm_object *pargpu;
-	u64 size;
-	u32 align;
-	u32 flags;
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_func = {
+	.acquire = nvkm_gpuobj_acquire,
 };
 
 static int
-_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
+		 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
 {
-	struct nvkm_gpuobj_class *args = data;
-	struct nvkm_gpuobj *object;
+	u32 offset;
 	int ret;
 
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
-				 args->size, args->align, args->flags,
-				 &object);
-	*pobject = nv_object(object);
-	if (ret)
-		return ret;
+	if (parent) {
+		if (align >= 0) {
+			ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
+					   max(align, 1), &gpuobj->node);
+		} else {
+			ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
+					   -align, &gpuobj->node);
+		}
+		if (ret)
+			return ret;
 
-	return 0;
-}
+		gpuobj->parent = parent;
+		gpuobj->func = &nvkm_gpuobj_func;
+		gpuobj->addr = parent->addr + gpuobj->node->offset;
+		gpuobj->size = gpuobj->node->length;
 
-void
-_nvkm_gpuobj_dtor(struct nvkm_object *object)
-{
-	nvkm_gpuobj_destroy(nv_gpuobj(object));
-}
-
-int
-_nvkm_gpuobj_init(struct nvkm_object *object)
-{
-	return nvkm_gpuobj_init(nv_gpuobj(object));
-}
+		if (zero) {
+			nvkm_kmap(gpuobj);
+			for (offset = 0; offset < gpuobj->size; offset += 4)
+				nvkm_wo32(gpuobj, offset, 0x00000000);
+			nvkm_done(gpuobj);
+		}
+	} else {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
+				      abs(align), zero, &gpuobj->memory);
+		if (ret)
+			return ret;
 
-int
-_nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend)
-{
-	return nvkm_gpuobj_fini(nv_gpuobj(object), suspend);
-}
+		gpuobj->func = &nvkm_gpuobj_heap;
+		gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
+		gpuobj->size = nvkm_memory_size(gpuobj->memory);
+	}
 
-u32
-_nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
-	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
-	if (gpuobj->node)
-		addr += gpuobj->node->offset;
-	return pfuncs->rd32(gpuobj->parent, addr);
+	return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
 }
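
One detail of the constructor above that is easy to miss: the sign of align selects the placement policy. Non-negative values sub-allocate from the head of the parent heap via nvkm_mm_head(), while negative values allocate from the tail via nvkm_mm_tail() with alignment -align. A toy bump allocator sketching the same convention (not the nvkm_mm implementation):

/* Sketch of the align sign convention in nvkm_gpuobj_ctor(): a non-negative
 * align sub-allocates from the front of the parent heap, a negative align
 * sub-allocates from the back with alignment -align.  Toy bump allocator. */
#include <stdio.h>

struct heap { unsigned head, tail; };	/* [head, tail) is free */

static unsigned alloc(struct heap *h, unsigned size, int align)
{
	if (align >= 0) {		/* like nvkm_mm_head() */
		unsigned a = align ? align : 1;
		unsigned off = (h->head + a - 1) / a * a;
		h->head = off + size;
		return off;
	} else {			/* like nvkm_mm_tail() */
		unsigned a = -align;
		unsigned off = (h->tail - size) / a * a;
		h->tail = off;
		return off;
	}
}

int main(void)
{
	struct heap h = { 0, 0x1000 };
	printf("head: 0x%x\n", alloc(&h, 0x100, 16));	/* 0x0 */
	printf("tail: 0x%x\n", alloc(&h, 0x100, -16));	/* 0xf00 */
	return 0;
}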
 
 void
-_nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
-	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
-	if (gpuobj->node)
-		addr += gpuobj->node->offset;
-	pfuncs->wr32(gpuobj->parent, addr, data);
+	struct nvkm_gpuobj *gpuobj = *pgpuobj;
+	if (gpuobj) {
+		if (gpuobj->parent)
+			nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
+		nvkm_mm_fini(&gpuobj->heap);
+		nvkm_memory_del(&gpuobj->memory);
+		kfree(*pgpuobj);
+		*pgpuobj = NULL;
+	}
 }
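
nvkm_gpuobj_del() follows the kernel's pointer-to-pointer destructor idiom: the callee frees the object and NULLs the caller's handle, so a second call is a harmless no-op. A small standalone illustration with a hypothetical type:

/* Sketch of the del(**p) idiom used by nvkm_gpuobj_del()/nvkm_memory_del():
 * taking a pointer-to-pointer lets the destructor NULL the caller's handle,
 * making repeated del calls harmless.  Hypothetical type. */
#include <stdlib.h>

struct thing { int dummy; };

static void thing_del(struct thing **pthing)
{
	if (*pthing) {
		free(*pthing);
		*pthing = NULL;	/* caller's pointer can no longer dangle */
	}
}

int main(void)
{
	struct thing *t = calloc(1, sizeof(*t));
	thing_del(&t);
	thing_del(&t);	/* safe: second call sees NULL and does nothing */
	return 0;
}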
 
-static struct nvkm_oclass
-_nvkm_gpuobj_oclass = {
-	.handle = 0x00000000,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpuobj_ctor,
-		.dtor = _nvkm_gpuobj_dtor,
-		.init = _nvkm_gpuobj_init,
-		.fini = _nvkm_gpuobj_fini,
-		.rd32 = _nvkm_gpuobj_rd32,
-		.wr32 = _nvkm_gpuobj_wr32,
-	},
-};
-
 int
-nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
-		u32 size, u32 align, u32 flags,
-		struct nvkm_gpuobj **pgpuobj)
+nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
+		struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_object *engine = parent;
-	struct nvkm_gpuobj_class args = {
-		.pargpu = pargpu,
-		.size = size,
-		.align = align,
-		.flags = flags,
-	};
-
-	if (!nv_iclass(engine, NV_SUBDEV_CLASS))
-		engine = &engine->engine->subdev.object;
-	BUG_ON(engine == NULL);
-
-	return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass,
-				&args, sizeof(args),
-				(struct nvkm_object **)pgpuobj);
-}
+	struct nvkm_gpuobj *gpuobj;
+	int ret;
 
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma)
-{
-	struct nvkm_bar *bar = nvkm_bar(gpuobj);
-	int ret = -EINVAL;
-
-	if (bar && bar->umap) {
-		struct nvkm_instobj *iobj = (void *)
-			nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
-		struct nvkm_mem **mem = (void *)(iobj + 1);
-		ret = bar->umap(bar, *mem, access, vma);
-	}
+	if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
+		return -ENOMEM;
 
+	ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
+	if (ret)
+		nvkm_gpuobj_del(pgpuobj);
 	return ret;
 }
 
 int
-nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
-		   u32 access, struct nvkm_vma *vma)
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
+		u32 access, struct nvkm_vma *vma)
 {
-	struct nvkm_instobj *iobj = (void *)
-		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
-	struct nvkm_mem **mem = (void *)(iobj + 1);
-	int ret;
-
-	ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, *mem);
-	return 0;
+	struct nvkm_memory *memory = gpuobj->memory;
+	int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
+	if (ret == 0)
+		nvkm_memory_map(memory, vma, 0);
+	return ret;
 }
 
 void
@@ -278,39 +243,13 @@ nvkm_gpuobj_unmap(struct nvkm_vma *vma)
  * anywhere else.
  */
 
-static void
-nvkm_gpudup_dtor(struct nvkm_object *object)
-{
-	struct nvkm_gpuobj *gpuobj = (void *)object;
-	nvkm_object_ref(NULL, &gpuobj->parent);
-	nvkm_object_destroy(&gpuobj->object);
-}
-
-static struct nvkm_oclass
-nvkm_gpudup_oclass = {
-	.handle = NV_GPUOBJ_CLASS,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_gpudup_dtor,
-		.init = nvkm_object_init,
-		.fini = nvkm_object_fini,
-	},
-};
-
 int
-nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
-		struct nvkm_gpuobj **pgpuobj)
+nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *gpuobj;
-	int ret;
-
-	ret = nvkm_object_create(parent, &parent->engine->subdev.object,
-				 &nvkm_gpudup_oclass, 0, &gpuobj);
-	*pgpuobj = gpuobj;
-	if (ret)
-		return ret;
+	if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
+		return -ENOMEM;
 
-	nvkm_object_ref(nv_object(base), &gpuobj->parent);
-	gpuobj->addr = base->addr;
-	gpuobj->size = base->size;
+	(*pgpuobj)->addr = nvkm_memory_addr(memory);
+	(*pgpuobj)->size = nvkm_memory_size(memory);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/handle.c b/drivers/gpu/drm/nouveau/nvkm/core/handle.c
deleted file mode 100644
index dc7ff10ebe7b..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/handle.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/handle.h>
-#include <core/client.h>
-
-#define hprintk(h,l,f,a...) do {                                               \
-	struct nvkm_client *c = nvkm_client((h)->object);                      \
-	struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0;         \
-	nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a);               \
-} while(0)
-
-int
-nvkm_handle_init(struct nvkm_handle *handle)
-{
-	struct nvkm_handle *item;
-	int ret;
-
-	hprintk(handle, TRACE, "init running\n");
-	ret = nvkm_object_inc(handle->object);
-	if (ret)
-		return ret;
-
-	hprintk(handle, TRACE, "init children\n");
-	list_for_each_entry(item, &handle->tree, head) {
-		ret = nvkm_handle_init(item);
-		if (ret)
-			goto fail;
-	}
-
-	hprintk(handle, TRACE, "init completed\n");
-	return 0;
-fail:
-	hprintk(handle, ERROR, "init failed with %d\n", ret);
-	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
-		nvkm_handle_fini(item, false);
-	}
-
-	nvkm_object_dec(handle->object, false);
-	return ret;
-}
-
-int
-nvkm_handle_fini(struct nvkm_handle *handle, bool suspend)
-{
-	static char *name[2] = { "fini", "suspend" };
-	struct nvkm_handle *item;
-	int ret;
-
-	hprintk(handle, TRACE, "%s children\n", name[suspend]);
-	list_for_each_entry(item, &handle->tree, head) {
-		ret = nvkm_handle_fini(item, suspend);
-		if (ret && suspend)
-			goto fail;
-	}
-
-	hprintk(handle, TRACE, "%s running\n", name[suspend]);
-	if (handle->object) {
-		ret = nvkm_object_dec(handle->object, suspend);
-		if (ret && suspend)
-			goto fail;
-	}
-
-	hprintk(handle, TRACE, "%s completed\n", name[suspend]);
-	return 0;
-fail:
-	hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
-	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
-		int rret = nvkm_handle_init(item);
-		if (rret)
-			hprintk(handle, FATAL, "failed to restart, %d\n", rret);
-	}
-
-	return ret;
-}
-
-int
-nvkm_handle_create(struct nvkm_object *parent, u32 _parent, u32 _handle,
-		   struct nvkm_object *object, struct nvkm_handle **phandle)
-{
-	struct nvkm_object *namedb;
-	struct nvkm_handle *handle;
-	int ret;
-
-	namedb = parent;
-	while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
-		namedb = namedb->parent;
-
-	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-	if (!handle)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&handle->head);
-	INIT_LIST_HEAD(&handle->tree);
-	handle->name = _handle;
-	handle->priv = ~0;
-
-	ret = nvkm_namedb_insert(nv_namedb(namedb), _handle, object, handle);
-	if (ret) {
-		kfree(handle);
-		return ret;
-	}
-
-	if (nv_parent(parent)->object_attach) {
-		ret = nv_parent(parent)->object_attach(parent, object, _handle);
-		if (ret < 0) {
-			nvkm_handle_destroy(handle);
-			return ret;
-		}
-
-		handle->priv = ret;
-	}
-
-	if (object != namedb) {
-		while (!nv_iclass(namedb, NV_CLIENT_CLASS))
-			namedb = namedb->parent;
-
-		handle->parent = nvkm_namedb_get(nv_namedb(namedb), _parent);
-		if (handle->parent) {
-			list_add(&handle->head, &handle->parent->tree);
-			nvkm_namedb_put(handle->parent);
-		}
-	}
-
-	hprintk(handle, TRACE, "created\n");
-	*phandle = handle;
-	return 0;
-}
-
-void
-nvkm_handle_destroy(struct nvkm_handle *handle)
-{
-	struct nvkm_handle *item, *temp;
-
-	hprintk(handle, TRACE, "destroy running\n");
-	list_for_each_entry_safe(item, temp, &handle->tree, head) {
-		nvkm_handle_destroy(item);
-	}
-	list_del(&handle->head);
-
-	if (handle->priv != ~0) {
-		struct nvkm_object *parent = handle->parent->object;
-		nv_parent(parent)->object_detach(parent, handle->priv);
-	}
-
-	hprintk(handle, TRACE, "destroy completed\n");
-	nvkm_namedb_remove(handle);
-	kfree(handle);
-}
-
-struct nvkm_object *
-nvkm_handle_ref(struct nvkm_object *parent, u32 name)
-{
-	struct nvkm_object *object = NULL;
-	struct nvkm_handle *handle;
-
-	while (!nv_iclass(parent, NV_NAMEDB_CLASS))
-		parent = parent->parent;
-
-	handle = nvkm_namedb_get(nv_namedb(parent), name);
-	if (handle) {
-		nvkm_object_ref(handle->object, &object);
-		nvkm_namedb_put(handle);
-	}
-
-	return object;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_class(struct nvkm_object *engctx, u16 oclass)
-{
-	struct nvkm_namedb *namedb;
-	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-		return nvkm_namedb_get_class(namedb, oclass);
-	return NULL;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_vinst(struct nvkm_object *engctx, u64 vinst)
-{
-	struct nvkm_namedb *namedb;
-	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-		return nvkm_namedb_get_vinst(namedb, vinst);
-	return NULL;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_cinst(struct nvkm_object *engctx, u32 cinst)
-{
-	struct nvkm_namedb *namedb;
-	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-		return nvkm_namedb_get_cinst(namedb, cinst);
-	return NULL;
-}
-
-void
-nvkm_handle_put(struct nvkm_handle *handle)
-{
-	if (handle)
-		nvkm_namedb_put(handle);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 4459ff5f4cb8..d87d6ab03cc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -24,196 +24,154 @@
 #include <core/ioctl.h>
 #include <core/client.h>
 #include <core/engine.h>
-#include <core/handle.h>
-#include <core/namedb.h>
 
 #include <nvif/unpack.h>
 #include <nvif/ioctl.h>
 
 static int
-nvkm_ioctl_nop(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
-		struct nvif_ioctl_nop none;
+		struct nvif_ioctl_nop_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "nop size %d\n", size);
-	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "nop\n");
+	nvif_ioctl(object, "nop size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "nop vers %lld\n", args->v0.version);
+		args->v0.version = NVIF_VERSION_LATEST;
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_sclass(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
 		struct nvif_ioctl_sclass_v0 v0;
 	} *args = data;
-	int ret;
+	struct nvkm_oclass oclass;
+	int ret, i = 0;
 
-	if (!nv_iclass(object, NV_PARENT_CLASS)) {
-		nv_debug(object, "cannot have children (sclass)\n");
-		return -ENODEV;
-	}
-
-	nv_ioctl(object, "sclass size %d\n", size);
+	nvif_ioctl(object, "sclass size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "sclass vers %d count %d\n",
-			 args->v0.version, args->v0.count);
-		if (size == args->v0.count * sizeof(args->v0.oclass[0])) {
-			ret = nvkm_parent_lclass(object, args->v0.oclass,
-							 args->v0.count);
-			if (ret >= 0) {
-				args->v0.count = ret;
-				ret = 0;
+		nvif_ioctl(object, "sclass vers %d count %d\n",
+			   args->v0.version, args->v0.count);
+		if (size != args->v0.count * sizeof(args->v0.oclass[0]))
+			return -EINVAL;
+
+		while (object->func->sclass &&
+		       object->func->sclass(object, i, &oclass) >= 0) {
+			if (i < args->v0.count) {
+				args->v0.oclass[i].oclass = oclass.base.oclass;
+				args->v0.oclass[i].minver = oclass.base.minver;
+				args->v0.oclass[i].maxver = oclass.base.maxver;
 			}
-		} else {
-			ret = -EINVAL;
+			i++;
 		}
+
+		args->v0.count = i;
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_new(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_new_v0 v0;
 	} *args = data;
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *engctx = NULL;
+	struct nvkm_client *client = parent->client;
 	struct nvkm_object *object = NULL;
-	struct nvkm_parent *parent;
-	struct nvkm_object *engine;
-	struct nvkm_oclass *oclass;
-	u32 _handle, _oclass;
-	int ret;
+	struct nvkm_oclass oclass;
+	int ret, i = 0;
 
-	nv_ioctl(client, "new size %d\n", size);
+	nvif_ioctl(parent, "new size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		_handle = args->v0.handle;
-		_oclass = args->v0.oclass;
+		nvif_ioctl(parent, "new vers %d handle %08x class %08x "
+				   "route %02x token %llx object %016llx\n",
+			   args->v0.version, args->v0.handle, args->v0.oclass,
+			   args->v0.route, args->v0.token, args->v0.object);
 	} else
 		return ret;
 
-	nv_ioctl(client, "new vers %d handle %08x class %08x "
-			 "route %02x token %llx\n",
-		 args->v0.version, _handle, _oclass,
-		 args->v0.route, args->v0.token);
-
-	if (!nv_iclass(handle->object, NV_PARENT_CLASS)) {
-		nv_debug(handle->object, "cannot have children (ctor)\n");
-		ret = -ENODEV;
-		goto fail_class;
+	if (!parent->func->sclass) {
+		nvif_ioctl(parent, "cannot have children\n");
+		return -EINVAL;
 	}
 
-	parent = nv_parent(handle->object);
-
-	/* check that parent supports the requested subclass */
-	ret = nvkm_parent_sclass(&parent->object, _oclass, &engine, &oclass);
-	if (ret) {
-		nv_debug(parent, "illegal class 0x%04x\n", _oclass);
-		goto fail_class;
-	}
-
-	/* make sure engine init has been completed *before* any objects
-	 * it controls are created - the constructors may depend on
-	 * state calculated at init (ie. default context construction)
-	 */
-	if (engine) {
-		ret = nvkm_object_inc(engine);
+	do {
+		memset(&oclass, 0x00, sizeof(oclass));
+		oclass.client = client;
+		oclass.handle = args->v0.handle;
+		oclass.object = args->v0.object;
+		oclass.parent = parent;
+		ret = parent->func->sclass(parent, i++, &oclass);
 		if (ret)
-			goto fail_class;
+			return ret;
+	} while (oclass.base.oclass != args->v0.oclass);
+
+	if (oclass.engine) {
+		oclass.engine = nvkm_engine_ref(oclass.engine);
+		if (IS_ERR(oclass.engine))
+			return PTR_ERR(oclass.engine);
 	}
 
-	/* if engine requires it, create a context object to insert
-	 * between the parent and its children (eg. PGRAPH context)
-	 */
-	if (engine && nv_engine(engine)->cclass) {
-		ret = nvkm_object_ctor(&parent->object, engine,
-				       nv_engine(engine)->cclass,
-				       data, size, &engctx);
-		if (ret)
-			goto fail_engctx;
-	} else {
-		nvkm_object_ref(&parent->object, &engctx);
+	ret = oclass.ctor(&oclass, data, size, &object);
+	nvkm_engine_unref(&oclass.engine);
+	if (ret == 0) {
+		ret = nvkm_object_init(object);
+		if (ret == 0) {
+			list_add(&object->head, &parent->tree);
+			object->route = args->v0.route;
+			object->token = args->v0.token;
+			object->object = args->v0.object;
+			if (nvkm_client_insert(client, object)) {
+				client->data = object;
+				return 0;
+			}
+			ret = -EEXIST;
+		}
+		nvkm_object_fini(object, false);
 	}
 
-	/* finally, create new object and bind it to its handle */
-	ret = nvkm_object_ctor(engctx, engine, oclass, data, size, &object);
-	client->data = object;
-	if (ret)
-		goto fail_ctor;
-
-	ret = nvkm_object_inc(object);
-	if (ret)
-		goto fail_init;
-
-	ret = nvkm_handle_create(&parent->object, handle->name,
-				 _handle, object, &handle);
-	if (ret)
-		goto fail_handle;
-
-	ret = nvkm_handle_init(handle);
-	handle->route = args->v0.route;
-	handle->token = args->v0.token;
-	if (ret)
-		nvkm_handle_destroy(handle);
-
-fail_handle:
-	nvkm_object_dec(object, false);
-fail_init:
-	nvkm_object_ref(NULL, &object);
-fail_ctor:
-	nvkm_object_ref(NULL, &engctx);
-fail_engctx:
-	if (engine)
-		nvkm_object_dec(engine, false);
-fail_class:
+	nvkm_object_del(&object);
 	return ret;
 }
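
The do/while in nvkm_ioctl_new() above enumerates the parent's child classes by index until the class the client asked for turns up, bailing out if the enumerator runs dry. The same shape, sketched against a hypothetical table-driven enumerator rather than the real nvkm object model:

/* Sketch of the sclass lookup loop in nvkm_ioctl_new(): the parent exposes
 * its child classes by index, and the loop walks indices until the class
 * requested by the client is found (or the enumerator reports an error). */
#include <stdio.h>

static const unsigned classes[] = { 0x506e, 0x826e, 0x906e };

static int sclass(int index, unsigned *oclass)
{
	if (index < 0 || index >= (int)(sizeof(classes) / sizeof(classes[0])))
		return -1;	/* no more classes to offer */
	*oclass = classes[index];
	return 0;
}

static int lookup(unsigned wanted)
{
	unsigned oclass;
	int i = 0;

	do {
		if (sclass(i++, &oclass))
			return -1;
	} while (oclass != wanted);
	return i - 1;
}

int main(void)
{
	printf("0x906e at index %d\n", lookup(0x906e));	/* index 2 */
	return 0;
}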
 
 static int
-nvkm_ioctl_del(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
 		struct nvif_ioctl_del none;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "delete size %d\n", size);
+	nvif_ioctl(object, "delete size %d\n", size);
 	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "delete\n");
-		nvkm_handle_fini(handle, false);
-		nvkm_handle_destroy(handle);
+		nvif_ioctl(object, "delete\n");
+		nvkm_object_fini(object, false);
+		nvkm_object_del(&object);
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_mthd_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "mthd size %d\n", size);
+	nvif_ioctl(object, "mthd size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "mthd vers %d mthd %02x\n",
-			 args->v0.version, args->v0.method);
-		if (ret = -ENODEV, ofuncs->mthd)
-			ret = ofuncs->mthd(object, args->v0.method, data, size);
+		nvif_ioctl(object, "mthd vers %d mthd %02x\n",
+			   args->v0.version, args->v0.method);
+		ret = nvkm_object_mthd(object, args->v0.method, data, size);
 	}
 
 	return ret;
@@ -221,37 +179,34 @@ nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size)
 
 
 static int
-nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_rd_v0 v0;
 	} *args = data;
+	union {
+		u8  b08;
+		u16 b16;
+		u32 b32;
+	} v;
 	int ret;
 
-	nv_ioctl(object, "rd size %d\n", size);
+	nvif_ioctl(object, "rd size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "rd vers %d size %d addr %016llx\n",
-			 args->v0.version, args->v0.size, args->v0.addr);
+		nvif_ioctl(object, "rd vers %d size %d addr %016llx\n",
+			   args->v0.version, args->v0.size, args->v0.addr);
 		switch (args->v0.size) {
 		case 1:
-			if (ret = -ENODEV, ofuncs->rd08) {
-				args->v0.data = nv_ro08(object, args->v0.addr);
-				ret = 0;
-			}
+			ret = nvkm_object_rd08(object, args->v0.addr, &v.b08);
+			args->v0.data = v.b08;
 			break;
 		case 2:
-			if (ret = -ENODEV, ofuncs->rd16) {
-				args->v0.data = nv_ro16(object, args->v0.addr);
-				ret = 0;
-			}
+			ret = nvkm_object_rd16(object, args->v0.addr, &v.b16);
+			args->v0.data = v.b16;
 			break;
 		case 4:
-			if (ret = -ENODEV, ofuncs->rd32) {
-				args->v0.data = nv_ro32(object, args->v0.addr);
-				ret = 0;
-			}
+			ret = nvkm_object_rd32(object, args->v0.addr, &v.b32);
+			args->v0.data = v.b32;
 			break;
 		default:
 			ret = -EINVAL;
@@ -263,104 +218,81 @@ nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size)
 }
 
 static int
-nvkm_ioctl_wr(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_wr_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "wr size %d\n", size);
+	nvif_ioctl(object, "wr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n",
-			 args->v0.version, args->v0.size, args->v0.addr,
-			 args->v0.data);
-		switch (args->v0.size) {
-		case 1:
-			if (ret = -ENODEV, ofuncs->wr08) {
-				nv_wo08(object, args->v0.addr, args->v0.data);
-				ret = 0;
-			}
-			break;
-		case 2:
-			if (ret = -ENODEV, ofuncs->wr16) {
-				nv_wo16(object, args->v0.addr, args->v0.data);
-				ret = 0;
-			}
-			break;
-		case 4:
-			if (ret = -ENODEV, ofuncs->wr32) {
-				nv_wo32(object, args->v0.addr, args->v0.data);
-				ret = 0;
-			}
-			break;
-		default:
-			ret = -EINVAL;
-			break;
-		}
+		nvif_ioctl(object,
+			   "wr vers %d size %d addr %016llx data %08x\n",
+			   args->v0.version, args->v0.size, args->v0.addr,
+			   args->v0.data);
+	} else
+		return ret;
+
+	switch (args->v0.size) {
+	case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data);
+	case 2: return nvkm_object_wr16(object, args->v0.addr, args->v0.data);
+	case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data);
+	default:
+		break;
 	}
 
-	return ret;
+	return -EINVAL;
 }
 
 static int
-nvkm_ioctl_map(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_map_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "map size %d\n", size);
+	nvif_ioctl(object, "map size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "map vers %d\n", args->v0.version);
-		if (ret = -ENODEV, ofuncs->map) {
-			ret = ofuncs->map(object, &args->v0.handle,
-						  &args->v0.length);
-		}
+		nvif_ioctl(object, "map vers %d\n", args->v0.version);
+		ret = nvkm_object_map(object, &args->v0.handle,
+					      &args->v0.length);
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_unmap(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
 		struct nvif_ioctl_unmap none;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "unmap size %d\n", size);
+	nvif_ioctl(object, "unmap size %d\n", size);
 	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "unmap\n");
+		nvif_ioctl(object, "unmap\n");
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_ntfy_new_v0 v0;
 	} *args = data;
 	struct nvkm_event *event;
 	int ret;
 
-	nv_ioctl(object, "ntfy new size %d\n", size);
+	nvif_ioctl(object, "ntfy new size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "ntfy new vers %d event %02x\n",
-			 args->v0.version, args->v0.event);
-		if (ret = -ENODEV, ofuncs->ntfy)
-			ret = ofuncs->ntfy(object, args->v0.event, &event);
+		nvif_ioctl(object, "ntfy new vers %d event %02x\n",
+			   args->v0.version, args->v0.event);
+		ret = nvkm_object_ntfy(object, args->v0.event, &event);
 		if (ret == 0) {
 			ret = nvkm_client_notify_new(object, event, data, size);
 			if (ret >= 0) {
@@ -374,19 +306,18 @@ nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size)
 }
 
 static int
-nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *object = handle->object;
+	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_del_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "ntfy del size %d\n", size);
+	nvif_ioctl(object, "ntfy del size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "ntfy del vers %d index %d\n",
-			 args->v0.version, args->v0.index);
+		nvif_ioctl(object, "ntfy del vers %d index %d\n",
+			   args->v0.version, args->v0.index);
 		ret = nvkm_client_notify_del(client, args->v0.index);
 	}
 
@@ -394,19 +325,18 @@ nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size)
 }
 
 static int
-nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *object = handle->object;
+	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_get_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "ntfy get size %d\n", size);
+	nvif_ioctl(object, "ntfy get size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "ntfy get vers %d index %d\n",
-			 args->v0.version, args->v0.index);
+		nvif_ioctl(object, "ntfy get vers %d index %d\n",
+			   args->v0.version, args->v0.index);
 		ret = nvkm_client_notify_get(client, args->v0.index);
 	}
 
@@ -414,19 +344,18 @@ nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size)
 }
 
 static int
-nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *object = handle->object;
+	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_put_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "ntfy put size %d\n", size);
+	nvif_ioctl(object, "ntfy put size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "ntfy put vers %d index %d\n",
-			 args->v0.version, args->v0.index);
+		nvif_ioctl(object, "ntfy put vers %d index %d\n",
+			   args->v0.version, args->v0.index);
 		ret = nvkm_client_notify_put(client, args->v0.index);
 	}
 
@@ -435,7 +364,7 @@ nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size)
 
 static struct {
 	int version;
-	int (*func)(struct nvkm_handle *, void *, u32);
+	int (*func)(struct nvkm_object *, void *, u32);
 }
 nvkm_ioctl_v0[] = {
 	{ 0x00, nvkm_ioctl_nop },
@@ -454,40 +383,31 @@ nvkm_ioctl_v0[] = {
 };
 
 static int
-nvkm_ioctl_path(struct nvkm_handle *parent, u32 type, u32 nr, u32 *path,
+nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
 		void *data, u32 size, u8 owner, u8 *route, u64 *token)
 {
-	struct nvkm_handle *handle = parent;
-	struct nvkm_namedb *namedb;
 	struct nvkm_object *object;
 	int ret;
 
-	while ((object = parent->object), nr--) {
-		nv_ioctl(object, "path 0x%08x\n", path[nr]);
-		if (!nv_iclass(object, NV_PARENT_CLASS)) {
-			nv_debug(object, "cannot have children (path)\n");
-			return -EINVAL;
-		}
-
-		if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
-		    !(handle = nvkm_namedb_get(namedb, path[nr]))) {
-			nv_debug(object, "handle 0x%08x not found\n", path[nr]);
-			return -ENOENT;
-		}
-		nvkm_namedb_put(handle);
-		parent = handle;
+	if (handle)
+		object = nvkm_client_search(client, handle);
+	else
+		object = &client->object;
+	if (unlikely(!object)) {
+		nvif_ioctl(&client->object, "object not found\n");
+		return -ENOENT;
 	}
 
-	if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != handle->route) {
-		nv_ioctl(object, "object route != owner\n");
+	if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
+		nvif_ioctl(&client->object, "route != owner\n");
 		return -EACCES;
 	}
-	*route = handle->route;
-	*token = handle->token;
+	*route = object->route;
+	*token = object->token;
 
 	if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
 		if (nvkm_ioctl_v0[type].version == 0)
-			ret = nvkm_ioctl_v0[type].func(handle, data, size);
+			ret = nvkm_ioctl_v0[type].func(object, data, size);
 	}
 
 	return ret;
@@ -497,25 +417,26 @@ int
 nvkm_ioctl(struct nvkm_client *client, bool supervisor,
 	   void *data, u32 size, void **hack)
 {
+	struct nvkm_object *object = &client->object;
 	union {
 		struct nvif_ioctl_v0 v0;
 	} *args = data;
 	int ret;
 
 	client->super = supervisor;
-	nv_ioctl(client, "size %d\n", size);
+	nvif_ioctl(object, "size %d\n", size);
 
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(client, "vers %d type %02x path %d owner %02x\n",
-			 args->v0.version, args->v0.type, args->v0.path_nr,
-			 args->v0.owner);
-		ret = nvkm_ioctl_path(client->root, args->v0.type,
-				      args->v0.path_nr, args->v0.path,
+		nvif_ioctl(object,
+			   "vers %d type %02x object %016llx owner %02x\n",
+			   args->v0.version, args->v0.type, args->v0.object,
+			   args->v0.owner);
+		ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type,
 				      data, size, args->v0.owner,
 				      &args->v0.route, &args->v0.token);
 	}
 
-	nv_ioctl(client, "return %d\n", ret);
+	nvif_ioctl(object, "return %d\n", ret);
 	if (hack) {
 		*hack = client->data;
 		client->data = NULL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
new file mode 100644
index 000000000000..8903c04c977e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/memory.h>
+#include <subdev/instmem.h>
+
+void
+nvkm_memory_ctor(const struct nvkm_memory_func *func,
+		 struct nvkm_memory *memory)
+{
+	memory->func = func;
+}
+
+void
+nvkm_memory_del(struct nvkm_memory **pmemory)
+{
+	struct nvkm_memory *memory = *pmemory;
+	if (memory && !WARN_ON(!memory->func)) {
+		if (memory->func->dtor)
+			*pmemory = memory->func->dtor(memory);
+		kfree(*pmemory);
+		*pmemory = NULL;
+	}
+}
+
+int
+nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
+		u64 size, u32 align, bool zero,
+		struct nvkm_memory **pmemory)
+{
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_memory *memory;
+	int ret;
+
+	if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
+		return -ENOSYS;
+
+	ret = nvkm_instobj_new(imem, size, align, zero, &memory);
+	if (ret)
+		return ret;
+
+	*pmemory = memory;
+	return 0;
+}
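
nvkm_memory_del() relies on a subtle convention: a dtor returns the pointer that should actually be kfree()d, which lets a derived type that embeds the base structure hand back its containing allocation. A standalone sketch of the convention with hypothetical types:

/* Sketch of the "dtor returns the pointer to free" convention used by
 * nvkm_memory_del() and nvkm_object_del(): a derived type embeds the base
 * as its first member and its dtor returns the containing allocation, so
 * the generic del path frees the right block. */
#include <stdlib.h>
#include <stdio.h>

struct base;
struct base_func { void *(*dtor)(struct base *); };
struct base { const struct base_func *func; };

struct derived {
	struct base base;	/* must stay the first member */
	char payload[64];
};

static void *derived_dtor(struct base *b)
{
	struct derived *d = (struct derived *)b;
	printf("tearing down derived state\n");
	return d;	/* tell the caller which allocation to free */
}

static const struct base_func derived_func = { .dtor = derived_dtor };

static void base_del(struct base **pbase)
{
	struct base *b = *pbase;
	if (b) {
		void *data = b->func->dtor ? b->func->dtor(b) : b;
		free(data);
		*pbase = NULL;
	}
}

int main(void)
{
	struct derived *d = calloc(1, sizeof(*d));
	struct base *b = &d->base;
	d->base.func = &derived_func;
	base_del(&b);
	return 0;
}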
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 7f458dfd5608..09a1eee8fd33 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -26,7 +26,7 @@
 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL :          \
 	list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)
 
-static void
+void
 nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
 {
 	struct nvkm_mm_node *node;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c b/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
deleted file mode 100644
index 6400767c5dba..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/namedb.h>
-#include <core/gpuobj.h>
-#include <core/handle.h>
-
-static struct nvkm_handle *
-nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (handle->name == name)
-			return handle;
-	}
-
-	return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, u16 oclass)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (nv_mclass(handle->object) == oclass)
-			return handle;
-	}
-
-	return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-			if (nv_gpuobj(handle->object)->addr == vinst)
-				return handle;
-		}
-	}
-
-	return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-			if (nv_gpuobj(handle->object)->node &&
-			    nv_gpuobj(handle->object)->node->offset == cinst)
-				return handle;
-		}
-	}
-
-	return NULL;
-}
-
-int
-nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name,
-		   struct nvkm_object *object,
-		   struct nvkm_handle *handle)
-{
-	int ret = -EEXIST;
-	write_lock_irq(&namedb->lock);
-	if (!nvkm_namedb_lookup(namedb, name)) {
-		nvkm_object_ref(object, &handle->object);
-		handle->namedb = namedb;
-		list_add(&handle->node, &namedb->list);
-		ret = 0;
-	}
-	write_unlock_irq(&namedb->lock);
-	return ret;
-}
-
-void
-nvkm_namedb_remove(struct nvkm_handle *handle)
-{
-	struct nvkm_namedb *namedb = handle->namedb;
-	struct nvkm_object *object = handle->object;
-	write_lock_irq(&namedb->lock);
-	list_del(&handle->node);
-	write_unlock_irq(&namedb->lock);
-	nvkm_object_ref(NULL, &object);
-}
-
-struct nvkm_handle *
-nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup(namedb, name);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_class(struct nvkm_namedb *namedb, u16 oclass)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup_class(namedb, oclass);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup_vinst(namedb, vinst);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup_cinst(namedb, cinst);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-void
-nvkm_namedb_put(struct nvkm_handle *handle)
-{
-	if (handle)
-		read_unlock(&handle->namedb->lock);
-}
-
-int
-nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    struct nvkm_oclass *sclass, u64 engcls,
-		    int length, void **pobject)
-{
-	struct nvkm_namedb *namedb;
-	int ret;
-
-	ret = nvkm_parent_create_(parent, engine, oclass, pclass |
-				  NV_NAMEDB_CLASS, sclass, engcls,
-				  length, pobject);
-	namedb = *pobject;
-	if (ret)
-		return ret;
-
-	rwlock_init(&namedb->lock);
-	INIT_LIST_HEAD(&namedb->list);
-	return 0;
-}
-
-int
-_nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_namedb *object;
-	int ret;
-
-	ret = nvkm_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
-	*pobject = nv_object(object);
-	if (ret)
-		return ret;
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 979f3627d395..67aa7223dcd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -22,309 +22,243 @@
  * Authors: Ben Skeggs
  */
 #include <core/object.h>
+#include <core/client.h>
 #include <core/engine.h>
 
-#ifdef NVKM_OBJECT_MAGIC
-static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
-static DEFINE_SPINLOCK(_objlist_lock);
-#endif
-
 int
-nvkm_object_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    int size, void **pobject)
+nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
-	struct nvkm_object *object;
-
-	object = *pobject = kzalloc(size, GFP_KERNEL);
-	if (!object)
-		return -ENOMEM;
-
-	nvkm_object_ref(parent, &object->parent);
-	nvkm_object_ref(engine, (struct nvkm_object **)&object->engine);
-	object->oclass = oclass;
-	object->oclass->handle |= pclass;
-	atomic_set(&object->refcount, 1);
-	atomic_set(&object->usecount, 0);
-
-#ifdef NVKM_OBJECT_MAGIC
-	object->_magic = NVKM_OBJECT_MAGIC;
-	spin_lock(&_objlist_lock);
-	list_add(&object->list, &_objlist);
-	spin_unlock(&_objlist_lock);
-#endif
-	return 0;
+	if (likely(object->func->mthd))
+		return object->func->mthd(object, mthd, data, size);
+	return -ENODEV;
 }
 
 int
-_nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
+		 struct nvkm_event **pevent)
 {
-	if (size != 0)
-		return -ENOSYS;
-	return nvkm_object_create(parent, engine, oclass, 0, pobject);
+	if (likely(object->func->ntfy))
+		return object->func->ntfy(object, mthd, pevent);
+	return -ENODEV;
 }
 
-void
-nvkm_object_destroy(struct nvkm_object *object)
+int
+nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size)
 {
-#ifdef NVKM_OBJECT_MAGIC
-	spin_lock(&_objlist_lock);
-	list_del(&object->list);
-	spin_unlock(&_objlist_lock);
-#endif
-	nvkm_object_ref(NULL, (struct nvkm_object **)&object->engine);
-	nvkm_object_ref(NULL, &object->parent);
-	kfree(object);
+	if (likely(object->func->map))
+		return object->func->map(object, addr, size);
+	return -ENODEV;
 }
 
 int
-nvkm_object_init(struct nvkm_object *object)
+nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data)
 {
-	return 0;
+	if (likely(object->func->rd08))
+		return object->func->rd08(object, addr, data);
+	return -ENODEV;
 }
 
 int
-nvkm_object_fini(struct nvkm_object *object, bool suspend)
+nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data)
 {
-	return 0;
+	if (likely(object->func->rd16))
+		return object->func->rd16(object, addr, data);
+	return -ENODEV;
 }
 
-struct nvkm_ofuncs
-nvkm_object_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
-};
-
 int
-nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
+nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data)
 {
-	struct nvkm_ofuncs *ofuncs = oclass->ofuncs;
-	struct nvkm_object *object = NULL;
-	int ret;
-
-	ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
-	*pobject = object;
-	if (ret < 0) {
-		if (ret != -ENODEV) {
-			nv_error(parent, "failed to create 0x%08x, %d\n",
-				 oclass->handle, ret);
-		}
-
-		if (object) {
-			ofuncs->dtor(object);
-			*pobject = NULL;
-		}
-
-		return ret;
-	}
-
-	if (ret == 0) {
-		nv_trace(object, "created\n");
-		atomic_set(&object->refcount, 1);
-	}
-
-	return 0;
+	if (likely(object->func->rd32))
+		return object->func->rd32(object, addr, data);
+	return -ENODEV;
 }
 
-static void
-nvkm_object_dtor(struct nvkm_object *object)
+int
+nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data)
 {
-	nv_trace(object, "destroying\n");
-	nv_ofuncs(object)->dtor(object);
+	if (likely(object->func->wr08))
+		return object->func->wr08(object, addr, data);
+	return -ENODEV;
 }
 
-void
-nvkm_object_ref(struct nvkm_object *obj, struct nvkm_object **ref)
+int
+nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data)
 {
-	if (obj) {
-		atomic_inc(&obj->refcount);
-		nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount));
-	}
+	if (likely(object->func->wr16))
+		return object->func->wr16(object, addr, data);
+	return -ENODEV;
+}
 
-	if (*ref) {
-		int dead = atomic_dec_and_test(&(*ref)->refcount);
-		nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
-		if (dead)
-			nvkm_object_dtor(*ref);
-	}
+int
+nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	if (likely(object->func->wr32))
+		return object->func->wr32(object, addr, data);
+	return -ENODEV;
+}
 
-	*ref = obj;
+int
+nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	if (object->func->bind)
+		return object->func->bind(object, gpuobj, align, pgpuobj);
+	return -ENODEV;
 }
 
 int
-nvkm_object_inc(struct nvkm_object *object)
+nvkm_object_fini(struct nvkm_object *object, bool suspend)
 {
-	int ref = atomic_add_return(1, &object->usecount);
+	const char *action = suspend ? "suspend" : "fini";
+	struct nvkm_object *child;
+	s64 time;
 	int ret;
 
-	nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount));
-	if (ref != 1)
-		return 0;
-
-	nv_trace(object, "initialising...\n");
-	if (object->parent) {
-		ret = nvkm_object_inc(object->parent);
-		if (ret) {
-			nv_error(object, "parent failed, %d\n", ret);
-			goto fail_parent;
-		}
+	nvif_debug(object, "%s children...\n", action);
+	time = ktime_to_us(ktime_get());
+	list_for_each_entry(child, &object->tree, head) {
+		ret = nvkm_object_fini(child, suspend);
+		if (ret && suspend)
+			goto fail_child;
 	}
 
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		ret = nvkm_object_inc(&object->engine->subdev.object);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
+	nvif_debug(object, "%s running...\n", action);
+	if (object->func->fini) {
+		ret = object->func->fini(object, suspend);
 		if (ret) {
-			nv_error(object, "engine failed, %d\n", ret);
-			goto fail_engine;
+			nvif_error(object, "%s failed with %d\n", action, ret);
+			if (suspend)
+				goto fail;
 		}
 	}
 
-	ret = nv_ofuncs(object)->init(object);
-	atomic_set(&object->usecount, 1);
-	if (ret) {
-		nv_error(object, "init failed, %d\n", ret);
-		goto fail_self;
-	}
-
-	nv_trace(object, "initialised\n");
+	time = ktime_to_us(ktime_get()) - time;
+	nvif_debug(object, "%s completed in %lldus\n", action, time);
 	return 0;
 
-fail_self:
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		nvkm_object_dec(&object->engine->subdev.object, false);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
+fail:
+	if (object->func->init) {
+		int rret = object->func->init(object);
+		if (rret)
+			nvif_fatal(object, "failed to restart, %d\n", rret);
+	}
+fail_child:
+	list_for_each_entry_continue_reverse(child, &object->tree, head) {
+		nvkm_object_init(child);
 	}
-fail_engine:
-	if (object->parent)
-		 nvkm_object_dec(object->parent, false);
-fail_parent:
-	atomic_dec(&object->usecount);
 	return ret;
 }
 
-static int
-nvkm_object_decf(struct nvkm_object *object)
+int
+nvkm_object_init(struct nvkm_object *object)
 {
+	struct nvkm_object *child;
+	s64 time;
 	int ret;
 
-	nv_trace(object, "stopping...\n");
-
-	ret = nv_ofuncs(object)->fini(object, false);
-	atomic_set(&object->usecount, 0);
-	if (ret)
-		nv_warn(object, "failed fini, %d\n", ret);
-
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		nvkm_object_dec(&object->engine->subdev.object, false);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
+	nvif_debug(object, "init running...\n");
+	time = ktime_to_us(ktime_get());
+	if (object->func->init) {
+		ret = object->func->init(object);
+		if (ret)
+			goto fail;
 	}
 
-	if (object->parent)
-		nvkm_object_dec(object->parent, false);
+	nvif_debug(object, "init children...\n");
+	list_for_each_entry(child, &object->tree, head) {
+		ret = nvkm_object_init(child);
+		if (ret)
+			goto fail_child;
+	}
 
-	nv_trace(object, "stopped\n");
+	time = ktime_to_us(ktime_get()) - time;
+	nvif_debug(object, "init completed in %lldus\n", time);
 	return 0;
+
+fail_child:
+	list_for_each_entry_continue_reverse(child, &object->tree, head)
+		nvkm_object_fini(child, false);
+fail:
+	nvif_error(object, "init failed with %d\n", ret);
+	if (object->func->fini)
+		object->func->fini(object, false);
+	return ret;
 }
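
Both nvkm_object_fini() and nvkm_object_init() above unwind partial progress with list_for_each_entry_continue_reverse(), so a failure midway never leaves the child tree half-initialised. The same rollback shape, reduced to an array walk:

/* Sketch of the init-with-rollback pattern in nvkm_object_init(): if child
 * N fails, children N-1..0 are walked back through fini() so the tree is
 * never left half-initialised.  Array stand-in for the kernel list walk. */
#include <stdio.h>

#define N 4

static int child_init(int i)
{
	if (i == 2)
		return -1;	/* simulate a failure on the third child */
	printf("init %d\n", i);
	return 0;
}

static void child_fini(int i)
{
	printf("fini %d\n", i);
}

static int tree_init(void)
{
	int i;

	for (i = 0; i < N; i++) {
		if (child_init(i))
			goto fail;
	}
	return 0;

fail:
	/* like list_for_each_entry_continue_reverse(): unwind what succeeded */
	while (--i >= 0)
		child_fini(i);
	return -1;
}

int main(void)
{
	return tree_init() ? 1 : 0;
}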
 
-static int
-nvkm_object_decs(struct nvkm_object *object)
+void *
+nvkm_object_dtor(struct nvkm_object *object)
 {
-	int ret, rret;
-
-	nv_trace(object, "suspending...\n");
-
-	ret = nv_ofuncs(object)->fini(object, true);
-	atomic_set(&object->usecount, 0);
-	if (ret) {
-		nv_error(object, "failed suspend, %d\n", ret);
-		return ret;
+	struct nvkm_object *child, *ctemp;
+	void *data = object;
+	s64 time;
+
+	nvif_debug(object, "destroy children...\n");
+	time = ktime_to_us(ktime_get());
+	list_for_each_entry_safe(child, ctemp, &object->tree, head) {
+		nvkm_object_del(&child);
 	}
 
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		ret = nvkm_object_dec(&object->engine->subdev.object, true);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-		if (ret) {
-			nv_warn(object, "engine failed suspend, %d\n", ret);
-			goto fail_engine;
-		}
-	}
-
-	if (object->parent) {
-		ret = nvkm_object_dec(object->parent, true);
-		if (ret) {
-			nv_warn(object, "parent failed suspend, %d\n", ret);
-			goto fail_parent;
-		}
-	}
-
-	nv_trace(object, "suspended\n");
-	return 0;
+	nvif_debug(object, "destroy running...\n");
+	if (object->func->dtor)
+		data = object->func->dtor(object);
+	nvkm_engine_unref(&object->engine);
+	time = ktime_to_us(ktime_get()) - time;
+	nvif_debug(object, "destroy completed in %lldus...\n", time);
+	return data;
+}
 
-fail_parent:
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		rret = nvkm_object_inc(&object->engine->subdev.object);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-		if (rret)
-			nv_fatal(object, "engine failed to reinit, %d\n", rret);
+void
+nvkm_object_del(struct nvkm_object **pobject)
+{
+	struct nvkm_object *object = *pobject;
+	if (object && !WARN_ON(!object->func)) {
+		*pobject = nvkm_object_dtor(object);
+		nvkm_client_remove(object->client, object);
+		list_del(&object->head);
+		kfree(*pobject);
+		*pobject = NULL;
 	}
+}
 
-fail_engine:
-	rret = nv_ofuncs(object)->init(object);
-	if (rret)
-		nv_fatal(object, "failed to reinit, %d\n", rret);
-
-	return ret;
+void
+nvkm_object_ctor(const struct nvkm_object_func *func,
+		 const struct nvkm_oclass *oclass, struct nvkm_object *object)
+{
+	object->func = func;
+	object->client = oclass->client;
+	object->engine = nvkm_engine_ref(oclass->engine);
+	object->oclass = oclass->base.oclass;
+	object->handle = oclass->handle;
+	INIT_LIST_HEAD(&object->head);
+	INIT_LIST_HEAD(&object->tree);
+	RB_CLEAR_NODE(&object->node);
+	WARN_ON(oclass->engine && !object->engine);
 }
 
 int
-nvkm_object_dec(struct nvkm_object *object, bool suspend)
+nvkm_object_new_(const struct nvkm_object_func *func,
+		 const struct nvkm_oclass *oclass, void *data, u32 size,
+		 struct nvkm_object **pobject)
 {
-	int ref = atomic_add_return(-1, &object->usecount);
-	int ret;
-
-	nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount));
-
-	if (ref == 0) {
-		if (suspend)
-			ret = nvkm_object_decs(object);
-		else
-			ret = nvkm_object_decf(object);
-
-		if (ret) {
-			atomic_inc(&object->usecount);
-			return ret;
-		}
+	if (size == 0) {
+		if (!(*pobject = kzalloc(sizeof(**pobject), GFP_KERNEL)))
+			return -ENOMEM;
+		nvkm_object_ctor(func, oclass, *pobject);
+		return 0;
 	}
-
-	return 0;
+	return -ENOSYS;
 }
 
-void
-nvkm_object_debug(void)
+static const struct nvkm_object_func
+nvkm_object_func = {
+};
+
+int
+nvkm_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		struct nvkm_object **pobject)
 {
-#ifdef NVKM_OBJECT_MAGIC
-	struct nvkm_object *object;
-	if (!list_empty(&_objlist)) {
-		nv_fatal(NULL, "*******************************************\n");
-		nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
-		nv_fatal(NULL, "*******************************************\n");
-		list_for_each_entry(object, &_objlist, list) {
-			nv_fatal(object, "%p/%p/%d/%d\n",
-				 object->parent, object->engine,
-				 atomic_read(&object->refcount),
-				 atomic_read(&object->usecount));
-		}
-	}
-#endif
+	const struct nvkm_object_func *func =
+		oclass->base.func ? oclass->base.func : &nvkm_object_func;
+	return nvkm_object_new_(func, oclass, data, size, pobject);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
new file mode 100644
index 000000000000..e31a0479add0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/oproxy.h>
+
+static int
+nvkm_oproxy_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	return nvkm_object_mthd(nvkm_oproxy(object)->object, mthd, data, size);
+}
+
+static int
+nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
+		 struct nvkm_event **pevent)
+{
+	return nvkm_object_ntfy(nvkm_oproxy(object)->object, mthd, pevent);
+}
+
+static int
+nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	return nvkm_object_map(nvkm_oproxy(object)->object, addr, size);
+}
+
+static int
+nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data)
+{
+	return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data)
+{
+	return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data)
+{
+	return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data)
+{
+	return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	return nvkm_object_bind(nvkm_oproxy(object)->object,
+				parent, align, pgpuobj);
+}
+
+static int
+nvkm_oproxy_sclass(struct nvkm_object *object, int index,
+		   struct nvkm_oclass *oclass)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	oclass->parent = oproxy->object;
+	if (!oproxy->object->func->sclass)
+		return -ENODEV;
+	return oproxy->object->func->sclass(oproxy->object, index, oclass);
+}
+
+static int
+nvkm_oproxy_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	int ret;
+
+	if (oproxy->func->fini[0]) {
+		ret = oproxy->func->fini[0](oproxy, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	if (oproxy->object->func->fini) {
+		ret = oproxy->object->func->fini(oproxy->object, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	if (oproxy->func->fini[1]) {
+		ret = oproxy->func->fini[1](oproxy, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+nvkm_oproxy_init(struct nvkm_object *object)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	int ret;
+
+	if (oproxy->func->init[0]) {
+		ret = oproxy->func->init[0](oproxy);
+		if (ret)
+			return ret;
+	}
+
+	if (oproxy->object->func->init) {
+		ret = oproxy->object->func->init(oproxy->object);
+		if (ret)
+			return ret;
+	}
+
+	if (oproxy->func->init[1]) {
+		ret = oproxy->func->init[1](oproxy);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void *
+nvkm_oproxy_dtor(struct nvkm_object *object)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	if (oproxy->func->dtor[0])
+		oproxy->func->dtor[0](oproxy);
+	nvkm_object_del(&oproxy->object);
+	if (oproxy->func->dtor[1])
+		oproxy->func->dtor[1](oproxy);
+	return oproxy;
+}
+
+static const struct nvkm_object_func
+nvkm_oproxy_func = {
+	.dtor = nvkm_oproxy_dtor,
+	.init = nvkm_oproxy_init,
+	.fini = nvkm_oproxy_fini,
+	.mthd = nvkm_oproxy_mthd,
+	.ntfy = nvkm_oproxy_ntfy,
+	.map = nvkm_oproxy_map,
+	.rd08 = nvkm_oproxy_rd08,
+	.rd16 = nvkm_oproxy_rd16,
+	.rd32 = nvkm_oproxy_rd32,
+	.wr08 = nvkm_oproxy_wr08,
+	.wr16 = nvkm_oproxy_wr16,
+	.wr32 = nvkm_oproxy_wr32,
+	.bind = nvkm_oproxy_bind,
+	.sclass = nvkm_oproxy_sclass,
+};
+
+void
+nvkm_oproxy_ctor(const struct nvkm_oproxy_func *func,
+		 const struct nvkm_oclass *oclass, struct nvkm_oproxy *oproxy)
+{
+	nvkm_object_ctor(&nvkm_oproxy_func, oclass, &oproxy->base);
+	oproxy->func = func;
+}
+
+int
+nvkm_oproxy_new_(const struct nvkm_oproxy_func *func,
+		 const struct nvkm_oclass *oclass, struct nvkm_oproxy **poproxy)
+{
+	if (!(*poproxy = kzalloc(sizeof(**poproxy), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_oproxy_ctor(func, oclass, *poproxy);
+	return 0;
+}
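
The new oproxy.c is a pure forwarding shim: every object method delegates to oproxy->object, while the owner's init[0]/[1], fini[0]/[1] and dtor[0]/[1] hooks bracket the proxied object's own hooks, the [0] entry running before and the [1] entry after. A sketch of a wrapper that must run code before the wrapped object is suspended (names hypothetical):

	static int
	hypothetical_fini_pre(struct nvkm_oproxy *oproxy, bool suspend)
	{
		/* runs before oproxy->object->func->fini() */
		return 0;
	}

	static const struct nvkm_oproxy_func
	hypothetical_oproxy_func = {
		.fini[0] = hypothetical_fini_pre,
	};

	/* allocate the shim, then create the real object and park it in
	 * oproxy->object so the forwarders above have a target */
	struct nvkm_oproxy *oproxy;
	int ret = nvkm_oproxy_new_(&hypothetical_oproxy_func, oclass, &oproxy);
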
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/option.c b/drivers/gpu/drm/nouveau/nvkm/core/option.c
index 19d153f8c8fd..3e62cf8cde08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/option.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/option.c
@@ -73,6 +73,24 @@ nvkm_boolopt(const char *optstr, const char *opt, bool value)
 	return value;
 }
 
+long
+nvkm_longopt(const char *optstr, const char *opt, long value)
+{
+	long result = value;
+	int arglen;
+	char *s;
+
+	optstr = nvkm_stropt(optstr, opt, &arglen);
+	if (optstr && (s = kstrndup(optstr, arglen, GFP_KERNEL))) {
+		int ret = kstrtol(s, 0, &value);
+		if (ret == 0)
+			result = value;
+		kfree(s);
+	}
+
+	return result;
+}
+
 int
 nvkm_dbgopt(const char *optstr, const char *sub)
 {
@@ -95,7 +113,7 @@ nvkm_dbgopt(const char *optstr, const char *sub)
 				else if (!strncasecmpz(optstr, "warn", len))
 					level = NV_DBG_WARN;
 				else if (!strncasecmpz(optstr, "info", len))
-					level = NV_DBG_INFO_NORMAL;
+					level = NV_DBG_INFO;
 				else if (!strncasecmpz(optstr, "debug", len))
 					level = NV_DBG_DEBUG;
 				else if (!strncasecmpz(optstr, "trace", len))
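
The nvkm_longopt() helper added above parses a numeric option out of an option string, leaving the caller's default untouched when the option is absent or kstrtol() rejects it; base 0 means decimal, octal and 0x-prefixed hex are all accepted. A hedged usage sketch (the option name is invented, and cfgopt is assumed to be the device's config string):

	/* e.g. booted with config=NvSomeLimit=0x100 */
	long limit = nvkm_longopt(device->cfgopt, "NvSomeLimit", -1);
	if (limit < 0) {
		/* option missing or unparseable: keep a built-in default */
		limit = 0x80;
	}
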
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/parent.c b/drivers/gpu/drm/nouveau/nvkm/core/parent.c
deleted file mode 100644
index dd56cd1eeb38..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/parent.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/parent.h>
-#include <core/client.h>
-#include <core/engine.h>
-
-int
-nvkm_parent_sclass(struct nvkm_object *parent, u16 handle,
-		   struct nvkm_object **pengine,
-		   struct nvkm_oclass **poclass)
-{
-	struct nvkm_sclass *sclass;
-	struct nvkm_engine *engine;
-	struct nvkm_oclass *oclass;
-	u64 mask;
-
-	sclass = nv_parent(parent)->sclass;
-	while (sclass) {
-		if ((sclass->oclass->handle & 0xffff) == handle) {
-			*pengine = &parent->engine->subdev.object;
-			*poclass = sclass->oclass;
-			return 0;
-		}
-
-		sclass = sclass->sclass;
-	}
-
-	mask = nv_parent(parent)->engine;
-	while (mask) {
-		int i = __ffs64(mask);
-
-		if (nv_iclass(parent, NV_CLIENT_CLASS))
-			engine = nv_engine(nv_client(parent)->device);
-		else
-			engine = nvkm_engine(parent, i);
-
-		if (engine) {
-			oclass = engine->sclass;
-			while (oclass->ofuncs) {
-				if ((oclass->handle & 0xffff) == handle) {
-					*pengine = nv_object(engine);
-					*poclass = oclass;
-					return 0;
-				}
-				oclass++;
-			}
-		}
-
-		mask &= ~(1ULL << i);
-	}
-
-	return -EINVAL;
-}
-
-int
-nvkm_parent_lclass(struct nvkm_object *parent, u32 *lclass, int size)
-{
-	struct nvkm_sclass *sclass;
-	struct nvkm_engine *engine;
-	struct nvkm_oclass *oclass;
-	int nr = -1, i;
-	u64 mask;
-
-	sclass = nv_parent(parent)->sclass;
-	while (sclass) {
-		if (++nr < size)
-			lclass[nr] = sclass->oclass->handle & 0xffff;
-		sclass = sclass->sclass;
-	}
-
-	mask = nv_parent(parent)->engine;
-	while (i = __ffs64(mask), mask) {
-		engine = nvkm_engine(parent, i);
-		if (engine && (oclass = engine->sclass)) {
-			while (oclass->ofuncs) {
-				if (++nr < size)
-					lclass[nr] = oclass->handle & 0xffff;
-				oclass++;
-			}
-		}
-
-		mask &= ~(1ULL << i);
-	}
-
-	return nr + 1;
-}
-
-int
-nvkm_parent_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    struct nvkm_oclass *sclass, u64 engcls,
-		    int size, void **pobject)
-{
-	struct nvkm_parent *object;
-	struct nvkm_sclass *nclass;
-	int ret;
-
-	ret = nvkm_object_create_(parent, engine, oclass, pclass |
-				  NV_PARENT_CLASS, size, pobject);
-	object = *pobject;
-	if (ret)
-		return ret;
-
-	while (sclass && sclass->ofuncs) {
-		nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
-		if (!nclass)
-			return -ENOMEM;
-
-		nclass->sclass = object->sclass;
-		object->sclass = nclass;
-		nclass->engine = engine ? nv_engine(engine) : NULL;
-		nclass->oclass = sclass;
-		sclass++;
-	}
-
-	object->engine = engcls;
-	return 0;
-}
-
-void
-nvkm_parent_destroy(struct nvkm_parent *parent)
-{
-	struct nvkm_sclass *sclass;
-
-	while ((sclass = parent->sclass)) {
-		parent->sclass = sclass->sclass;
-		kfree(sclass);
-	}
-
-	nvkm_object_destroy(&parent->object);
-}
-
-
-void
-_nvkm_parent_dtor(struct nvkm_object *object)
-{
-	nvkm_parent_destroy(nv_parent(object));
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/printk.c b/drivers/gpu/drm/nouveau/nvkm/core/printk.c
deleted file mode 100644
index 4a220eb91660..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/printk.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/printk.h>
-#include <core/client.h>
-#include <core/device.h>
-
-int nv_info_debug_level = NV_DBG_INFO_NORMAL;
-
-void
-nv_printk_(struct nvkm_object *object, int level, const char *fmt, ...)
-{
-	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
-	const char *pfx;
-	char mfmt[256];
-	va_list args;
-
-	switch (level) {
-	case NV_DBG_FATAL:
-		pfx = KERN_CRIT;
-		break;
-	case NV_DBG_ERROR:
-		pfx = KERN_ERR;
-		break;
-	case NV_DBG_WARN:
-		pfx = KERN_WARNING;
-		break;
-	case NV_DBG_INFO_NORMAL:
-		pfx = KERN_INFO;
-		break;
-	case NV_DBG_DEBUG:
-	case NV_DBG_PARANOIA:
-	case NV_DBG_TRACE:
-	case NV_DBG_SPAM:
-	default:
-		pfx = KERN_DEBUG;
-		break;
-	}
-
-	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
-		struct nvkm_object *device;
-		struct nvkm_object *subdev;
-		char obuf[64], *ofmt = "";
-
-		if (object->engine == NULL) {
-			subdev = object;
-			while (subdev && !nv_iclass(subdev, NV_SUBDEV_CLASS))
-				subdev = subdev->parent;
-		} else {
-			subdev = &object->engine->subdev.object;
-		}
-
-		device = subdev;
-		if (device->parent)
-			device = device->parent;
-
-		if (object != subdev) {
-			snprintf(obuf, sizeof(obuf), "[0x%08x]",
-				 nv_hclass(object));
-			ofmt = obuf;
-		}
-
-		if (level > nv_subdev(subdev)->debug)
-			return;
-
-		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
-			 name[level], nv_subdev(subdev)->name,
-			 nv_device(device)->name, ofmt, fmt);
-	} else
-	if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
-		if (level > nv_client(object)->debug)
-			return;
-
-		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
-			 name[level], nv_client(object)->name, fmt);
-	} else {
-		snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
-	}
-
-	va_start(args, fmt);
-	vprintk(mfmt, args);
-	va_end(args);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index ebd4d15479bd..3216e157a8a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -22,8 +22,6 @@
 #include <core/ramht.h>
 #include <core/engine.h>
 
-#include <subdev/bar.h>
-
 static u32
 nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
 {
@@ -35,72 +33,130 @@ nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
 	}
 
 	hash ^= chid << (ramht->bits - 4);
-	hash  = hash << 3;
 	return hash;
 }
 
-int
-nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context)
+struct nvkm_gpuobj *
+nvkm_ramht_search(struct nvkm_ramht *ramht, int chid, u32 handle)
 {
-	struct nvkm_bar *bar = nvkm_bar(ramht);
 	u32 co, ho;
 
 	co = ho = nvkm_ramht_hash(ramht, chid, handle);
 	do {
-		if (!nv_ro32(ramht, co + 4)) {
-			nv_wo32(ramht, co + 0, handle);
-			nv_wo32(ramht, co + 4, context);
-			if (bar)
-				bar->flush(bar);
-			return co;
+		if (ramht->data[co].chid == chid) {
+			if (ramht->data[co].handle == handle)
+				return ramht->data[co].inst;
 		}
 
-		co += 8;
-		if (co >= nv_gpuobj(ramht)->size)
+		if (++co >= ramht->size)
 			co = 0;
 	} while (co != ho);
 
-	return -ENOMEM;
+	return NULL;
+}
+
+static int
+nvkm_ramht_update(struct nvkm_ramht *ramht, int co, struct nvkm_object *object,
+		  int chid, int addr, u32 handle, u32 context)
+{
+	struct nvkm_ramht_data *data = &ramht->data[co];
+	u64 inst = 0x00000040; /* just non-zero for <=g8x fifo ramht */
+	int ret;
+
+	nvkm_gpuobj_del(&data->inst);
+	data->chid = chid;
+	data->handle = handle;
+
+	if (object) {
+		ret = nvkm_object_bind(object, ramht->parent, 16, &data->inst);
+		if (ret) {
+			if (ret != -ENODEV) {
+				data->chid = -1;
+				return ret;
+			}
+			data->inst = NULL;
+		}
+
+		if (data->inst) {
+			if (ramht->device->card_type >= NV_50)
+				inst = data->inst->node->offset;
+			else
+				inst = data->inst->addr;
+		}
+
+		if (addr < 0) context |= inst << -addr;
+		else          context |= inst >>  addr;
+	}
+
+	nvkm_kmap(ramht->gpuobj);
+	nvkm_wo32(ramht->gpuobj, (co << 3) + 0, handle);
+	nvkm_wo32(ramht->gpuobj, (co << 3) + 4, context);
+	nvkm_done(ramht->gpuobj);
+	return co + 1;
 }
 
 void
 nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie)
 {
-	struct nvkm_bar *bar = nvkm_bar(ramht);
-	nv_wo32(ramht, cookie + 0, 0x00000000);
-	nv_wo32(ramht, cookie + 4, 0x00000000);
-	if (bar)
-		bar->flush(bar);
+	if (--cookie >= 0)
+		nvkm_ramht_update(ramht, cookie, NULL, -1, 0, 0, 0);
+}
+
+int
+nvkm_ramht_insert(struct nvkm_ramht *ramht, struct nvkm_object *object,
+		  int chid, int addr, u32 handle, u32 context)
+{
+	u32 co, ho;
+
+	if (nvkm_ramht_search(ramht, chid, handle))
+		return -EEXIST;
+
+	co = ho = nvkm_ramht_hash(ramht, chid, handle);
+	do {
+		if (ramht->data[co].chid < 0) {
+			return nvkm_ramht_update(ramht, co, object, chid,
+						 addr, handle, context);
+		}
+
+		if (++co >= ramht->size)
+			co = 0;
+	} while (co != ho);
+
+	return -ENOSPC;
 }
 
-static struct nvkm_oclass
-nvkm_ramht_oclass = {
-	.handle = 0x0000abcd,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = NULL,
-		.dtor = _nvkm_gpuobj_dtor,
-		.init = _nvkm_gpuobj_init,
-		.fini = _nvkm_gpuobj_fini,
-		.rd32 = _nvkm_gpuobj_rd32,
-		.wr32 = _nvkm_gpuobj_wr32,
-	},
-};
+void
+nvkm_ramht_del(struct nvkm_ramht **pramht)
+{
+	struct nvkm_ramht *ramht = *pramht;
+	if (ramht) {
+		nvkm_gpuobj_del(&ramht->gpuobj);
+		kfree(*pramht);
+		*pramht = NULL;
+	}
+}
 
 int
-nvkm_ramht_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
-	       u32 size, u32 align, struct nvkm_ramht **pramht)
+nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
+	       struct nvkm_gpuobj *parent, struct nvkm_ramht **pramht)
 {
 	struct nvkm_ramht *ramht;
-	int ret;
+	int ret, i;
 
-	ret = nvkm_gpuobj_create(parent, parent->engine ?
-				 &parent->engine->subdev.object : parent, /* <nv50 ramht */
-				 &nvkm_ramht_oclass, 0, pargpu, size,
-				 align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
-	*pramht = ramht;
-	if (ret)
-		return ret;
+	if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
+					sizeof(*ramht->data), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3);
-	return 0;
+	ramht->device = device;
+	ramht->parent = parent;
+	ramht->size = size >> 3;
+	ramht->bits = order_base_2(ramht->size);
+	for (i = 0; i < ramht->size; i++)
+		ramht->data[i].chid = -1;
+
+	ret = nvkm_gpuobj_new(ramht->device, size, align, true,
+			      ramht->parent, &ramht->gpuobj);
+	if (ret)
+		nvkm_ramht_del(pramht);
+	return ret;
 }
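
RAMHT gains a software shadow here: ramht->data[] mirrors each slot's channel id, handle and bound instance, so lookups probe host memory instead of reading the gpuobj back, and collision probing advances one slot at a time rather than by the old 8-byte stride. nvkm_ramht_insert() returns the slot index plus one, keeping valid cookies strictly positive, and nvkm_ramht_remove() decrements before acting so a zero cookie is a no-op. A usage sketch under those semantics (chid/handle/context values hypothetical):

	struct nvkm_ramht *ramht;
	int ret, cookie;

	ret = nvkm_ramht_new(device, 0x8000, 0, parent, &ramht);
	if (ret)
		return ret;

	cookie = nvkm_ramht_insert(ramht, object, chid, 0, handle, context);
	if (cookie < 0)
		return cookie;	/* -EEXIST (duplicate) or -ENOSPC (full) */

	/* ... */
	nvkm_ramht_remove(ramht, cookie);
	nvkm_ramht_del(&ramht);
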
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index c5fb3a793174..7de98470a2a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -25,96 +25,178 @@
 #include <core/device.h>
 #include <core/option.h>
 
-struct nvkm_subdev *
-nvkm_subdev(void *obj, int idx)
-{
-	struct nvkm_object *object = nv_object(obj);
-	while (object && !nv_iclass(object, NV_SUBDEV_CLASS))
-		object = object->parent;
-	if (object == NULL || nv_subidx(nv_subdev(object)) != idx)
-		object = nv_device(obj)->subdev[idx];
-	return object ? nv_subdev(object) : NULL;
-}
+static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
+
+const char *
+nvkm_subdev_name[NVKM_SUBDEV_NR] = {
+	[NVKM_SUBDEV_BAR    ] = "bar",
+	[NVKM_SUBDEV_VBIOS  ] = "bios",
+	[NVKM_SUBDEV_BUS    ] = "bus",
+	[NVKM_SUBDEV_CLK    ] = "clk",
+	[NVKM_SUBDEV_DEVINIT] = "devinit",
+	[NVKM_SUBDEV_FB     ] = "fb",
+	[NVKM_SUBDEV_FUSE   ] = "fuse",
+	[NVKM_SUBDEV_GPIO   ] = "gpio",
+	[NVKM_SUBDEV_I2C    ] = "i2c",
+	[NVKM_SUBDEV_IBUS   ] = "priv",
+	[NVKM_SUBDEV_INSTMEM] = "imem",
+	[NVKM_SUBDEV_LTC    ] = "ltc",
+	[NVKM_SUBDEV_MC     ] = "mc",
+	[NVKM_SUBDEV_MMU    ] = "mmu",
+	[NVKM_SUBDEV_MXM    ] = "mxm",
+	[NVKM_SUBDEV_PCI    ] = "pci",
+	[NVKM_SUBDEV_PMU    ] = "pmu",
+	[NVKM_SUBDEV_THERM  ] = "therm",
+	[NVKM_SUBDEV_TIMER  ] = "tmr",
+	[NVKM_SUBDEV_VOLT   ] = "volt",
+	[NVKM_ENGINE_BSP    ] = "bsp",
+	[NVKM_ENGINE_CE0    ] = "ce0",
+	[NVKM_ENGINE_CE1    ] = "ce1",
+	[NVKM_ENGINE_CE2    ] = "ce2",
+	[NVKM_ENGINE_CIPHER ] = "cipher",
+	[NVKM_ENGINE_DISP   ] = "disp",
+	[NVKM_ENGINE_DMAOBJ ] = "dma",
+	[NVKM_ENGINE_FIFO   ] = "fifo",
+	[NVKM_ENGINE_GR     ] = "gr",
+	[NVKM_ENGINE_IFB    ] = "ifb",
+	[NVKM_ENGINE_ME     ] = "me",
+	[NVKM_ENGINE_MPEG   ] = "mpeg",
+	[NVKM_ENGINE_MSENC  ] = "msenc",
+	[NVKM_ENGINE_MSPDEC ] = "mspdec",
+	[NVKM_ENGINE_MSPPP  ] = "msppp",
+	[NVKM_ENGINE_MSVLD  ] = "msvld",
+	[NVKM_ENGINE_PM     ] = "pm",
+	[NVKM_ENGINE_SEC    ] = "sec",
+	[NVKM_ENGINE_SW     ] = "sw",
+	[NVKM_ENGINE_VIC    ] = "vic",
+	[NVKM_ENGINE_VP     ] = "vp",
+};
 
 void
-nvkm_subdev_reset(struct nvkm_object *subdev)
+nvkm_subdev_intr(struct nvkm_subdev *subdev)
 {
-	nv_trace(subdev, "resetting...\n");
-	nv_ofuncs(subdev)->fini(subdev, false);
-	nv_debug(subdev, "reset\n");
+	if (subdev->func->intr)
+		subdev->func->intr(subdev);
 }
 
 int
-nvkm_subdev_init(struct nvkm_subdev *subdev)
+nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	int ret = nvkm_object_init(&subdev->object);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = subdev->device;
+	const char *action = suspend ? "suspend" : "fini";
+	u32 pmc_enable = subdev->pmc_enable;
+	s64 time;
 
-	nvkm_subdev_reset(&subdev->object);
-	return 0;
-}
+	nvkm_trace(subdev, "%s running...\n", action);
+	time = ktime_to_us(ktime_get());
 
-int
-_nvkm_subdev_init(struct nvkm_object *object)
-{
-	return nvkm_subdev_init(nv_subdev(object));
-}
+	if (subdev->func->fini) {
+		int ret = subdev->func->fini(subdev, suspend);
+		if (ret) {
+			nvkm_error(subdev, "%s failed, %d\n", action, ret);
+			if (suspend)
+				return ret;
+		}
+	}
 
-int
-nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
-{
-	if (subdev->unit) {
-		nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
-		nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
+	if (pmc_enable) {
+		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+		nvkm_rd32(device, 0x000200);
 	}
 
-	return nvkm_object_fini(&subdev->object, suspend);
+	time = ktime_to_us(ktime_get()) - time;
+	nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
+	return 0;
 }
 
 int
-_nvkm_subdev_fini(struct nvkm_object *object, bool suspend)
+nvkm_subdev_preinit(struct nvkm_subdev *subdev)
 {
-	return nvkm_subdev_fini(nv_subdev(object), suspend);
-}
+	s64 time;
 
-void
-nvkm_subdev_destroy(struct nvkm_subdev *subdev)
-{
-	int subidx = nv_hclass(subdev) & 0xff;
-	nv_device(subdev)->subdev[subidx] = NULL;
-	nvkm_object_destroy(&subdev->object);
-}
+	nvkm_trace(subdev, "preinit running...\n");
+	time = ktime_to_us(ktime_get());
 
-void
-_nvkm_subdev_dtor(struct nvkm_object *object)
-{
-	nvkm_subdev_destroy(nv_subdev(object));
+	if (subdev->func->preinit) {
+		int ret = subdev->func->preinit(subdev);
+		if (ret) {
+			nvkm_error(subdev, "preinit failed, %d\n", ret);
+			return ret;
+		}
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvkm_trace(subdev, "preinit completed in %lldus\n", time);
+	return 0;
 }
 
 int
-nvkm_subdev_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    const char *subname, const char *sysname,
-		    int size, void **pobject)
+nvkm_subdev_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_subdev *subdev;
+	s64 time;
 	int ret;
 
-	ret = nvkm_object_create_(parent, engine, oclass, pclass |
-				  NV_SUBDEV_CLASS, size, pobject);
-	subdev = *pobject;
-	if (ret)
-		return ret;
+	nvkm_trace(subdev, "init running...\n");
+	time = ktime_to_us(ktime_get());
+
+	if (subdev->func->oneinit && !subdev->oneinit) {
+		s64 time;
+		nvkm_trace(subdev, "one-time init running...\n");
+		time = ktime_to_us(ktime_get());
+		ret = subdev->func->oneinit(subdev);
+		if (ret) {
+			nvkm_error(subdev, "one-time init failed, %d\n", ret);
+			return ret;
+		}
 
-	__mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
-	subdev->name = subname;
+		subdev->oneinit = true;
+		time = ktime_to_us(ktime_get()) - time;
+		nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
+	}
 
-	if (parent) {
-		struct nvkm_device *device = nv_device(parent);
-		subdev->debug = nvkm_dbgopt(device->dbgopt, subname);
-		subdev->mmio  = nv_subdev(device)->mmio;
+	if (subdev->func->init) {
+		ret = subdev->func->init(subdev);
+		if (ret) {
+			nvkm_error(subdev, "init failed, %d\n", ret);
+			return ret;
+		}
 	}
 
+	time = ktime_to_us(ktime_get()) - time;
+	nvkm_trace(subdev, "init completed in %lldus\n", time);
 	return 0;
 }
+
+void
+nvkm_subdev_del(struct nvkm_subdev **psubdev)
+{
+	struct nvkm_subdev *subdev = *psubdev;
+	s64 time;
+
+	if (subdev && !WARN_ON(!subdev->func)) {
+		nvkm_trace(subdev, "destroy running...\n");
+		time = ktime_to_us(ktime_get());
+		if (subdev->func->dtor)
+			*psubdev = subdev->func->dtor(subdev);
+		time = ktime_to_us(ktime_get()) - time;
+		nvkm_trace(subdev, "destroy completed in %lldus\n", time);
+		kfree(*psubdev);
+		*psubdev = NULL;
+	}
+}
+
+void
+nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
+		 struct nvkm_device *device, int index, u32 pmc_enable,
+		 struct nvkm_subdev *subdev)
+{
+	const char *name = nvkm_subdev_name[index];
+	subdev->func = func;
+	subdev->device = device;
+	subdev->index = index;
+	subdev->pmc_enable = pmc_enable;
+
+	__mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
+	subdev->debug = nvkm_dbgopt(device->dbgopt, name);
+}
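
The subdev rework hangs the whole lifecycle off a const nvkm_subdev_func and times each stage: preinit, a once-only oneinit guarded by subdev->oneinit, init, and fini, with the pmc_enable bits pulsed through PMC register 0x000200 on fini to reset the unit. A minimal sketch of a subdev plugging into this (names hypothetical):

	static int
	hypothetical_init(struct nvkm_subdev *subdev)
	{
		/* per-init (boot/resume) hardware setup */
		return 0;
	}

	static void *
	hypothetical_dtor(struct nvkm_subdev *subdev)
	{
		/* return the pointer for nvkm_subdev_del() to kfree() */
		return subdev;
	}

	static const struct nvkm_subdev_func
	hypothetical_subdev_func = {
		.dtor = hypothetical_dtor,
		.init = hypothetical_init,
	};

	nvkm_subdev_ctor(&hypothetical_subdev_func, device, index,
			 0 /* no pmc_enable bits */, subdev);
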
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 6bd3d756f32c..36f724763fde 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -6,7 +6,7 @@ include $(src)/nvkm/engine/ce/Kbuild
 include $(src)/nvkm/engine/cipher/Kbuild
 include $(src)/nvkm/engine/device/Kbuild
 include $(src)/nvkm/engine/disp/Kbuild
-include $(src)/nvkm/engine/dmaobj/Kbuild
+include $(src)/nvkm/engine/dma/Kbuild
 include $(src)/nvkm/engine/fifo/Kbuild
 include $(src)/nvkm/engine/gr/Kbuild
 include $(src)/nvkm/engine/mpeg/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index a0b1fd80fa93..3ef01071f073 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -22,72 +22,23 @@
  * Authors: Ben Skeggs, Ilia Mirkin
  */
 #include <engine/bsp.h>
-#include <engine/xtensa.h>
 
-#include <core/engctx.h>
-
-/*******************************************************************************
- * BSP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_bsp_sclass[] = {
-	{ 0x74b0, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * BSP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_bsp_cclass = {
-	.handle = NV_ENGCTX(BSP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_xtensa_engctx_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
+#include <nvif/class.h>
+
+static const struct nvkm_xtensa_func
+g84_bsp = {
+	.pmc_enable = 0x04008000,
+	.fifo_val = 0x1111,
+	.unkd28 = 0x90044,
+	.sclass = {
+		{ -1, -1, NV74_BSP },
+		{}
+	}
 };
 
-/*******************************************************************************
- * BSP engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_bsp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
-	struct nvkm_xtensa *priv;
-	int ret;
-
-	ret = nvkm_xtensa_create(parent, engine, oclass, 0x103000, true,
-				 "PBSP", "bsp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x04008000;
-	nv_engine(priv)->cclass = &g84_bsp_cclass;
-	nv_engine(priv)->sclass = g84_bsp_sclass;
-	priv->fifo_val = 0x1111;
-	priv->unkd28 = 0x90044;
-	return 0;
+	return nvkm_xtensa_new_(&g84_bsp, device, index,
+				true, 0x103000, pengine);
 }
-
-struct nvkm_oclass
-g84_bsp_oclass = {
-	.handle = NV_ENGINE(BSP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_bsp_ctor,
-		.dtor = _nvkm_xtensa_dtor,
-		.init = _nvkm_xtensa_init,
-		.fini = _nvkm_xtensa_fini,
-		.rd32 = _nvkm_xtensa_rd32,
-		.wr32 = _nvkm_xtensa_wr32,
-	},
-};
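
With the oclass boilerplate gone, a chipset's BSP support reduces to the const nvkm_xtensa_func above plus a one-line constructor; the per-chipset state (PMC bit, FIFO value, the unknown 0xd28 register) rides in the func table instead of being poked into the object at ctor time. The constructor is presumably invoked from the device's chipset tables, roughly along these lines:

	struct nvkm_engine *bsp;
	int ret = g84_bsp_new(device, NVKM_ENGINE_BSP, &bsp);
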
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
index a558dfa4d76a..6226bcd98ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
@@ -24,9 +24,9 @@
  */
 
 #ifdef GT215
-.section #gt215_pce_data
+.section #gt215_ce_data
 #else
-.section #gf100_pce_data
+.section #gf100_ce_data
 #endif
 
 ctx_object:                   .b32 0
@@ -128,9 +128,9 @@ dispatch_dma:
 .b16 0x800 0
 
 #ifdef GT215
-.section #gt215_pce_code
+.section #gt215_ce_code
 #else
-.section #gf100_pce_code
+.section #gf100_ce_code
 #endif
 
 main:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index d9af6e4e4585..05bb65608dfe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_pce_data[] = {
+uint32_t gf100_ce_data[] = {
 /* 0x0000: ctx_object */
 	0x00000000,
 /* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ uint32_t gf100_pce_data[] = {
 	0x00000800,
 };
 
-uint32_t gf100_pce_code[] = {
+uint32_t gf100_ce_code[] = {
 /* 0x0000: main */
 	0x04fe04bd,
 	0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index f42c0d0d6cee..972281d10f38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_pce_data[] = {
+uint32_t gt215_ce_data[] = {
 /* 0x0000: ctx_object */
 	0x00000000,
 /* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ uint32_t gt215_pce_data[] = {
 	0x00000800,
 };
 
-uint32_t gt215_pce_code[] = {
+uint32_t gt215_ce_code[] = {
 /* 0x0000: main */
 	0x04fe04bd,
 	0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
index 2d2e549c2e34..92a9f35df1a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -21,146 +21,60 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
-#include <engine/falcon.h>
+#include "priv.h"
 #include "fuc/gf100.fuc3.h"
 
-struct gf100_ce_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_ce0_sclass[] = {
-	{ 0x90b5, &nvkm_object_ofuncs },
-	{},
-};
-
-static struct nvkm_oclass
-gf100_ce1_sclass[] = {
-	{ 0x90b8, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gf100_ce_context_ofuncs = {
-	.ctor = _nvkm_falcon_context_ctor,
-	.dtor = _nvkm_falcon_context_dtor,
-	.init = _nvkm_falcon_context_init,
-	.fini = _nvkm_falcon_context_fini,
-	.rd32 = _nvkm_falcon_context_rd32,
-	.wr32 = _nvkm_falcon_context_wr32,
-};
-
-static struct nvkm_oclass
-gf100_ce0_cclass = {
-	.handle = NV_ENGCTX(CE0, 0xc0),
-	.ofuncs = &gf100_ce_context_ofuncs,
-};
-
-static struct nvkm_oclass
-gf100_ce1_cclass = {
-	.handle = NV_ENGCTX(CE1, 0xc0),
-	.ofuncs = &gf100_ce_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
-static int
-gf100_ce_init(struct nvkm_object *object)
+static void
+gf100_ce_init(struct nvkm_falcon *ce)
 {
-	struct gf100_ce_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wo32(priv, 0x084, nv_engidx(&priv->base.base) - NVDEV_ENGINE_CE0);
-	return 0;
+	struct nvkm_device *device = ce->engine.subdev.device;
+	const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0;
+	nvkm_wr32(device, ce->addr + 0x084, index);
 }
 
-static int
-gf100_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gf100_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, true,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_falcon_func
+gf100_ce0 = {
+	.code.data = gf100_ce_code,
+	.code.size = sizeof(gf100_ce_code),
+	.data.data = gf100_ce_data,
+	.data.size = sizeof(gf100_ce_data),
+	.pmc_enable = 0x00000040,
+	.init = gf100_ce_init,
+	.intr = gt215_ce_intr,
+	.sclass = {
+		{ -1, -1, FERMI_DMA },
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x00000040;
-	nv_subdev(priv)->intr = gt215_ce_intr;
-	nv_engine(priv)->cclass = &gf100_ce0_cclass;
-	nv_engine(priv)->sclass = gf100_ce0_sclass;
-	nv_falcon(priv)->code.data = gf100_pce_code;
-	nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
-	nv_falcon(priv)->data.data = gf100_pce_data;
-	nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
-	return 0;
-}
+static const struct nvkm_falcon_func
+gf100_ce1 = {
+	.code.data = gf100_ce_code,
+	.code.size = sizeof(gf100_ce_code),
+	.data.data = gf100_ce_data,
+	.data.size = sizeof(gf100_ce_data),
+	.pmc_enable = 0x00000080,
+	.init = gf100_ce_init,
+	.intr = gt215_ce_intr,
+	.sclass = {
+		{ -1, -1, FERMI_DECOMPRESS },
+		{}
+	}
+};
 
-static int
-gf100_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gf100_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
 {
-	struct gf100_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x105000, true,
-				 "PCE1", "ce1", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000080;
-	nv_subdev(priv)->intr = gt215_ce_intr;
-	nv_engine(priv)->cclass = &gf100_ce1_cclass;
-	nv_engine(priv)->sclass = gf100_ce1_sclass;
-	nv_falcon(priv)->code.data = gf100_pce_code;
-	nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
-	nv_falcon(priv)->data.data = gf100_pce_data;
-	nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
-	return 0;
+	if (index == NVKM_ENGINE_CE0) {
+		return nvkm_falcon_new_(&gf100_ce0, device, index, true,
+					0x104000, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE1) {
+		return nvkm_falcon_new_(&gf100_ce1, device, index, true,
+					0x105000, pengine);
+	}
+	return -ENODEV;
 }
-
-struct nvkm_oclass
-gf100_ce0_oclass = {
-	.handle = NV_ENGINE(CE0, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ce0_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_ce_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
-
-struct nvkm_oclass
-gf100_ce1_oclass = {
-	.handle = NV_ENGINE(CE1, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ce1_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_ce_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
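
gf100 exposes two copy engines with distinct classes (FERMI_DMA on CE0, FERMI_DECOMPRESS on CE1), so the new constructor dispatches on the subdev index and selects the matching falcon func table; the MMIO base (0x104000 vs 0x105000) and PMC enable bit travel with the choice. Hedged usage sketch:

	struct nvkm_engine *ce0;
	int ret = gf100_ce_new(device, NVKM_ENGINE_CE0, &ce0);
	if (ret)	/* -ENODEV for any index other than CE0/CE1 */
		return ret;
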
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index a998932fae45..c541a1c012dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -21,153 +21,47 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
+#include "priv.h"
 
-#include <core/engctx.h>
+#include <nvif/class.h>
 
-struct gk104_ce_priv {
-	struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_ce_sclass[] = {
-	{ 0xa0b5, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gk104_ce_context_ofuncs = {
-	.ctor = _nvkm_engctx_ctor,
-	.dtor = _nvkm_engctx_dtor,
-	.init = _nvkm_engctx_init,
-	.fini = _nvkm_engctx_fini,
-	.rd32 = _nvkm_engctx_rd32,
-	.wr32 = _nvkm_engctx_wr32,
-};
-
-static struct nvkm_oclass
-gk104_ce_cclass = {
-	.handle = NV_ENGCTX(CE0, 0xc0),
-	.ofuncs = &gk104_ce_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
-
-static void
-gk104_ce_intr(struct nvkm_subdev *subdev)
+void
+gk104_ce_intr(struct nvkm_engine *ce)
 {
-	const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
-	struct gk104_ce_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
-
+	const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
+	struct nvkm_subdev *subdev = &ce->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x104908 + base);
 	if (stat) {
-		nv_warn(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+		nvkm_warn(subdev, "intr %08x\n", stat);
+		nvkm_wr32(device, 0x104908 + base, stat);
 	}
 }
 
-static int
-gk104_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gk104_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000040;
-	nv_subdev(priv)->intr = gk104_ce_intr;
-	nv_engine(priv)->cclass = &gk104_ce_cclass;
-	nv_engine(priv)->sclass = gk104_ce_sclass;
-	return 0;
-}
-
-static int
-gk104_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gk104_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE1", "ce1", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000080;
-	nv_subdev(priv)->intr = gk104_ce_intr;
-	nv_engine(priv)->cclass = &gk104_ce_cclass;
-	nv_engine(priv)->sclass = gk104_ce_sclass;
-	return 0;
-}
+static const struct nvkm_engine_func
+gk104_ce = {
+	.intr = gk104_ce_intr,
+	.sclass = {
+		{ -1, -1, KEPLER_DMA_COPY_A },
+		{}
+	}
+};
 
-static int
-gk104_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gk104_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
 {
-	struct gk104_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE2", "ce2", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00200000;
-	nv_subdev(priv)->intr = gk104_ce_intr;
-	nv_engine(priv)->cclass = &gk104_ce_cclass;
-	nv_engine(priv)->sclass = gk104_ce_sclass;
-	return 0;
+	if (index == NVKM_ENGINE_CE0) {
+		return nvkm_engine_new_(&gk104_ce, device, index,
+					0x00000040, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE1) {
+		return nvkm_engine_new_(&gk104_ce, device, index,
+					0x00000080, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE2) {
+		return nvkm_engine_new_(&gk104_ce, device, index,
+					0x00200000, true, pengine);
+	}
+	return -ENODEV;
 }
-
-struct nvkm_oclass
-gk104_ce0_oclass = {
-	.handle = NV_ENGINE(CE0, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ce0_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
-
-struct nvkm_oclass
-gk104_ce1_oclass = {
-	.handle = NV_ENGINE(CE1, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ce1_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
-
-struct nvkm_oclass
-gk104_ce2_oclass = {
-	.handle = NV_ENGINE(CE2, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ce2_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
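
From gk104 onward the copy engines drop the falcon microcode entirely and become plain engines sharing one const func table; only the PMC enable bit differs per index. The three-way if/else above could equally be written as a table walk, offered here as a sketch rather than something the patch does:

	/* all three PCEs share gk104_ce; only the PMC bit varies */
	static const u32 ce_pmc[3] = { 0x00000040, 0x00000080, 0x00200000 };
	int i = index - NVKM_ENGINE_CE0;

	if (i >= 0 && i < ARRAY_SIZE(ce_pmc))
		return nvkm_engine_new_(&gk104_ce, device, index,
					ce_pmc[i], true, pengine);
	return -ENODEV;
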
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
index 577eb2eead05..8eaa72a59f40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
@@ -21,153 +21,34 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
+#include "priv.h"
 
-#include <core/engctx.h>
+#include <nvif/class.h>
 
-struct gm204_ce_priv {
-	struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm204_ce_sclass[] = {
-	{ 0xb0b5, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gm204_ce_context_ofuncs = {
-	.ctor = _nvkm_engctx_ctor,
-	.dtor = _nvkm_engctx_dtor,
-	.init = _nvkm_engctx_init,
-	.fini = _nvkm_engctx_fini,
-	.rd32 = _nvkm_engctx_rd32,
-	.wr32 = _nvkm_engctx_wr32,
-};
-
-static struct nvkm_oclass
-gm204_ce_cclass = {
-	.handle = NV_ENGCTX(CE0, 0x24),
-	.ofuncs = &gm204_ce_context_ofuncs,
+static const struct nvkm_engine_func
+gm204_ce = {
+	.intr = gk104_ce_intr,
+	.sclass = {
+		{ -1, -1, MAXWELL_DMA_COPY_A },
+		{}
+	}
 };
 
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
-
-static void
-gm204_ce_intr(struct nvkm_subdev *subdev)
+int
+gm204_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
 {
-	const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
-	struct gm204_ce_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
-
-	if (stat) {
-		nv_warn(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+	if (index == NVKM_ENGINE_CE0) {
+		return nvkm_engine_new_(&gm204_ce, device, index,
+					0x00000040, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE1) {
+		return nvkm_engine_new_(&gm204_ce, device, index,
+					0x00000080, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE2) {
+		return nvkm_engine_new_(&gm204_ce, device, index,
+					0x00200000, true, pengine);
 	}
+	return -ENODEV;
 }
-
-static int
-gm204_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gm204_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000040;
-	nv_subdev(priv)->intr = gm204_ce_intr;
-	nv_engine(priv)->cclass = &gm204_ce_cclass;
-	nv_engine(priv)->sclass = gm204_ce_sclass;
-	return 0;
-}
-
-static int
-gm204_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gm204_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE1", "ce1", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000080;
-	nv_subdev(priv)->intr = gm204_ce_intr;
-	nv_engine(priv)->cclass = &gm204_ce_cclass;
-	nv_engine(priv)->sclass = gm204_ce_sclass;
-	return 0;
-}
-
-static int
-gm204_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gm204_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE2", "ce2", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00200000;
-	nv_subdev(priv)->intr = gm204_ce_intr;
-	nv_engine(priv)->cclass = &gm204_ce_cclass;
-	nv_engine(priv)->sclass = gm204_ce_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gm204_ce0_oclass = {
-	.handle = NV_ENGINE(CE0, 0x24),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_ce0_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
-
-struct nvkm_oclass
-gm204_ce1_oclass = {
-	.handle = NV_ENGINE(CE1, 0x24),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_ce1_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
-
-struct nvkm_oclass
-gm204_ce2_oclass = {
-	.handle = NV_ENGINE(CE2, 0x24),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_ce2_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index d8bb4293bc11..402dcbcc2192 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -21,50 +21,15 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
-#include <engine/falcon.h>
-#include <engine/fifo.h>
+#include "priv.h"
 #include "fuc/gt215.fuc3.h"
 
 #include <core/client.h>
-#include <core/device.h>
 #include <core/enum.h>
+#include <core/gpuobj.h>
+#include <engine/fifo.h>
 
-struct gt215_ce_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_ce_sclass[] = {
-	{ 0x85b5, &nvkm_object_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_ce_cclass = {
-	.handle = NV_ENGCTX(CE0, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-
-	},
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
 static const struct nvkm_enum
 gt215_ce_isr_error_name[] = {
@@ -75,78 +40,45 @@ gt215_ce_isr_error_name[] = {
 };
 
 void
-gt215_ce_intr(struct nvkm_subdev *subdev)
+gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_falcon *falcon = (void *)subdev;
-	struct nvkm_object *engctx;
-	u32 dispatch = nv_ro32(falcon, 0x01c);
-	u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
-	u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
-	u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
-	u32 addr = nv_ro32(falcon, 0x040) >> 16;
+	struct nvkm_subdev *subdev = &ce->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = (subdev->index - NVKM_ENGINE_CE0) * 0x1000;
+	u32 ssta = nvkm_rd32(device, 0x104040 + base) & 0x0000ffff;
+	u32 addr = nvkm_rd32(device, 0x104040 + base) >> 16;
 	u32 mthd = (addr & 0x07ff) << 2;
 	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_ro32(falcon, 0x044);
-	int chid;
-
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
-	if (stat & 0x00000040) {
-		nv_error(falcon, "DISPATCH_ERROR [");
-		nvkm_enum_print(gt215_ce_isr_error_name, ssta);
-		pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
-		       chid, inst << 12, nvkm_client_name(engctx), subc,
-		       mthd, data);
-		nv_wo32(falcon, 0x004, 0x00000040);
-		stat &= ~0x00000040;
-	}
+	u32 data = nvkm_rd32(device, 0x104044 + base);
+	const struct nvkm_enum *en =
+		nvkm_enum_find(gt215_ce_isr_error_name, ssta);
+
+	nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
+			   "subc %d mthd %04x data %08x\n", ssta,
+		   en ? en->name : "", chan ? chan->chid : -1,
+		   chan ? chan->inst->addr : 0,
+		   chan ? chan->object.client->name : "unknown",
+		   subc, mthd, data);
+}
 
-	if (stat) {
-		nv_error(falcon, "unhandled intr 0x%08x\n", stat);
-		nv_wo32(falcon, 0x004, stat);
+static const struct nvkm_falcon_func
+gt215_ce = {
+	.code.data = gt215_ce_code,
+	.code.size = sizeof(gt215_ce_code),
+	.data.data = gt215_ce_data,
+	.data.size = sizeof(gt215_ce_data),
+	.pmc_enable = 0x00802000,
+	.intr = gt215_ce_intr,
+	.sclass = {
+		{ -1, -1, GT212_DMA },
+		{}
 	}
+};
 
-	nvkm_engctx_put(engctx);
-}
-
-static int
-gt215_ce_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+gt215_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
 {
-	bool enable = (nv_device(parent)->chipset != 0xaf);
-	struct gt215_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, enable,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00802000;
-	nv_subdev(priv)->intr = gt215_ce_intr;
-	nv_engine(priv)->cclass = &gt215_ce_cclass;
-	nv_engine(priv)->sclass = gt215_ce_sclass;
-	nv_falcon(priv)->code.data = gt215_pce_code;
-	nv_falcon(priv)->code.size = sizeof(gt215_pce_code);
-	nv_falcon(priv)->data.data = gt215_pce_data;
-	nv_falcon(priv)->data.size = sizeof(gt215_pce_data);
-	return 0;
+	return nvkm_falcon_new_(&gt215_ce, device, index,
+				(device->chipset != 0xaf), 0x104000, pengine);
 }
-
-struct nvkm_oclass
-gt215_ce_oclass = {
-	.handle = NV_ENGINE(CE0, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_ce_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = _nvkm_falcon_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
new file mode 100644
index 000000000000..e2fa8b161943
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -0,0 +1,7 @@
+#ifndef __NVKM_CE_PRIV_H__
+#define __NVKM_CE_PRIV_H__
+#include <engine/ce.h>
+
+void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+void gk104_ce_intr(struct nvkm_engine *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index 13f30428a305..bfd01625ec7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -25,76 +25,47 @@
 #include <engine/fifo.h>
 
 #include <core/client.h>
-#include <core/engctx.h>
 #include <core/enum.h>
+#include <core/gpuobj.h>
 
-struct g84_cipher_priv {
-	struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Crypt object classes
- ******************************************************************************/
+#include <nvif/class.h>
 
 static int
-g84_cipher_object_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+g84_cipher_oclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		       int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
+				  align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_ofuncs
-g84_cipher_ofuncs = {
-	.ctor = g84_cipher_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
+static const struct nvkm_object_func
+g84_cipher_oclass_func = {
+	.bind = g84_cipher_oclass_bind,
 };
 
-static struct nvkm_oclass
-g84_cipher_sclass[] = {
-	{ 0x74c1, &g84_cipher_ofuncs },
-	{}
-};
+static int
+g84_cipher_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		       int align, struct nvkm_gpuobj **pgpuobj)
+{
+	return nvkm_gpuobj_new(object->engine->subdev.device, 256,
+			       align, true, parent, pgpuobj);
 
-/*******************************************************************************
- * PCIPHER context
- ******************************************************************************/
+}
 
-static struct nvkm_oclass
+static const struct nvkm_object_func
 g84_cipher_cclass = {
-	.handle = NV_ENGCTX(CIPHER, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_engctx_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
+	.bind = g84_cipher_cclass_bind,
 };
 
-/*******************************************************************************
- * PCIPHER engine/subdev functions
- ******************************************************************************/
-
 static const struct nvkm_bitfield
 g84_cipher_intr_mask[] = {
 	{ 0x00000001, "INVALID_STATE" },
@@ -106,79 +77,59 @@ g84_cipher_intr_mask[] = {
 };
 
 static void
-g84_cipher_intr(struct nvkm_subdev *subdev)
+g84_cipher_intr(struct nvkm_engine *cipher)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct g84_cipher_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x102130);
-	u32 mthd = nv_rd32(priv, 0x102190);
-	u32 data = nv_rd32(priv, 0x102194);
-	u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
-	int chid;
-
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
+	struct nvkm_subdev *subdev = &cipher->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo *fifo = device->fifo;
+	struct nvkm_fifo_chan *chan;
+	u32 stat = nvkm_rd32(device, 0x102130);
+	u32 mthd = nvkm_rd32(device, 0x102190);
+	u32 data = nvkm_rd32(device, 0x102194);
+	u32 inst = nvkm_rd32(device, 0x102188) & 0x7fffffff;
+	unsigned long flags;
+	char msg[128];
+
+	chan = nvkm_fifo_chan_inst(fifo, (u64)inst << 12, &flags);
 	if (stat) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(g84_cipher_intr_mask, stat);
-		pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
-		       chid, (u64)inst << 12, nvkm_client_name(engctx),
-		       mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), g84_cipher_intr_mask, stat);
+		nvkm_error(subdev,  "%08x [%s] ch %d [%010llx %s] "
+				    "mthd %04x data %08x\n", stat, msg,
+			   chan ? chan->chid : -1, (u64)inst << 12,
+			   chan ? chan->object.client->name : "unknown",
+			   mthd, data);
 	}
+	nvkm_fifo_chan_put(fifo, flags, &chan);
 
-	nv_wr32(priv, 0x102130, stat);
-	nv_wr32(priv, 0x10200c, 0x10);
-
-	nvkm_engctx_put(engctx);
+	nvkm_wr32(device, 0x102130, stat);
+	nvkm_wr32(device, 0x10200c, 0x10);
 }
 
 static int
-g84_cipher_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+g84_cipher_init(struct nvkm_engine *cipher)
 {
-	struct g84_cipher_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCIPHER", "cipher", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00004000;
-	nv_subdev(priv)->intr = g84_cipher_intr;
-	nv_engine(priv)->cclass = &g84_cipher_cclass;
-	nv_engine(priv)->sclass = g84_cipher_sclass;
+	struct nvkm_device *device = cipher->subdev.device;
+	nvkm_wr32(device, 0x102130, 0xffffffff);
+	nvkm_wr32(device, 0x102140, 0xffffffbf);
+	nvkm_wr32(device, 0x10200c, 0x00000010);
 	return 0;
 }
 
-static int
-g84_cipher_init(struct nvkm_object *object)
-{
-	struct g84_cipher_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_engine_init(&priv->base);
-	if (ret)
-		return ret;
+static const struct nvkm_engine_func
+g84_cipher = {
+	.init = g84_cipher_init,
+	.intr = g84_cipher_intr,
+	.cclass = &g84_cipher_cclass,
+	.sclass = {
+		{ -1, -1, NV74_CIPHER, &g84_cipher_oclass_func },
+		{}
+	}
+};
 
-	nv_wr32(priv, 0x102130, 0xffffffff);
-	nv_wr32(priv, 0x102140, 0xffffffbf);
-	nv_wr32(priv, 0x10200c, 0x00000010);
-	return 0;
+int
+g84_cipher_new(struct nvkm_device *device, int index,
+	       struct nvkm_engine **pengine)
+{
+	return nvkm_engine_new_(&g84_cipher, device, index,
+				0x00004000, true, pengine);
 }
-
-struct nvkm_oclass
-g84_cipher_oclass = {
-	.handle = NV_ENGINE(CIPHER, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_cipher_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = g84_cipher_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
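
[editor's note] The shape this file converges on, repeated across the rework, is: one const function table per engine plus a thin constructor that passes the table to a common allocator (here nvkm_engine_new_()). A self-contained sketch of the pattern, with invented stand-in types rather than the real nvkm API:

	#include <stdio.h>
	#include <stdlib.h>

	struct engine;

	/* per-engine table of constant function pointers */
	struct engine_func {
		int  (*init)(struct engine *);
		void (*intr)(struct engine *);
	};

	struct engine {
		const struct engine_func *func;
		unsigned int unit;	/* interrupt unit mask, cf. 0x00004000 */
	};

	/* common allocator, standing in for nvkm_engine_new_() */
	static int
	engine_new_(const struct engine_func *func, unsigned int unit,
		    struct engine **pengine)
	{
		struct engine *engine = calloc(1, sizeof(*engine));
		if (!engine)
			return -1;
		engine->func = func;
		engine->unit = unit;
		*pengine = engine;
		return 0;
	}

	static int  cipher_init(struct engine *e)
	            { printf("init unit %08x\n", e->unit); return 0; }
	static void cipher_intr(struct engine *e) { printf("intr\n"); }

	static const struct engine_func cipher = {
		.init = cipher_init,
		.intr = cipher_intr,
	};

	int
	main(void)
	{
		struct engine *engine = NULL;
		if (engine_new_(&cipher, 0x00004000, &engine) == 0 &&
		    engine->func->init(engine) == 0)
			engine->func->intr(engine);
		free(engine);
		return 0;
	}

The payoff visible in the diff: the per-object boilerplate (ctor/dtor/rd32/wr32 ofuncs, the priv wrapper struct) disappears, and the remaining code touches hardware only through nvkm_rd32()/nvkm_wr32() on the device.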
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
index de1bf092b2b2..09032ba36000 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
@@ -1,12 +1,6 @@
 nvkm-y += nvkm/engine/device/acpi.o
 nvkm-y += nvkm/engine/device/base.o
 nvkm-y += nvkm/engine/device/ctrl.o
-nvkm-y += nvkm/engine/device/nv04.o
-nvkm-y += nvkm/engine/device/nv10.o
-nvkm-y += nvkm/engine/device/nv20.o
-nvkm-y += nvkm/engine/device/nv30.o
-nvkm-y += nvkm/engine/device/nv40.o
-nvkm-y += nvkm/engine/device/nv50.o
-nvkm-y += nvkm/engine/device/gf100.o
-nvkm-y += nvkm/engine/device/gk104.o
-nvkm-y += nvkm/engine/device/gm100.o
+nvkm-y += nvkm/engine/device/pci.o
+nvkm-y += nvkm/engine/device/tegra.o
+nvkm-y += nvkm/engine/device/user.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
index f42706e1d5db..fdca90bc8f0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
@@ -40,21 +40,19 @@ nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
 }
 #endif
 
-int
-nvkm_acpi_fini(struct nvkm_device *device, bool suspend)
+void
+nvkm_acpi_fini(struct nvkm_device *device)
 {
 #ifdef CONFIG_ACPI
 	unregister_acpi_notifier(&device->acpi.nb);
 #endif
-	return 0;
 }
 
-int
+void
 nvkm_acpi_init(struct nvkm_device *device)
 {
 #ifdef CONFIG_ACPI
 	device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
 	register_acpi_notifier(&device->acpi.nb);
 #endif
-	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
index 82dd359ddfa4..1bbe76e0740a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
@@ -3,6 +3,6 @@
 #include <core/os.h>
 struct nvkm_device;
 
-int nvkm_acpi_init(struct nvkm_device *);
-int nvkm_acpi_fini(struct nvkm_device *, bool);
+void nvkm_acpi_init(struct nvkm_device *);
+void nvkm_acpi_fini(struct nvkm_device *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 63d8e52f4b22..94a906b8cb88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -24,33 +24,33 @@
 #include "priv.h"
 #include "acpi.h"
 
-#include <core/client.h>
-#include <core/option.h>
 #include <core/notify.h>
-#include <core/parent.h>
-#include <subdev/bios.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
+#include <core/option.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
+#include <subdev/bios.h>
 
 static DEFINE_MUTEX(nv_devices_mutex);
 static LIST_HEAD(nv_devices);
 
-struct nvkm_device *
-nvkm_device_find(u64 name)
+static struct nvkm_device *
+nvkm_device_find_locked(u64 handle)
 {
-	struct nvkm_device *device, *match = NULL;
-	mutex_lock(&nv_devices_mutex);
+	struct nvkm_device *device;
 	list_for_each_entry(device, &nv_devices, head) {
-		if (device->handle == name) {
-			match = device;
-			break;
-		}
+		if (device->handle == handle)
+			return device;
 	}
+	return NULL;
+}
+
+struct nvkm_device *
+nvkm_device_find(u64 handle)
+{
+	struct nvkm_device *device;
+	mutex_lock(&nv_devices_mutex);
+	device = nvkm_device_find_locked(handle);
 	mutex_unlock(&nv_devices_mutex);
-	return match;
+	return device;
 }
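
[editor's note] The lookup refactor above is the standard "_locked helper" split: the list walk moves into nvkm_device_find_locked() for callers that already hold the mutex, while the public nvkm_device_find() keeps the lock/unlock pair. A runnable sketch of the same split, with pthreads standing in for the kernel mutex:

	#include <pthread.h>
	#include <stdio.h>

	struct device {
		unsigned long long handle;
		struct device *next;
	};

	static pthread_mutex_t devices_mutex = PTHREAD_MUTEX_INITIALIZER;
	static struct device nv1 = { 0x100, NULL };
	static struct device *devices = &nv1;

	/* caller must hold devices_mutex */
	static struct device *
	device_find_locked(unsigned long long handle)
	{
		struct device *device;
		for (device = devices; device; device = device->next) {
			if (device->handle == handle)
				return device;
		}
		return NULL;
	}

	static struct device *
	device_find(unsigned long long handle)
	{
		struct device *device;
		pthread_mutex_lock(&devices_mutex);
		device = device_find_locked(handle);
		pthread_mutex_unlock(&devices_mutex);
		return device;
	}

	int
	main(void)
	{
		printf("found: %s\n", device_find(0x100) ? "yes" : "no");
		return 0;
	}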
 
 int
@@ -67,280 +67,2272 @@ nvkm_device_list(u64 *name, int size)
 	return nr;
 }
 
-/******************************************************************************
- * nvkm_devobj (0x0080): class implementation
- *****************************************************************************/
+static const struct nvkm_device_chip
+null_chipset = {
+	.name = "NULL",
+	.bios = nvkm_bios_new,
+};
+
+static const struct nvkm_device_chip
+nv4_chipset = {
+	.name = "NV04",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv04_devinit_new,
+	.fb = nv04_fb_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv04_fifo_new,
+	.gr = nv04_gr_new,
+	.sw = nv04_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv5_chipset = {
+	.name = "NV05",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv05_devinit_new,
+	.fb = nv04_fb_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv04_fifo_new,
+	.gr = nv04_gr_new,
+	.sw = nv04_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv10_chipset = {
+	.name = "NV10",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.gr = nv10_gr_new,
+};
+
+static const struct nvkm_device_chip
+nv11_chipset = {
+	.name = "NV11",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv10_fifo_new,
+	.gr = nv15_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv15_chipset = {
+	.name = "NV15",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv10_fifo_new,
+	.gr = nv15_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv17_chipset = {
+	.name = "NV17",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv17_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv18_chipset = {
+	.name = "NV18",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv17_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv1a_chipset = {
+	.name = "nForce",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv1a_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv10_fifo_new,
+	.gr = nv15_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv1f_chipset = {
+	.name = "nForce2",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv1a_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv17_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv20_chipset = {
+	.name = "NV20",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv20_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv20_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv25_chipset = {
+	.name = "NV25",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv25_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv25_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv28_chipset = {
+	.name = "NV28",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv25_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv25_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv2a_chipset = {
+	.name = "NV2A",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv25_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv2a_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv30_chipset = {
+	.name = "NV30",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv30_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv30_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv31_chipset = {
+	.name = "NV31",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv30_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv30_gr_new,
+	.mpeg = nv31_mpeg_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv34_chipset = {
+	.name = "NV34",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv34_gr_new,
+	.mpeg = nv31_mpeg_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv35_chipset = {
+	.name = "NV35",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv35_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv35_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv36_chipset = {
+	.name = "NV36",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv36_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv35_gr_new,
+	.mpeg = nv31_mpeg_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv40_chipset = {
+	.name = "NV40",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv40_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv40_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv41_chipset = {
+	.name = "NV41",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv41_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv42_chipset = {
+	.name = "NV42",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv41_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv43_chipset = {
+	.name = "NV43",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv41_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv44_chipset = {
+	.name = "NV44",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv44_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv45_chipset = {
+	.name = "NV45",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv40_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv46_chipset = {
+	.name = "G72",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv47_chipset = {
+	.name = "G70",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv47_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv49_chipset = {
+	.name = "G71",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv49_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4a_chipset = {
+	.name = "NV44A",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv44_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4b_chipset = {
+	.name = "G73",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv49_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4c_chipset = {
+	.name = "C61",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4e_chipset = {
+	.name = "C51",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv4e_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv4e_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv50_chipset = {
+	.name = "G80",
+	.bar = nv50_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = nv50_clk_new,
+	.devinit = nv50_devinit_new,
+	.fb = nv50_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = nv50_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv50_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = nv50_fifo_new,
+	.gr = nv50_gr_new,
+	.mpeg = nv50_mpeg_new,
+	.pm = nv50_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv63_chipset = {
+	.name = "C73",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv67_chipset = {
+	.name = "C67",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv68_chipset = {
+	.name = "C68",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv84_chipset = {
+	.name = "G84",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g84_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv86_chipset = {
+	.name = "G86",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g84_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv92_chipset = {
+	.name = "G92",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g84_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv94_chipset = {
+	.name = "G94",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv96_chipset = {
+	.name = "G96",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv98_chipset = {
+	.name = "G98",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g98_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mspdec = g98_mspdec_new,
+	.msppp = g98_msppp_new,
+	.msvld = g98_msvld_new,
+	.pm = g84_pm_new,
+	.sec = g98_sec_new,
+	.sw = nv50_sw_new,
+};
 
-struct nvkm_devobj {
-	struct nvkm_parent base;
-	struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
+static const struct nvkm_device_chip
+nva0_chipset = {
+	.name = "GT200",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = gt200_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt200_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = gt200_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nva3_chipset = {
+	.name = "GT215",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = gt215_devinit_new,
+	.fb = gt215_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt215_gr_new,
+	.mpeg = g84_mpeg_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = gt215_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nva5_chipset = {
+	.name = "GT216",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = gt215_devinit_new,
+	.fb = gt215_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt215_gr_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = gt215_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nva8_chipset = {
+	.name = "GT218",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = gt215_devinit_new,
+	.fb = gt215_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt215_gr_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = gt215_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvaa_chipset = {
+	.name = "MCP77/MCP78",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = mcp77_clk_new,
+	.devinit = g98_devinit_new,
+	.fb = mcp77_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt200_gr_new,
+	.mspdec = g98_mspdec_new,
+	.msppp = g98_msppp_new,
+	.msvld = g98_msvld_new,
+	.pm = g84_pm_new,
+	.sec = g98_sec_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvac_chipset = {
+	.name = "MCP79/MCP7A",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = mcp77_clk_new,
+	.devinit = g98_devinit_new,
+	.fb = mcp77_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = mcp79_gr_new,
+	.mspdec = g98_mspdec_new,
+	.msppp = g98_msppp_new,
+	.msvld = g98_msvld_new,
+	.pm = g84_pm_new,
+	.sec = g98_sec_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvaf_chipset = {
+	.name = "MCP89",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = mcp89_devinit_new,
+	.fb = mcp89_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = mcp89_gr_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = mcp89_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc0_chipset = {
+	.name = "GF100",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf100_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc1_chipset = {
+	.name = "GF108",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf108_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf108_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc3_chipset = {
+	.name = "GF106",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc4_chipset = {
+	.name = "GF104",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc8_chipset = {
+	.name = "GF110",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf110_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvce_chipset = {
+	.name = "GF114",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvcf_chipset = {
+	.name = "GF116",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvd7_chipset = {
+	.name = "GF117",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gf119_gpio_new,
+	.i2c = gf117_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gf119_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf117_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf117_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvd9_chipset = {
+	.name = "GF119",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gf119_gpio_new,
+	.i2c = gf119_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf119_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gf119_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf119_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf117_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve4_chipset = {
+	.name = "GK104",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk104_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk104_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk104_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve6_chipset = {
+	.name = "GK106",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk104_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk104_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk104_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve7_chipset = {
+	.name = "GK107",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf119_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk104_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk104_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvea_chipset = {
+	.name = "GK20A",
+	.bar = gk20a_bar_new,
+	.bus = gf100_bus_new,
+	.clk = gk20a_clk_new,
+	.fb = gk20a_fb_new,
+	.fuse = gf100_fuse_new,
+	.ibus = gk20a_ibus_new,
+	.imem = gk20a_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.pmu = gk20a_pmu_new,
+	.timer = gk20a_timer_new,
+	.volt = gk20a_volt_new,
+	.ce[2] = gk104_ce_new,
+	.dma = gf119_dma_new,
+	.fifo = gk20a_fifo_new,
+	.gr = gk20a_gr_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvf0_chipset = {
+	.name = "GK110",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk110_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk110_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvf1_chipset = {
+	.name = "GK110B",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gf119_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk110_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk110b_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv106_chipset = {
+	.name = "GK208B",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk208_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk208_fifo_new,
+	.gr = gk208_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv108_chipset = {
+	.name = "GK208",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk208_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk208_fifo_new,
+	.gr = gk208_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv117_chipset = {
+	.name = "GM107",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gm107_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gf119_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gm107_pmu_new,
+	.therm = gm107_therm_new,
+	.timer = gk20a_timer_new,
+	.ce[0] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gm107_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk208_fifo_new,
+	.gr = gm107_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv124_chipset = {
+	.name = "GM204",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = gm204_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm204_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gm107_pmu_new,
+	.timer = gk20a_timer_new,
+	.ce[0] = gm204_ce_new,
+	.ce[1] = gm204_ce_new,
+	.ce[2] = gm204_ce_new,
+	.disp = gm204_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gm204_fifo_new,
+	.gr = gm204_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv126_chipset = {
+	.name = "GM206",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = gm204_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm204_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gm107_pmu_new,
+	.timer = gk20a_timer_new,
+	.ce[0] = gm204_ce_new,
+	.ce[1] = gm204_ce_new,
+	.ce[2] = gm204_ce_new,
+	.disp = gm204_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gm204_fifo_new,
+	.gr = gm206_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv12b_chipset = {
+	.name = "GM20B",
+	.bar = gk20a_bar_new,
+	.bus = gf100_bus_new,
+	.fb = gk20a_fb_new,
+	.fuse = gm107_fuse_new,
+	.ibus = gk20a_ibus_new,
+	.imem = gk20a_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.timer = gk20a_timer_new,
+	.ce[2] = gm204_ce_new,
+	.dma = gf119_dma_new,
+	.fifo = gm20b_fifo_new,
+	.gr = gm20b_gr_new,
+	.sw = gf100_sw_new,
 };
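
[editor's note] The long run of nvkm_device_chip tables above replaces the old per-family device classes: every chipset gets one const table of constructor pointers, with absent members left NULL for subdevs/engines that chipset lacks. A compact sketch of the idea (struct members and the chip_for() selector are invented; the real selection on device->chipset happens later in this file, outside this excerpt):

	#include <stdio.h>

	struct chip {
		const char *name;
		int (*fb)(void);	/* NULL when the chipset has none */
		int (*gr)(void);
	};

	static int nv04_fb(void) { printf("nv04 fb\n"); return 0; }
	static int nv04_gr(void) { printf("nv04 gr\n"); return 0; }

	static const struct chip nv4_chip  = { "NV04", nv04_fb, nv04_gr };
	static const struct chip null_chip = { "NULL" };

	static const struct chip *
	chip_for(int chipset)
	{
		switch (chipset) {
		case 0x04: return &nv4_chip;
		default:   return &null_chip;
		}
	}

	int
	main(void)
	{
		const struct chip *chip = chip_for(0x04);
		printf("probing %s\n", chip->name);
		if (chip->fb) chip->fb();	/* skip missing subdevs */
		if (chip->gr) chip->gr();
		return 0;
	}

The tables are data, not code, so adding a chipset is a matter of declaring one more struct; the repetition across entries is deliberate and greppable.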
 
 static int
-nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
+nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
+		       struct nvkm_notify *notify)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_fb *pfb = nvkm_fb(device);
-	struct nvkm_instmem *imem = nvkm_instmem(device);
-	union {
-		struct nv_device_info_v0 v0;
-	} *args = data;
-	int ret;
-
-	nv_ioctl(object, "device info size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "device info vers %d\n", args->v0.version);
-	} else
-		return ret;
+	if (!WARN_ON(size != 0)) {
+		notify->size  = 0;
+		notify->types = 1;
+		notify->index = 0;
+		return 0;
+	}
+	return -EINVAL;
+}
 
-	switch (device->chipset) {
-	case 0x01a:
-	case 0x01f:
-	case 0x04c:
-	case 0x04e:
-	case 0x063:
-	case 0x067:
-	case 0x068:
-	case 0x0aa:
-	case 0x0ac:
-	case 0x0af:
-		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
-		break;
+static const struct nvkm_event_func
+nvkm_device_event_func = {
+	.ctor = nvkm_device_event_ctor,
+};
+
+struct nvkm_subdev *
+nvkm_device_subdev(struct nvkm_device *device, int index)
+{
+	struct nvkm_engine *engine;
+
+	if (device->disable_mask & (1ULL << index))
+		return NULL;
+
+	switch (index) {
+#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
+	_(BAR    , device->bar    , &device->bar->subdev);
+	_(VBIOS  , device->bios   , &device->bios->subdev);
+	_(BUS    , device->bus    , &device->bus->subdev);
+	_(CLK    , device->clk    , &device->clk->subdev);
+	_(DEVINIT, device->devinit, &device->devinit->subdev);
+	_(FB     , device->fb     , &device->fb->subdev);
+	_(FUSE   , device->fuse   , &device->fuse->subdev);
+	_(GPIO   , device->gpio   , &device->gpio->subdev);
+	_(I2C    , device->i2c    , &device->i2c->subdev);
+	_(IBUS   , device->ibus   ,  device->ibus);
+	_(INSTMEM, device->imem   , &device->imem->subdev);
+	_(LTC    , device->ltc    , &device->ltc->subdev);
+	_(MC     , device->mc     , &device->mc->subdev);
+	_(MMU    , device->mmu    , &device->mmu->subdev);
+	_(MXM    , device->mxm    ,  device->mxm);
+	_(PCI    , device->pci    , &device->pci->subdev);
+	_(PMU    , device->pmu    , &device->pmu->subdev);
+	_(THERM  , device->therm  , &device->therm->subdev);
+	_(TIMER  , device->timer  , &device->timer->subdev);
+	_(VOLT   , device->volt   , &device->volt->subdev);
+#undef _
 	default:
-		if (device->pdev) {
-			if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
-				args->v0.platform = NV_DEVICE_INFO_V0_AGP;
-			else
-			if (pci_is_pcie(device->pdev))
-				args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
-			else
-				args->v0.platform = NV_DEVICE_INFO_V0_PCI;
-		} else {
-			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
-		}
+		engine = nvkm_device_engine(device, index);
+		if (engine)
+			return &engine->subdev;
 		break;
 	}
+	return NULL;
+}
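
[editor's note] The _() helper above is a locally-scoped macro: each (enum, pointer, member) triple expands to a switch case that returns the member only when the pointer is populated, and the macro is #undef'd immediately after the list. A runnable model with invented structure names:

	#include <stdio.h>
	#include <stddef.h>

	struct subdev  { const char *name; };
	struct fb      { struct subdev subdev; };
	struct device  { struct fb *fb; struct subdev *mxm; };

	enum { SUBDEV_FB, SUBDEV_MXM, SUBDEV_NR };

	static struct subdev *
	device_subdev(struct device *device, int index)
	{
		switch (index) {
	#define _(n,p,m) case SUBDEV_##n: if (p) return (m); break
		_(FB , device->fb , &device->fb->subdev);	/* embedded subdev */
		_(MXM, device->mxm,  device->mxm);		/* already a subdev */
	#undef _
		default:
			break;
		}
		return NULL;
	}

	int
	main(void)
	{
		struct fb fb = { { "fb" } };
		struct device device = { &fb, NULL };
		struct subdev *subdev = device_subdev(&device, SUBDEV_FB);
		printf("fb: %s, mxm: %s\n",
		       subdev ? subdev->name : "none",
		       device_subdev(&device, SUBDEV_MXM) ? "present" : "none");
		return 0;
	}

Note the two argument shapes, mirrored in the kernel code: most subdevs embed a struct nvkm_subdev (so the case takes its address), while a few (IBUS, MXM) already are bare subdev pointers.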
 
-	switch (device->card_type) {
-	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
-	case NV_10:
-	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
-	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
-	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
-	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
-	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
-	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
-	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
-	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+struct nvkm_engine *
+nvkm_device_engine(struct nvkm_device *device, int index)
+{
+	if (device->disable_mask & (1ULL << index))
+		return NULL;
+
+	switch (index) {
+#define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
+	_(BSP    , device->bsp    ,  device->bsp);
+	_(CE0    , device->ce[0]  ,  device->ce[0]);
+	_(CE1    , device->ce[1]  ,  device->ce[1]);
+	_(CE2    , device->ce[2]  ,  device->ce[2]);
+	_(CIPHER , device->cipher ,  device->cipher);
+	_(DISP   , device->disp   , &device->disp->engine);
+	_(DMAOBJ , device->dma    , &device->dma->engine);
+	_(FIFO   , device->fifo   , &device->fifo->engine);
+	_(GR     , device->gr     , &device->gr->engine);
+	_(IFB    , device->ifb    ,  device->ifb);
+	_(ME     , device->me     ,  device->me);
+	_(MPEG   , device->mpeg   ,  device->mpeg);
+	_(MSENC  , device->msenc  ,  device->msenc);
+	_(MSPDEC , device->mspdec ,  device->mspdec);
+	_(MSPPP  , device->msppp  ,  device->msppp);
+	_(MSVLD  , device->msvld  ,  device->msvld);
+	_(PM     , device->pm     , &device->pm->engine);
+	_(SEC    , device->sec    ,  device->sec);
+	_(SW     , device->sw     , &device->sw->engine);
+	_(VIC    , device->vic    ,  device->vic);
+	_(VP     , device->vp     ,  device->vp);
+#undef _
 	default:
-		args->v0.family = 0;
+		WARN_ON(1);
 		break;
 	}
+	return NULL;
+}
+
+int
+nvkm_device_fini(struct nvkm_device *device, bool suspend)
+{
+	const char *action = suspend ? "suspend" : "fini";
+	struct nvkm_subdev *subdev;
+	int ret, i;
+	s64 time;
+
+	nvdev_trace(device, "%s running...\n", action);
+	time = ktime_to_us(ktime_get());
+
+	nvkm_acpi_fini(device);
+
+	for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			ret = nvkm_subdev_fini(subdev, suspend);
+			if (ret && suspend)
+				goto fail;
+		}
+	}
 
-	args->v0.chipset  = device->chipset;
-	args->v0.revision = device->chiprev;
-	if (pfb && pfb->ram)
-		args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
-	else
-		args->v0.ram_size = args->v0.ram_user = 0;
-	if (imem && args->v0.ram_size > 0)
-		args->v0.ram_user = args->v0.ram_user - imem->reserved;
 
+	if (device->func->fini)
+		device->func->fini(device, suspend);
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvdev_trace(device, "%s completed in %lldus...\n", action, time);
 	return 0;
+
+fail:
+	do {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			int rret = nvkm_subdev_init(subdev);
+			if (rret)
+				nvkm_fatal(subdev, "failed restart, %d\n", rret);
+		}
+	} while (++i < NVKM_SUBDEV_NR);
+
+	nvdev_trace(device, "%s failed with %d\n", action, ret);
+	return ret;
 }
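
[editor's note] nvkm_device_fini() shuts subdevs down from the highest index back to zero; if one refuses to suspend, everything already shut down is brought back up by walking forward again from the failure point. A runnable sketch of that rollback loop (error values and names are illustrative):

	#include <stdio.h>

	#define SUBDEV_NR 4

	static int
	subdev_fini(int i, int suspend)
	{
		if (suspend && i == 1)
			return -16;	/* pretend subdev 1 is busy (-EBUSY) */
		printf("fini subdev %d\n", i);
		return 0;
	}

	static void subdev_init(int i) { printf("restart subdev %d\n", i); }

	static int
	device_fini(int suspend)
	{
		int ret, i;

		for (i = SUBDEV_NR - 1; i >= 0; i--) {
			ret = subdev_fini(i, suspend);
			if (ret && suspend)
				goto fail;
		}
		return 0;

	fail:
		/* re-init from the failing index upwards, inclusive */
		do {
			subdev_init(i);
		} while (++i < SUBDEV_NR);
		return ret;
	}

	int
	main(void)
	{
		printf("suspend: %d\n", device_fini(1));
		return 0;
	}

The rollback runs only on the suspend path; a plain fini presses on past individual failures, matching the `ret && suspend` test above.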
 
 static int
-nvkm_devobj_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+nvkm_device_preinit(struct nvkm_device *device)
 {
-	switch (mthd) {
-	case NV_DEVICE_V0_INFO:
-		return nvkm_devobj_info(object, data, size);
-	default:
-		break;
+	struct nvkm_subdev *subdev;
+	int ret, i;
+	s64 time;
+
+	nvdev_trace(device, "preinit running...\n");
+	time = ktime_to_us(ktime_get());
+
+	if (device->func->preinit) {
+		ret = device->func->preinit(device);
+		if (ret)
+			goto fail;
 	}
-	return -EINVAL;
-}
 
-static u8
-nvkm_devobj_rd08(struct nvkm_object *object, u64 addr)
-{
-	return nv_rd08(object->engine, addr);
-}
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			ret = nvkm_subdev_preinit(subdev);
+			if (ret)
+				goto fail;
+		}
+	}
 
-static u16
-nvkm_devobj_rd16(struct nvkm_object *object, u64 addr)
-{
-	return nv_rd16(object->engine, addr);
-}
+	ret = nvkm_devinit_post(device->devinit, &device->disable_mask);
+	if (ret)
+		goto fail;
 
-static u32
-nvkm_devobj_rd32(struct nvkm_object *object, u64 addr)
-{
-	return nv_rd32(object->engine, addr);
-}
+	time = ktime_to_us(ktime_get()) - time;
+	nvdev_trace(device, "preinit completed in %lldus\n", time);
+	return 0;
 
-static void
-nvkm_devobj_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
-	nv_wr08(object->engine, addr, data);
+fail:
+	nvdev_error(device, "preinit failed with %d\n", ret);
+	return ret;
 }
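/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * preinit/init/fini all bracket their work with ktime_to_us(ktime_get())
 * and trace the elapsed microseconds.  Outside the kernel, the same
 * bracket can be approximated with POSIX clock_gettime() (assumed
 * available; not nvkm API):
 */
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

static long long now_us(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
	long long time = now_us();

	/* ... the bracketed work would run here ... */

	time = now_us() - time;
	printf("preinit completed in %lldus\n", time);
	return 0;
}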
 
-static void
-nvkm_devobj_wr16(struct nvkm_object *object, u64 addr, u16 data)
+int
+nvkm_device_init(struct nvkm_device *device)
 {
-	nv_wr16(object->engine, addr, data);
-}
+	struct nvkm_subdev *subdev;
+	int ret, i;
+	s64 time;
 
-static void
-nvkm_devobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	nv_wr32(object->engine, addr, data);
-}
+	ret = nvkm_device_preinit(device);
+	if (ret)
+		return ret;
 
-static int
-nvkm_devobj_map(struct nvkm_object *object, u64 *addr, u32 *size)
-{
-	struct nvkm_device *device = nv_device(object);
-	*addr = nv_device_resource_start(device, 0);
-	*size = nv_device_resource_len(device, 0);
+	nvkm_device_fini(device, false);
+
+	nvdev_trace(device, "init running...\n");
+	time = ktime_to_us(ktime_get());
+
+	if (device->func->init) {
+		ret = device->func->init(device);
+		if (ret)
+			goto fail;
+	}
+
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			ret = nvkm_subdev_init(subdev);
+			if (ret)
+				goto fail_subdev;
+		}
+	}
+
+	nvkm_acpi_init(device);
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvdev_trace(device, "init completed in %lldus\n", time);
 	return 0;
+
+fail_subdev:
+	do {
+		if ((subdev = nvkm_device_subdev(device, i)))
+			nvkm_subdev_fini(subdev, false);
+	} while (--i >= 0);
+
+fail:
+	nvdev_error(device, "init failed with %d\n", ret);
+	return ret;
 }
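/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * nvkm_device_init() is the mirror image of the fini rollback above:
 * subdevs start in forward order, and on failure everything already
 * started is stopped again in reverse (the failed unit included)
 * before the error is reported.  Standalone sketch (demo_* names
 * hypothetical):
 */
#include <stdio.h>

#define DEMO_NR 4

static int demo_init_one(int i, int fail_at)
{
	if (i == fail_at)
		return -1;	/* this unit fails to start */
	printf("init %d\n", i);
	return 0;
}

static int demo_init_all(int fail_at)
{
	int ret, i;

	for (i = 0; i < DEMO_NR; i++) {
		ret = demo_init_one(i, fail_at);
		if (ret)
			goto fail_subdev;
	}
	return 0;

fail_subdev:	/* unwind from the failing index back down to zero */
	do {
		printf("fini %d\n", i);
	} while (--i >= 0);
	return ret;
}

int main(void)
{
	/* prints: init 0, init 1, then fini 2, fini 1, fini 0 */
	return demo_init_all(2) ? 1 : 0;
}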
 
-static const u64 disable_map[] = {
-	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_V0_DISABLE_VBIOS,
-	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_CLK  ]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_MXM]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_MC]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_BUS]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_FB]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_LTC]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_IBUS]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_MMU]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_PMU]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_FUSE]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_ENGINE_PM     ]  = NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_V0_DISABLE_FIFO,
-	[NVDEV_ENGINE_SW]	= NV_DEVICE_V0_DISABLE_FIFO,
-	[NVDEV_ENGINE_GR]	= NV_DEVICE_V0_DISABLE_GR,
-	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_V0_DISABLE_MPEG,
-	[NVDEV_ENGINE_ME]	= NV_DEVICE_V0_DISABLE_ME,
-	[NVDEV_ENGINE_VP]	= NV_DEVICE_V0_DISABLE_VP,
-	[NVDEV_ENGINE_CIPHER]	= NV_DEVICE_V0_DISABLE_CIPHER,
-	[NVDEV_ENGINE_BSP]	= NV_DEVICE_V0_DISABLE_BSP,
-	[NVDEV_ENGINE_MSPPP]	= NV_DEVICE_V0_DISABLE_MSPPP,
-	[NVDEV_ENGINE_CE0]	= NV_DEVICE_V0_DISABLE_CE0,
-	[NVDEV_ENGINE_CE1]	= NV_DEVICE_V0_DISABLE_CE1,
-	[NVDEV_ENGINE_CE2]	= NV_DEVICE_V0_DISABLE_CE2,
-	[NVDEV_ENGINE_VIC]	= NV_DEVICE_V0_DISABLE_VIC,
-	[NVDEV_ENGINE_MSENC]	= NV_DEVICE_V0_DISABLE_MSENC,
-	[NVDEV_ENGINE_DISP]	= NV_DEVICE_V0_DISABLE_DISP,
-	[NVDEV_ENGINE_MSVLD]	= NV_DEVICE_V0_DISABLE_MSVLD,
-	[NVDEV_ENGINE_SEC]	= NV_DEVICE_V0_DISABLE_SEC,
-	[NVDEV_SUBDEV_NR]	= 0,
-};
-
-static void
-nvkm_devobj_dtor(struct nvkm_object *object)
+void
+nvkm_device_del(struct nvkm_device **pdevice)
 {
-	struct nvkm_devobj *devobj = (void *)object;
+	struct nvkm_device *device = *pdevice;
 	int i;
+	if (device) {
+		mutex_lock(&nv_devices_mutex);
+		device->disable_mask = 0;
+		for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
+			struct nvkm_subdev *subdev =
+				nvkm_device_subdev(device, i);
+			nvkm_subdev_del(&subdev);
+		}
 
-	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
-		nvkm_object_ref(NULL, &devobj->subdev[i]);
+		nvkm_event_fini(&device->event);
 
-	nvkm_parent_destroy(&devobj->base);
-}
+		if (device->pri)
+			iounmap(device->pri);
+		list_del(&device->head);
 
-static struct nvkm_oclass
-nvkm_devobj_oclass_super = {
-	.handle = NV_DEVICE,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_devobj_dtor,
-		.init = _nvkm_parent_init,
-		.fini = _nvkm_parent_fini,
-		.mthd = nvkm_devobj_mthd,
-		.map  = nvkm_devobj_map,
-		.rd08 = nvkm_devobj_rd08,
-		.rd16 = nvkm_devobj_rd16,
-		.rd32 = nvkm_devobj_rd32,
-		.wr08 = nvkm_devobj_wr08,
-		.wr16 = nvkm_devobj_wr16,
-		.wr32 = nvkm_devobj_wr32,
+		if (device->func->dtor)
+			*pdevice = device->func->dtor(device);
+		mutex_unlock(&nv_devices_mutex);
+
+		kfree(*pdevice);
+		*pdevice = NULL;
 	}
-};
+}
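/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * nvkm_device_del() takes a struct nvkm_device ** so the caller's
 * pointer is cleared once the device is gone, which also makes a
 * second call a harmless no-op.  The double-pointer idiom, minus the
 * subdev teardown and dtor hook, in standalone form (demo_* names
 * hypothetical):
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_device { int id; };

static void demo_device_del(struct demo_device **pdevice)
{
	struct demo_device *device = *pdevice;
	if (device) {
		/* reverse-order teardown of children would go here */
		free(device);
		*pdevice = NULL;	/* caller's pointer cannot dangle */
	}
}

int main(void)
{
	struct demo_device *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	demo_device_del(&dev);	/* frees and clears */
	demo_device_del(&dev);	/* safe no-op */
	printf("dev = %p\n", (void *)dev);
	return 0;
}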
 
-static int
-nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
+int
+nvkm_device_ctor(const struct nvkm_device_func *func,
+		 const struct nvkm_device_quirk *quirk,
+		 struct device *dev, enum nvkm_device_type type, u64 handle,
+		 const char *name, const char *cfg, const char *dbg,
+		 bool detect, bool mmio, u64 subdev_mask,
+		 struct nvkm_device *device)
 {
-	union {
-		struct nv_device_v0 v0;
-	} *args = data;
-	struct nvkm_client *client = nv_client(parent);
-	struct nvkm_device *device;
-	struct nvkm_devobj *devobj;
+	struct nvkm_subdev *subdev;
+	u64 mmio_base, mmio_size;
 	u32 boot0, strap;
-	u64 disable, mmio_base, mmio_size;
 	void __iomem *map;
-	int ret, i, c;
-
-	nv_ioctl(parent, "create device size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create device v%d device %016llx "
-				 "disable %016llx debug0 %016llx\n",
-			 args->v0.version, args->v0.device,
-			 args->v0.disable, args->v0.debug0);
-	} else
-		return ret;
+	int ret = -EEXIST;
+	int i;
 
-	/* give priviledged clients register access */
-	if (client->super)
-		oclass = &nvkm_devobj_oclass_super;
+	mutex_lock(&nv_devices_mutex);
+	if (nvkm_device_find_locked(handle))
+		goto done;
 
-	/* find the device subdev that matches what the client requested */
-	device = nv_device(client->device);
-	if (args->v0.device != ~0) {
-		device = nvkm_device_find(args->v0.device);
-		if (!device)
-			return -ENODEV;
-	}
+	device->func = func;
+	device->quirk = quirk;
+	device->dev = dev;
+	device->type = type;
+	device->handle = handle;
+	device->cfgopt = cfg;
+	device->dbgopt = dbg;
+	device->name = name;
+	list_add_tail(&device->head, &nv_devices);
+	device->debug = nvkm_dbgopt(device->dbgopt, "device");
 
-	ret = nvkm_parent_create(parent, nv_object(device), oclass, 0,
-				 nvkm_control_oclass,
-				 (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				 (1ULL << NVDEV_ENGINE_FIFO) |
-				 (1ULL << NVDEV_ENGINE_DISP) |
-				 (1ULL << NVDEV_ENGINE_PM), &devobj);
-	*pobject = nv_object(devobj);
+	ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
 	if (ret)
-		return ret;
-
-	mmio_base = nv_device_resource_start(device, 0);
-	mmio_size = nv_device_resource_len(device, 0);
+		goto done;
 
-	/* translate api disable mask into internal mapping */
-	disable = args->v0.debug0;
-	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
-		if (args->v0.disable & disable_map[i])
-			disable |= (1ULL << i);
-	}
+	mmio_base = device->func->resource_addr(device, 0);
+	mmio_size = device->func->resource_size(device, 0);
 
 	/* identify the chipset, and determine classes of subdev/engines */
-	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
-	    !device->card_type) {
+	if (detect) {
 		map = ioremap(mmio_base, 0x102000);
-		if (map == NULL)
-			return -ENOMEM;
+		if (ret = -ENOMEM, map == NULL)
+			goto done;
 
 		/* switch mmio to cpu's native endianness */
 #ifndef __BIG_ENDIAN
@@ -397,31 +2389,83 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 			device->card_type = NV_04;
 		}
 
-		switch (device->card_type) {
-		case NV_04: ret = nv04_identify(device); break;
-		case NV_10:
-		case NV_11: ret = nv10_identify(device); break;
-		case NV_20: ret = nv20_identify(device); break;
-		case NV_30: ret = nv30_identify(device); break;
-		case NV_40: ret = nv40_identify(device); break;
-		case NV_50: ret = nv50_identify(device); break;
-		case NV_C0: ret = gf100_identify(device); break;
-		case NV_E0: ret = gk104_identify(device); break;
-		case GM100: ret = gm100_identify(device); break;
+		switch (device->chipset) {
+		case 0x004: device->chip = &nv4_chipset; break;
+		case 0x005: device->chip = &nv5_chipset; break;
+		case 0x010: device->chip = &nv10_chipset; break;
+		case 0x011: device->chip = &nv11_chipset; break;
+		case 0x015: device->chip = &nv15_chipset; break;
+		case 0x017: device->chip = &nv17_chipset; break;
+		case 0x018: device->chip = &nv18_chipset; break;
+		case 0x01a: device->chip = &nv1a_chipset; break;
+		case 0x01f: device->chip = &nv1f_chipset; break;
+		case 0x020: device->chip = &nv20_chipset; break;
+		case 0x025: device->chip = &nv25_chipset; break;
+		case 0x028: device->chip = &nv28_chipset; break;
+		case 0x02a: device->chip = &nv2a_chipset; break;
+		case 0x030: device->chip = &nv30_chipset; break;
+		case 0x031: device->chip = &nv31_chipset; break;
+		case 0x034: device->chip = &nv34_chipset; break;
+		case 0x035: device->chip = &nv35_chipset; break;
+		case 0x036: device->chip = &nv36_chipset; break;
+		case 0x040: device->chip = &nv40_chipset; break;
+		case 0x041: device->chip = &nv41_chipset; break;
+		case 0x042: device->chip = &nv42_chipset; break;
+		case 0x043: device->chip = &nv43_chipset; break;
+		case 0x044: device->chip = &nv44_chipset; break;
+		case 0x045: device->chip = &nv45_chipset; break;
+		case 0x046: device->chip = &nv46_chipset; break;
+		case 0x047: device->chip = &nv47_chipset; break;
+		case 0x049: device->chip = &nv49_chipset; break;
+		case 0x04a: device->chip = &nv4a_chipset; break;
+		case 0x04b: device->chip = &nv4b_chipset; break;
+		case 0x04c: device->chip = &nv4c_chipset; break;
+		case 0x04e: device->chip = &nv4e_chipset; break;
+		case 0x050: device->chip = &nv50_chipset; break;
+		case 0x063: device->chip = &nv63_chipset; break;
+		case 0x067: device->chip = &nv67_chipset; break;
+		case 0x068: device->chip = &nv68_chipset; break;
+		case 0x084: device->chip = &nv84_chipset; break;
+		case 0x086: device->chip = &nv86_chipset; break;
+		case 0x092: device->chip = &nv92_chipset; break;
+		case 0x094: device->chip = &nv94_chipset; break;
+		case 0x096: device->chip = &nv96_chipset; break;
+		case 0x098: device->chip = &nv98_chipset; break;
+		case 0x0a0: device->chip = &nva0_chipset; break;
+		case 0x0a3: device->chip = &nva3_chipset; break;
+		case 0x0a5: device->chip = &nva5_chipset; break;
+		case 0x0a8: device->chip = &nva8_chipset; break;
+		case 0x0aa: device->chip = &nvaa_chipset; break;
+		case 0x0ac: device->chip = &nvac_chipset; break;
+		case 0x0af: device->chip = &nvaf_chipset; break;
+		case 0x0c0: device->chip = &nvc0_chipset; break;
+		case 0x0c1: device->chip = &nvc1_chipset; break;
+		case 0x0c3: device->chip = &nvc3_chipset; break;
+		case 0x0c4: device->chip = &nvc4_chipset; break;
+		case 0x0c8: device->chip = &nvc8_chipset; break;
+		case 0x0ce: device->chip = &nvce_chipset; break;
+		case 0x0cf: device->chip = &nvcf_chipset; break;
+		case 0x0d7: device->chip = &nvd7_chipset; break;
+		case 0x0d9: device->chip = &nvd9_chipset; break;
+		case 0x0e4: device->chip = &nve4_chipset; break;
+		case 0x0e6: device->chip = &nve6_chipset; break;
+		case 0x0e7: device->chip = &nve7_chipset; break;
+		case 0x0ea: device->chip = &nvea_chipset; break;
+		case 0x0f0: device->chip = &nvf0_chipset; break;
+		case 0x0f1: device->chip = &nvf1_chipset; break;
+		case 0x106: device->chip = &nv106_chipset; break;
+		case 0x108: device->chip = &nv108_chipset; break;
+		case 0x117: device->chip = &nv117_chipset; break;
+		case 0x124: device->chip = &nv124_chipset; break;
+		case 0x126: device->chip = &nv126_chipset; break;
+		case 0x12b: device->chip = &nv12b_chipset; break;
 		default:
-			ret = -EINVAL;
-			break;
-		}
-
-		if (ret) {
-			nv_error(device, "unknown chipset, 0x%08x\n", boot0);
-			return ret;
+			nvdev_error(device, "unknown chipset (%08x)\n", boot0);
+			goto done;
 		}
 
-		nv_info(device, "BOOT0  : 0x%08x\n", boot0);
-		nv_info(device, "Chipset: %s (NV%02X)\n",
-			device->cname, device->chipset);
-		nv_info(device, "Family : NV%02X\n", device->card_type);
+		nvdev_info(device, "NVIDIA %s (%08x)\n",
+			   device->chip->name, boot0);
 
 		/* determine frequency of timing crystal */
 		if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
@@ -436,300 +2480,89 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		case 0x00400000: device->crystal = 27000; break;
 		case 0x00400040: device->crystal = 25000; break;
 		}
-
-		nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
-	} else
-	if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
-		device->cname = "NULL";
-		device->oclass[NVDEV_SUBDEV_VBIOS] = &nvkm_bios_oclass;
-	}
-
-	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
-	    !nv_subdev(device)->mmio) {
-		nv_subdev(device)->mmio  = ioremap(mmio_base, mmio_size);
-		if (!nv_subdev(device)->mmio) {
-			nv_error(device, "unable to map device registers\n");
-			return -ENOMEM;
-		}
-	}
-
-	/* ensure requested subsystems are available for use */
-	for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
-		if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
-			continue;
-
-		if (device->subdev[i]) {
-			nvkm_object_ref(device->subdev[i], &devobj->subdev[i]);
-			continue;
-		}
-
-		ret = nvkm_object_ctor(nv_object(device), NULL, oclass,
-				       NULL, i, &devobj->subdev[i]);
-		if (ret == -ENODEV)
-			continue;
-		if (ret)
-			return ret;
-
-		device->subdev[i] = devobj->subdev[i];
-
-		/* note: can't init *any* subdevs until devinit has been run
-		 * due to not knowing exactly what the vbios init tables will
-		 * mess with.  devinit also can't be run until all of its
-		 * dependencies have been created.
-		 *
-		 * this code delays init of any subdev until all of devinit's
-		 * dependencies have been created, and then initialises each
-		 * subdev in turn as they're created.
-		 */
-		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
-			struct nvkm_object *subdev = devobj->subdev[c++];
-			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_inc(subdev);
-				if (ret)
-					return ret;
-				atomic_dec(&nv_object(device)->usecount);
-			} else
-			if (subdev) {
-				nvkm_subdev_reset(subdev);
-			}
-		}
-	}
-
-	return 0;
-}
-
-static struct nvkm_ofuncs
-nvkm_devobj_ofuncs = {
-	.ctor = nvkm_devobj_ctor,
-	.dtor = nvkm_devobj_dtor,
-	.init = _nvkm_parent_init,
-	.fini = _nvkm_parent_fini,
-	.mthd = nvkm_devobj_mthd,
-};
-
-/******************************************************************************
- * nvkm_device: engine functions
- *****************************************************************************/
-
-struct nvkm_device *
-nv_device(void *obj)
-{
-	struct nvkm_object *device = nv_object(obj);
-	if (device->engine == NULL) {
-		while (device && device->parent)
-			device = device->parent;
 	} else {
-		device = &nv_object(obj)->engine->subdev.object;
-		if (device && device->parent)
-			device = device->parent;
+		device->chip = &null_chipset;
 	}
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!device))
-		nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj));
-#endif
-	return (void *)device;
-}
 
-static struct nvkm_oclass
-nvkm_device_sclass[] = {
-	{ 0x0080, &nvkm_devobj_ofuncs },
-	{}
-};
+	if (!device->name)
+		device->name = device->chip->name;
 
-static int
-nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
-		       struct nvkm_notify *notify)
-{
-	if (!WARN_ON(size != 0)) {
-		notify->size  = 0;
-		notify->types = 1;
-		notify->index = 0;
-		return 0;
-	}
-	return -EINVAL;
-}
-
-static const struct nvkm_event_func
-nvkm_device_event_func = {
-	.ctor = nvkm_device_event_ctor,
-};
-
-static int
-nvkm_device_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_device *device = (void *)object;
-	struct nvkm_object *subdev;
-	int ret, i;
-
-	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_dec(subdev, suspend);
-				if (ret && suspend)
-					goto fail;
-			}
-		}
-	}
-
-	ret = nvkm_acpi_fini(device, suspend);
-fail:
-	for (; ret && i < NVDEV_SUBDEV_NR; i++) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_inc(subdev);
-				if (ret) {
-					/* XXX */
-				}
-			}
+	if (mmio) {
+		device->pri = ioremap(mmio_base, mmio_size);
+		if (!device->pri) {
+			nvdev_error(device, "unable to map PRI\n");
+			ret = -ENOMEM;
+			goto done;
 		}
 	}
 
-	return ret;
-}
-
-static int
-nvkm_device_init(struct nvkm_object *object)
-{
-	struct nvkm_device *device = (void *)object;
-	struct nvkm_object *subdev;
-	int ret, i = 0;
-
-	ret = nvkm_acpi_init(device);
-	if (ret)
-		goto fail;
-
-	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_inc(subdev);
-				if (ret)
-					goto fail;
-			} else {
-				nvkm_subdev_reset(subdev);
-			}
+	mutex_init(&device->mutex);
+
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+#define _(s,m) case s:                                                         \
+	if (device->chip->m && (subdev_mask & (1ULL << (s)))) {                \
+		ret = device->chip->m(device, (s), &device->m);                \
+		if (ret) {                                                     \
+			subdev = nvkm_device_subdev(device, (s));              \
+			nvkm_subdev_del(&subdev);                              \
+			device->m = NULL;                                      \
+			if (ret != -ENODEV) {                                  \
+				nvdev_error(device, "%s ctor failed, %d\n",    \
+					    nvkm_subdev_name[s], ret);         \
+				goto done;                                     \
+			}                                                      \
+		}                                                              \
+	}                                                                      \
+	break
+		switch (i) {
+		_(NVKM_SUBDEV_BAR    ,     bar);
+		_(NVKM_SUBDEV_VBIOS  ,    bios);
+		_(NVKM_SUBDEV_BUS    ,     bus);
+		_(NVKM_SUBDEV_CLK    ,     clk);
+		_(NVKM_SUBDEV_DEVINIT, devinit);
+		_(NVKM_SUBDEV_FB     ,      fb);
+		_(NVKM_SUBDEV_FUSE   ,    fuse);
+		_(NVKM_SUBDEV_GPIO   ,    gpio);
+		_(NVKM_SUBDEV_I2C    ,     i2c);
+		_(NVKM_SUBDEV_IBUS   ,    ibus);
+		_(NVKM_SUBDEV_INSTMEM,    imem);
+		_(NVKM_SUBDEV_LTC    ,     ltc);
+		_(NVKM_SUBDEV_MC     ,      mc);
+		_(NVKM_SUBDEV_MMU    ,     mmu);
+		_(NVKM_SUBDEV_MXM    ,     mxm);
+		_(NVKM_SUBDEV_PCI    ,     pci);
+		_(NVKM_SUBDEV_PMU    ,     pmu);
+		_(NVKM_SUBDEV_THERM  ,   therm);
+		_(NVKM_SUBDEV_TIMER  ,   timer);
+		_(NVKM_SUBDEV_VOLT   ,    volt);
+		_(NVKM_ENGINE_BSP    ,     bsp);
+		_(NVKM_ENGINE_CE0    ,   ce[0]);
+		_(NVKM_ENGINE_CE1    ,   ce[1]);
+		_(NVKM_ENGINE_CE2    ,   ce[2]);
+		_(NVKM_ENGINE_CIPHER ,  cipher);
+		_(NVKM_ENGINE_DISP   ,    disp);
+		_(NVKM_ENGINE_DMAOBJ ,     dma);
+		_(NVKM_ENGINE_FIFO   ,    fifo);
+		_(NVKM_ENGINE_GR     ,      gr);
+		_(NVKM_ENGINE_IFB    ,     ifb);
+		_(NVKM_ENGINE_ME     ,      me);
+		_(NVKM_ENGINE_MPEG   ,    mpeg);
+		_(NVKM_ENGINE_MSENC  ,   msenc);
+		_(NVKM_ENGINE_MSPDEC ,  mspdec);
+		_(NVKM_ENGINE_MSPPP  ,   msppp);
+		_(NVKM_ENGINE_MSVLD  ,   msvld);
+		_(NVKM_ENGINE_PM     ,      pm);
+		_(NVKM_ENGINE_SEC    ,     sec);
+		_(NVKM_ENGINE_SW     ,      sw);
+		_(NVKM_ENGINE_VIC    ,     vic);
+		_(NVKM_ENGINE_VP     ,      vp);
+		default:
+			WARN_ON(1);
+			continue;
 		}
+#undef _
 	}
 
 	ret = 0;
-fail:
-	for (--i; ret && i >= 0; i--) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
-				nvkm_object_dec(subdev, false);
-		}
-	}
-
-	if (ret)
-		nvkm_acpi_fini(device, false);
-	return ret;
-}
-
-static void
-nvkm_device_dtor(struct nvkm_object *object)
-{
-	struct nvkm_device *device = (void *)object;
-
-	nvkm_event_fini(&device->event);
-
-	mutex_lock(&nv_devices_mutex);
-	list_del(&device->head);
-	mutex_unlock(&nv_devices_mutex);
-
-	if (nv_subdev(device)->mmio)
-		iounmap(nv_subdev(device)->mmio);
-
-	nvkm_engine_destroy(&device->engine);
-}
-
-resource_size_t
-nv_device_resource_start(struct nvkm_device *device, unsigned int bar)
-{
-	if (nv_device_is_pci(device)) {
-		return pci_resource_start(device->pdev, bar);
-	} else {
-		struct resource *res;
-		res = platform_get_resource(device->platformdev,
-					    IORESOURCE_MEM, bar);
-		if (!res)
-			return 0;
-		return res->start;
-	}
-}
-
-resource_size_t
-nv_device_resource_len(struct nvkm_device *device, unsigned int bar)
-{
-	if (nv_device_is_pci(device)) {
-		return pci_resource_len(device->pdev, bar);
-	} else {
-		struct resource *res;
-		res = platform_get_resource(device->platformdev,
-					    IORESOURCE_MEM, bar);
-		if (!res)
-			return 0;
-		return resource_size(res);
-	}
-}
-
-int
-nv_device_get_irq(struct nvkm_device *device, bool stall)
-{
-	if (nv_device_is_pci(device)) {
-		return device->pdev->irq;
-	} else {
-		return platform_get_irq_byname(device->platformdev,
-					       stall ? "stall" : "nonstall");
-	}
-}
-
-static struct nvkm_oclass
-nvkm_device_oclass = {
-	.handle = NV_ENGINE(DEVICE, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_device_dtor,
-		.init = nvkm_device_init,
-		.fini = nvkm_device_fini,
-	},
-};
-
-int
-nvkm_device_create_(void *dev, enum nv_bus_type type, u64 name,
-		    const char *sname, const char *cfg, const char *dbg,
-		    int length, void **pobject)
-{
-	struct nvkm_device *device;
-	int ret = -EEXIST;
-
-	mutex_lock(&nv_devices_mutex);
-	list_for_each_entry(device, &nv_devices, head) {
-		if (device->handle == name)
-			goto done;
-	}
-
-	ret = nvkm_engine_create_(NULL, NULL, &nvkm_device_oclass, true,
-				  "DEVICE", "device", length, pobject);
-	device = *pobject;
-	if (ret)
-		goto done;
-
-	switch (type) {
-	case NVKM_BUS_PCI:
-		device->pdev = dev;
-		break;
-	case NVKM_BUS_PLATFORM:
-		device->platformdev = dev;
-		break;
-	}
-	device->handle = name;
-	device->cfgopt = cfg;
-	device->dbgopt = dbg;
-	device->name = sname;
-
-	nv_subdev(device)->debug = nvkm_dbgopt(device->dbgopt, "DEVICE");
-	nv_engine(device)->sclass = nvkm_device_sclass;
-	list_add(&device->head, &nv_devices);
-
-	ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
 done:
 	mutex_unlock(&nv_devices_mutex);
 	return ret;
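/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * The constructor loop in nvkm_device_ctor() walks every index; for
 * each one the _() macro checks that the chipset provides a
 * constructor for that member and that the caller's subdev_mask allows
 * it, then treats -ENODEV from the constructor as "not fitted" rather
 * than a fatal error.  The gating logic, compressed into standalone C
 * (demo_* names hypothetical):
 */
#include <stdio.h>

#define DEMO_ENODEV 19

enum { DEMO_CLK, DEMO_FIFO, DEMO_NR };

typedef int (*demo_ctor)(int index);

static int demo_clk_new(int index)  { printf("ctor %d ok\n", index); return 0; }
static int demo_fifo_new(int index) { (void)index; return -DEMO_ENODEV; }

int main(void)
{
	demo_ctor chip[DEMO_NR] = { demo_clk_new, demo_fifo_new };
	unsigned long long subdev_mask = ~0ULL;	/* caller enables all */
	int ret, i;

	for (i = 0; i < DEMO_NR; i++) {
		if (!chip[i] || !(subdev_mask & (1ULL << i)))
			continue;	/* chipset lacks it, or masked off */
		ret = chip[i](i);
		if (ret && ret != -DEMO_ENODEV) {
			fprintf(stderr, "ctor %d failed, %d\n", i, ret);
			return 1;
		}
	}
	return 0;
}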
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index 0b794b13cec3..cf8bc068e9b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "priv.h"
+#include "ctrl.h"
 
 #include <core/client.h>
 #include <subdev/clk.h>
@@ -31,18 +31,18 @@
 #include <nvif/unpack.h>
 
 static int
-nvkm_control_mthd_pstate_info(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
 {
 	union {
 		struct nvif_control_pstate_info_v0 v0;
 	} *args = data;
-	struct nvkm_clk *clk = nvkm_clk(object);
+	struct nvkm_clk *clk = ctrl->device->clk;
 	int ret;
 
-	nv_ioctl(object, "control pstate info size %d\n", size);
+	nvif_ioctl(&ctrl->object, "control pstate info size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "control pstate info vers %d\n",
-			 args->v0.version);
+		nvif_ioctl(&ctrl->object, "control pstate info vers %d\n",
+			   args->v0.version);
 	} else
 		return ret;
 
@@ -64,24 +64,24 @@ nvkm_control_mthd_pstate_info(struct nvkm_object *object, void *data, u32 size)
 }
 
 static int
-nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_attr(struct nvkm_control *ctrl, void *data, u32 size)
 {
 	union {
 		struct nvif_control_pstate_attr_v0 v0;
 	} *args = data;
-	struct nvkm_clk *clk = nvkm_clk(object);
-	struct nvkm_domain *domain;
+	struct nvkm_clk *clk = ctrl->device->clk;
+	const struct nvkm_domain *domain;
 	struct nvkm_pstate *pstate;
 	struct nvkm_cstate *cstate;
 	int i = 0, j = -1;
 	u32 lo, hi;
 	int ret;
 
-	nv_ioctl(object, "control pstate attr size %d\n", size);
+	nvif_ioctl(&ctrl->object, "control pstate attr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "control pstate attr vers %d state %d "
-				 "index %d\n",
-			 args->v0.version, args->v0.state, args->v0.index);
+		nvif_ioctl(&ctrl->object,
+			   "control pstate attr vers %d state %d index %d\n",
+			   args->v0.version, args->v0.state, args->v0.index);
 		if (!clk)
 			return -ENODEV;
 		if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT)
@@ -116,7 +116,7 @@ nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
 
 		args->v0.state = pstate->pstate;
 	} else {
-		lo = max(clk->read(clk, domain->name), 0);
+		lo = max(nvkm_clk_read(clk, domain->name), 0);
 		hi = lo;
 	}
 
@@ -137,19 +137,19 @@ nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
 }
 
 static int
-nvkm_control_mthd_pstate_user(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_user(struct nvkm_control *ctrl, void *data, u32 size)
 {
 	union {
 		struct nvif_control_pstate_user_v0 v0;
 	} *args = data;
-	struct nvkm_clk *clk = nvkm_clk(object);
+	struct nvkm_clk *clk = ctrl->device->clk;
 	int ret;
 
-	nv_ioctl(object, "control pstate user size %d\n", size);
+	nvif_ioctl(&ctrl->object, "control pstate user size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "control pstate user vers %d ustate %d "
-				 "pwrsrc %d\n", args->v0.version,
-			 args->v0.ustate, args->v0.pwrsrc);
+		nvif_ioctl(&ctrl->object,
+			   "control pstate user vers %d ustate %d pwrsrc %d\n",
+			   args->v0.version, args->v0.ustate, args->v0.pwrsrc);
 		if (!clk)
 			return -ENODEV;
 	} else
@@ -168,32 +168,44 @@ nvkm_control_mthd_pstate_user(struct nvkm_object *object, void *data, u32 size)
 static int
 nvkm_control_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
+	struct nvkm_control *ctrl = nvkm_control(object);
 	switch (mthd) {
 	case NVIF_CONTROL_PSTATE_INFO:
-		return nvkm_control_mthd_pstate_info(object, data, size);
+		return nvkm_control_mthd_pstate_info(ctrl, data, size);
 	case NVIF_CONTROL_PSTATE_ATTR:
-		return nvkm_control_mthd_pstate_attr(object, data, size);
+		return nvkm_control_mthd_pstate_attr(ctrl, data, size);
 	case NVIF_CONTROL_PSTATE_USER:
-		return nvkm_control_mthd_pstate_user(object, data, size);
+		return nvkm_control_mthd_pstate_user(ctrl, data, size);
 	default:
 		break;
 	}
 	return -EINVAL;
 }
 
-static struct nvkm_ofuncs
-nvkm_control_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
+static const struct nvkm_object_func
+nvkm_control = {
 	.mthd = nvkm_control_mthd,
 };
 
-struct nvkm_oclass
-nvkm_control_oclass[] = {
-	{ .handle = NVIF_IOCTL_NEW_V0_CONTROL,
-	  .ofuncs = &nvkm_control_ofuncs
-	},
-	{}
+static int
+nvkm_control_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_control *ctrl;
+
+	if (!(ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &ctrl->object;
+	ctrl->device = device;
+
+	nvkm_object_ctor(&nvkm_control, oclass, &ctrl->object);
+	return 0;
+}
+
+const struct nvkm_device_oclass
+nvkm_control_oclass = {
+	.base.oclass = NVIF_IOCTL_NEW_V0_CONTROL,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nvkm_control_new,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
new file mode 100644
index 000000000000..20249d8e444d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -0,0 +1,12 @@
+#ifndef __NVKM_DEVICE_CTRL_H__
+#define __NVKM_DEVICE_CTRL_H__
+#define nvkm_control(p) container_of((p), struct nvkm_control, object)
+#include <core/device.h>
+
+struct nvkm_control {
+	struct nvkm_object object;
+	struct nvkm_device *device;
+};
+
+extern const struct nvkm_device_oclass nvkm_control_oclass;
+#endif
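/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * The nvkm_control() macro in ctrl.h is the classic container_of
 * pattern: given a pointer to the embedded nvkm_object, recover the
 * enclosing nvkm_control.  Self-contained userspace version (demo_*
 * names hypothetical):
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_object  { int handle; };
struct demo_control {
	struct demo_object object;	/* embedded base, as in ctrl.h */
	int device_id;
};
#define demo_control(p) container_of((p), struct demo_control, object)

int main(void)
{
	struct demo_control ctrl = { { 0x1234 }, 7 };
	struct demo_object *obj = &ctrl.object;	/* what callers pass */

	/* method entry points only see 'obj'; recover the wrapper: */
	printf("device_id = %d\n", demo_control(obj)->device_id);
	return 0;
}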
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
deleted file mode 100644
index 82b38d7e9730..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mspdec.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/msppp.h>
-#include <engine/ce.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-gf100_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0xc0:
-		device->cname = "GF100";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf100_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc4:
-		device->cname = "GF104";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc3:
-		device->cname = "GF106";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xce:
-		device->cname = "GF114";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xcf:
-		device->cname = "GF116";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc1:
-		device->cname = "GF108";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf108_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc8:
-		device->cname = "GF110";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf110_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xd9:
-		device->cname = "GF119";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gf110_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf110_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf119_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gf110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xd7:
-		device->cname = "GF117";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gf110_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf117_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf117_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gf110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Fermi chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
deleted file mode 100644
index 6a9483f65d83..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-#include <engine/ce.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/pm.h>
-
-int
-gk104_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0xe4:
-		device->cname = "GK104";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk104_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk104_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		break;
-	case 0xe7:
-		device->cname = "GK107";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk104_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		break;
-	case 0xe6:
-		device->cname = "GK106";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk104_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk104_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		break;
-	case 0xea:
-		device->cname = "GK20A";
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk20a_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk20a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk20a_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = gk20a_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gk20a_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk20a_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk20a_gr_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &gk20a_volt_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk20a_pmu_oclass;
-		break;
-	case 0xf0:
-		device->cname = "GK110";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk110_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk110_pm_oclass;
-		break;
-	case 0xf1:
-		device->cname = "GK110B";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf110_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk110b_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk110_pm_oclass;
-		break;
-	case 0x106:
-		device->cname = "GK208B";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk208_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk208_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		break;
-	case 0x108:
-		device->cname = "GK208";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk208_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk208_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Kepler chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
deleted file mode 100644
index 70abf1ec7c98..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-#include <engine/ce.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/pm.h>
-
-int
-gm100_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x117:
-		device->cname = "GM107";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf110_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gm107_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gm107_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gm107_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gm107_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gm107_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-
-#if 0
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk208_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gm107_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gm107_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-#endif
-		break;
-	case 0x124:
-		device->cname = "GM204";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gm204_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gm107_fuse_oclass;
-#if 0
-		/* looks to be some non-trivial changes */
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		/* priv ring says no to 0x10eb14 writes */
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gm107_therm_oclass;
-#endif
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gm204_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gm107_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gm107_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-#if 0
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gm204_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gm204_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gm204_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gm204_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gm204_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gm204_ce2_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-#endif
-		break;
-	case 0x126:
-		device->cname = "GM206";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gm204_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gm107_fuse_oclass;
-#if 0
-		/* looks to be some non-trivial changes */
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		/* priv ring says no to 0x10eb14 writes */
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gm107_therm_oclass;
-#endif
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gm204_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gm107_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gm107_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-#if 0
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gm204_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gm206_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gm204_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gm204_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gm204_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gm204_ce2_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-#endif
-		break;
-	default:
-		nv_fatal(device, "unknown Maxwell chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
deleted file mode 100644
index 5a2ae043b478..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv04_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x04:
-		device->cname = "NV04";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv04_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv04_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x05:
-		device->cname = "NV05";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv05_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv04_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown RIVA chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
deleted file mode 100644
index 94a1ca45e94a..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv10_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x10:
-		device->cname = "NV10";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x15:
-		device->cname = "NV15";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x16:
-		device->cname = "NV16";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x1a:
-		device->cname = "nForce";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x11:
-		device->cname = "NV11";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x17:
-		device->cname = "NV17";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x1f:
-		device->cname = "nForce2";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x18:
-		device->cname = "NV18";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Celsius chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
deleted file mode 100644
index d5ec8937df68..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv20_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x20:
-		device->cname = "NV20";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv20_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv20_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x25:
-		device->cname = "NV25";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x28:
-		device->cname = "NV28";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x2a:
-		device->cname = "NV2A";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv2a_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Kelvin chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
deleted file mode 100644
index dda09621e898..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/disp.h>
-
-int
-nv30_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x30:
-		device->cname = "NV30";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x35:
-		device->cname = "NV35";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv35_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x31:
-		device->cname = "NV31";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x36:
-		device->cname = "NV36";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv36_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x34:
-		device->cname = "NV34";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv34_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Rankine chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
deleted file mode 100644
index c6301361d14f..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/mmu.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-nv40_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x40:
-		device->cname = "NV40";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x41:
-		device->cname = "NV41";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x42:
-		device->cname = "NV42";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x43:
-		device->cname = "NV43";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x45:
-		device->cname = "NV45";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x47:
-		device->cname = "G70";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv47_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x49:
-		device->cname = "G71";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4b:
-		device->cname = "G73";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x44:
-		device->cname = "NV44";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x46:
-		device->cname = "G72";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4a:
-		device->cname = "NV44A";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4c:
-		device->cname = "C61";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4e:
-		device->cname = "C51";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv4e_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv4e_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x63:
-		device->cname = "C73";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x67:
-		device->cname = "C67";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x68:
-		device->cname = "C68";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Curie chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
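
The deleted identify files in this commit all share one shape: nv40_identify() above and nv50_identify() below are large per-chipset switch statements that differ only in which subdev/engine oclass pointers they assign, so each new chipset costs another near-duplicate ~25-line case block, and the "unknown chipset" failure path is repeated once per generation. The rework replaces these switches with data-driven device tables. A minimal sketch of that direction, using the chipset IDs and code names visible in the deleted cases; the names nvkm_chip_desc, chip_table and lookup_chip here are illustrative only, not the actual nvkm API:

	#include <linux/types.h>

	/* Illustrative table-driven replacement for the deleted
	 * per-chipset switch blocks (names are hypothetical). */
	struct nvkm_chip_desc {
		u32 chipset;		/* matched against device->chipset */
		const char *cname;	/* code name, e.g. "G71" */
		/* per-subdev/engine constructors would follow here */
	};

	static const struct nvkm_chip_desc chip_table[] = {
		{ 0x49, "G71" },
		{ 0x4b, "G73" },
		{ 0x50, "G80" },
		{}	/* chipset 0 terminates the table */
	};

	static const struct nvkm_chip_desc *
	lookup_chip(u32 chipset)
	{
		const struct nvkm_chip_desc *desc;

		for (desc = chip_table; desc->chipset; desc++) {
			if (desc->chipset == chipset)
				return desc;
		}
		return NULL;	/* caller reports the unknown chipset once */
	}

Matching against one table keeps the -EINVAL path in a single place and turns chipset support into data rather than control flow.
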
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
deleted file mode 100644
index 249b84454612..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/vp.h>
-#include <engine/cipher.h>
-#include <engine/sec.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/ce.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-nv50_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x50:
-		device->cname = "G80";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  nv50_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv50_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv50_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv50_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv50_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv50_pm_oclass;
-		break;
-	case 0x84:
-		device->cname = "G84";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g84_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x86:
-		device->cname = "G86";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g84_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x92:
-		device->cname = "G92";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g84_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x94:
-		device->cname = "G94";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g94_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x96:
-		device->cname = "G96";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g94_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x98:
-		device->cname = "G98";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g98_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_SEC    ] = &g98_sec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xa0:
-		device->cname = "G200";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt200_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xaa:
-		device->cname = "MCP77/MCP78";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  mcp77_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g98_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  mcp77_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_SEC    ] = &g98_sec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xac:
-		device->cname = "MCP79/MCP7A";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  mcp77_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g98_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  mcp77_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_SEC    ] = &g98_sec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xa3:
-		device->cname = "GT215";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gt215_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gt215_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	case 0xa5:
-		device->cname = "GT216";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gt215_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gt215_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	case 0xa8:
-		device->cname = "GT218";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gt215_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gt215_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	case 0xaf:
-		device->cname = "MCP89";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  mcp89_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  mcp89_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Tesla chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
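
The new pci.c below carries the PCI-facing half of that rework: nvkm_device_pci_device maps a 16-bit PCI device ID to a marketing name plus an optional nvkm_device_pci_vendor table, which refines the name per subsystem vendor/device pair or attaches nvkm_device_quirk data such as .tv_gpio, .tv_pin_mask or .War00C800_0. A minimal sketch of the resulting two-level lookup, with simplified mirrors of the two structs defined below; the helper name pci_name_for is hypothetical, not the nvkm API:

	#include <linux/types.h>

	/* Simplified mirrors of the pci.c table structs (quirks omitted). */
	struct vendor_entry {
		u16 vendor;		/* PCI subsystem vendor ID */
		u16 device;		/* PCI subsystem device ID */
		const char *name;	/* board-specific name, may be NULL */
	};

	struct device_entry {
		u16 device;		/* PCI device ID */
		const char *name;	/* generic name for this device ID */
		const struct vendor_entry *vendor;	/* optional overrides */
	};

	static const char *
	pci_name_for(const struct device_entry *tbl, u16 device,
		     u16 sub_vendor, u16 sub_device)
	{
		const struct device_entry *d;
		const struct vendor_entry *v;

		for (d = tbl; d->device; d++) {
			if (d->device != device)
				continue;
			/* a subsystem-specific entry wins over the
			 * generic name; NULL names fall back to it */
			for (v = d->vendor; v && v->vendor; v++) {
				if (v->vendor == sub_vendor &&
				    v->device == sub_device)
					return v->name ? v->name : d->name;
			}
			return d->name;
		}
		return NULL;
	}

Entries with a NULL name, such as the Lenovo W541 lines under device 0x11fc, exist purely to attach quirk data: the lookup presumably keeps the generic device-level name while the matched nvkm_device_quirk is handed to the probe path so the workaround can be applied.
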
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
new file mode 100644
index 000000000000..9dd1cac81e80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -0,0 +1,1685 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/pci.h>
+#include "priv.h"
+
+struct nvkm_device_pci_device {
+	u16 device;
+	const char *name;
+	const struct nvkm_device_pci_vendor *vendor;
+};
+
+struct nvkm_device_pci_vendor {
+	u16 vendor;
+	u16 device;
+	const char *name;
+	const struct nvkm_device_quirk quirk;
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0189[] = {
+	/* Apple iMac G4 NV18 */
+	{ 0x10de, 0x0010, NULL, { .tv_gpio = 4 } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_01f0[] = {
+	/* MSI nForce2 IGP */
+	{ 0x1462, 0x5710, NULL, { .tv_pin_mask = 0xc } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0322[] = {
+	/* Zotac FX5200 */
+	{ 0x19da, 0x1035, NULL, { .tv_pin_mask = 0xc } },
+	{ 0x19da, 0x2035, NULL, { .tv_pin_mask = 0xc } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_05e7[] = {
+	{ 0x10de, 0x0595, "Tesla T10 Processor" },
+	{ 0x10de, 0x068f, "Tesla T10 Processor" },
+	{ 0x10de, 0x0697, "Tesla M1060" },
+	{ 0x10de, 0x0714, "Tesla M1060" },
+	{ 0x10de, 0x0743, "Tesla M1060" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0609[] = {
+	{ 0x106b, 0x00a7, "GeForce 8800 GS" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_062e[] = {
+	{ 0x106b, 0x0605, "GeForce GT 130" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0649[] = {
+	{ 0x1043, 0x202d, "GeForce GT 220M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0652[] = {
+	{ 0x152d, 0x0850, "GeForce GT 240M LE" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0654[] = {
+	{ 0x1043, 0x14a2, "GeForce GT 320M" },
+	{ 0x1043, 0x14d2, "GeForce GT 320M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0655[] = {
+	{ 0x106b, 0x0633, "GeForce GT 120" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0656[] = {
+	{ 0x106b, 0x0693, "GeForce GT 120" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06d1[] = {
+	{ 0x10de, 0x0771, "Tesla C2050" },
+	{ 0x10de, 0x0772, "Tesla C2070" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06d2[] = {
+	{ 0x10de, 0x088f, "Tesla X2070" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06de[] = {
+	{ 0x10de, 0x0773, "Tesla S2050" },
+	{ 0x10de, 0x082f, "Tesla M2050" },
+	{ 0x10de, 0x0840, "Tesla X2070" },
+	{ 0x10de, 0x0842, "Tesla M2050" },
+	{ 0x10de, 0x0846, "Tesla M2050" },
+	{ 0x10de, 0x0866, "Tesla M2050" },
+	{ 0x10de, 0x0907, "Tesla M2050" },
+	{ 0x10de, 0x091e, "Tesla M2050" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06e8[] = {
+	{ 0x103c, 0x360b, "GeForce 9200M GE" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06f9[] = {
+	{ 0x10de, 0x060d, "Quadro FX 370 Low Profile" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06ff[] = {
+	{ 0x10de, 0x0711, "HICx8 + Graphics" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0866[] = {
+	{ 0x106b, 0x00b1, "GeForce 9400M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0872[] = {
+	{ 0x1043, 0x1c42, "GeForce G205M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0873[] = {
+	{ 0x1043, 0x1c52, "GeForce G205M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a6e[] = {
+	{ 0x17aa, 0x3607, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a70[] = {
+	{ 0x17aa, 0x3605, "Second Generation ION" },
+	{ 0x17aa, 0x3617, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a73[] = {
+	{ 0x17aa, 0x3607, "Second Generation ION" },
+	{ 0x17aa, 0x3610, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a74[] = {
+	{ 0x17aa, 0x903a, "GeForce G210" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a75[] = {
+	{ 0x17aa, 0x3605, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a7a[] = {
+	{ 0x1462, 0xaa51, "GeForce 405" },
+	{ 0x1462, 0xaa58, "GeForce 405" },
+	{ 0x1462, 0xac71, "GeForce 405" },
+	{ 0x1462, 0xac82, "GeForce 405" },
+	{ 0x1642, 0x3980, "GeForce 405" },
+	{ 0x17aa, 0x3950, "GeForce 405M" },
+	{ 0x17aa, 0x397d, "GeForce 405M" },
+	{ 0x1b0a, 0x90b4, "GeForce 405" },
+	{ 0x1bfd, 0x0003, "GeForce 405" },
+	{ 0x1bfd, 0x8006, "GeForce 405" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0dd8[] = {
+	{ 0x10de, 0x0914, "Quadro 2000D" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0de9[] = {
+	{ 0x1025, 0x0692, "GeForce GT 620M" },
+	{ 0x1025, 0x0725, "GeForce GT 620M" },
+	{ 0x1025, 0x0728, "GeForce GT 620M" },
+	{ 0x1025, 0x072b, "GeForce GT 620M" },
+	{ 0x1025, 0x072e, "GeForce GT 620M" },
+	{ 0x1025, 0x0753, "GeForce GT 620M" },
+	{ 0x1025, 0x0754, "GeForce GT 620M" },
+	{ 0x17aa, 0x3977, "GeForce GT 640M LE" },
+	{ 0x1b0a, 0x2210, "GeForce GT 635M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0dea[] = {
+	{ 0x17aa, 0x365a, "GeForce 615" },
+	{ 0x17aa, 0x365b, "GeForce 615" },
+	{ 0x17aa, 0x365e, "GeForce 615" },
+	{ 0x17aa, 0x3660, "GeForce 615" },
+	{ 0x17aa, 0x366c, "GeForce 615" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0df4[] = {
+	{ 0x152d, 0x0952, "GeForce GT 630M" },
+	{ 0x152d, 0x0953, "GeForce GT 630M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fd2[] = {
+	{ 0x1028, 0x0595, "GeForce GT 640M LE" },
+	{ 0x1028, 0x05b2, "GeForce GT 640M LE" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fe3[] = {
+	{ 0x103c, 0x2b16, "GeForce GT 745A" },
+	{ 0x17aa, 0x3675, "GeForce GT 745A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_104b[] = {
+	{ 0x1043, 0x844c, "GeForce GT 625" },
+	{ 0x1043, 0x846b, "GeForce GT 625" },
+	{ 0x1462, 0xb590, "GeForce GT 625" },
+	{ 0x174b, 0x0625, "GeForce GT 625" },
+	{ 0x174b, 0xa625, "GeForce GT 625" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1058[] = {
+	{ 0x103c, 0x2af1, "GeForce 610" },
+	{ 0x17aa, 0x3682, "GeForce 800A" },
+	{ 0x17aa, 0x3692, "GeForce 705A" },
+	{ 0x17aa, 0x3695, "GeForce 800A" },
+	{ 0x17aa, 0x36a8, "GeForce 800A" },
+	{ 0x17aa, 0x36ac, "GeForce 800A" },
+	{ 0x17aa, 0x36ad, "GeForce 800A" },
+	{ 0x705a, 0x3682, "GeForce 800A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_105b[] = {
+	{ 0x103c, 0x2afb, "GeForce 705A" },
+	{ 0x17aa, 0x36a1, "GeForce 800A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1091[] = {
+	{ 0x10de, 0x088e, "Tesla X2090" },
+	{ 0x10de, 0x0891, "Tesla X2090" },
+	{ 0x10de, 0x0974, "Tesla X2090" },
+	{ 0x10de, 0x098d, "Tesla X2090" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1096[] = {
+	{ 0x10de, 0x0911, "Tesla C2050" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1140[] = {
+	{ 0x1019, 0x999f, "GeForce GT 720M" },
+	{ 0x1025, 0x0600, "GeForce GT 620M" },
+	{ 0x1025, 0x0606, "GeForce GT 620M" },
+	{ 0x1025, 0x064a, "GeForce GT 620M" },
+	{ 0x1025, 0x064c, "GeForce GT 620M" },
+	{ 0x1025, 0x067a, "GeForce GT 620M" },
+	{ 0x1025, 0x0680, "GeForce GT 620M" },
+	{ 0x1025, 0x0686, "GeForce 710M" },
+	{ 0x1025, 0x0689, "GeForce 710M" },
+	{ 0x1025, 0x068b, "GeForce 710M" },
+	{ 0x1025, 0x068d, "GeForce 710M" },
+	{ 0x1025, 0x068e, "GeForce 710M" },
+	{ 0x1025, 0x0691, "GeForce 710M" },
+	{ 0x1025, 0x0692, "GeForce GT 620M" },
+	{ 0x1025, 0x0694, "GeForce GT 620M" },
+	{ 0x1025, 0x0702, "GeForce GT 620M" },
+	{ 0x1025, 0x0719, "GeForce GT 620M" },
+	{ 0x1025, 0x0725, "GeForce GT 620M" },
+	{ 0x1025, 0x0728, "GeForce GT 620M" },
+	{ 0x1025, 0x072b, "GeForce GT 620M" },
+	{ 0x1025, 0x072e, "GeForce GT 620M" },
+	{ 0x1025, 0x0732, "GeForce GT 620M" },
+	{ 0x1025, 0x0763, "GeForce GT 720M" },
+	{ 0x1025, 0x0773, "GeForce 710M" },
+	{ 0x1025, 0x0774, "GeForce 710M" },
+	{ 0x1025, 0x0776, "GeForce GT 720M" },
+	{ 0x1025, 0x077a, "GeForce 710M" },
+	{ 0x1025, 0x077b, "GeForce 710M" },
+	{ 0x1025, 0x077c, "GeForce 710M" },
+	{ 0x1025, 0x077d, "GeForce 710M" },
+	{ 0x1025, 0x077e, "GeForce 710M" },
+	{ 0x1025, 0x077f, "GeForce 710M" },
+	{ 0x1025, 0x0781, "GeForce GT 720M" },
+	{ 0x1025, 0x0798, "GeForce GT 720M" },
+	{ 0x1025, 0x0799, "GeForce GT 720M" },
+	{ 0x1025, 0x079b, "GeForce GT 720M" },
+	{ 0x1025, 0x079c, "GeForce GT 720M" },
+	{ 0x1025, 0x0807, "GeForce GT 720M" },
+	{ 0x1025, 0x0821, "GeForce 820M" },
+	{ 0x1025, 0x0823, "GeForce GT 720M" },
+	{ 0x1025, 0x0830, "GeForce GT 720M" },
+	{ 0x1025, 0x0833, "GeForce GT 720M" },
+	{ 0x1025, 0x0837, "GeForce GT 720M" },
+	{ 0x1025, 0x083e, "GeForce 820M" },
+	{ 0x1025, 0x0841, "GeForce 710M" },
+	{ 0x1025, 0x0853, "GeForce 820M" },
+	{ 0x1025, 0x0854, "GeForce 820M" },
+	{ 0x1025, 0x0855, "GeForce 820M" },
+	{ 0x1025, 0x0856, "GeForce 820M" },
+	{ 0x1025, 0x0857, "GeForce 820M" },
+	{ 0x1025, 0x0858, "GeForce 820M" },
+	{ 0x1025, 0x0863, "GeForce 820M" },
+	{ 0x1025, 0x0868, "GeForce 820M" },
+	{ 0x1025, 0x0869, "GeForce 810M" },
+	{ 0x1025, 0x0873, "GeForce 820M" },
+	{ 0x1025, 0x0878, "GeForce 820M" },
+	{ 0x1025, 0x087b, "GeForce 820M" },
+	{ 0x1025, 0x087f, "GeForce 820M" },
+	{ 0x1025, 0x0881, "GeForce 820M" },
+	{ 0x1025, 0x0885, "GeForce 820M" },
+	{ 0x1025, 0x088a, "GeForce 820M" },
+	{ 0x1025, 0x089b, "GeForce 820M" },
+	{ 0x1025, 0x0921, "GeForce 820M" },
+	{ 0x1025, 0x092e, "GeForce 810M" },
+	{ 0x1025, 0x092f, "GeForce 820M" },
+	{ 0x1025, 0x0932, "GeForce 820M" },
+	{ 0x1025, 0x093a, "GeForce 820M" },
+	{ 0x1025, 0x093c, "GeForce 820M" },
+	{ 0x1025, 0x093f, "GeForce 820M" },
+	{ 0x1025, 0x0941, "GeForce 820M" },
+	{ 0x1025, 0x0945, "GeForce 820M" },
+	{ 0x1025, 0x0954, "GeForce 820M" },
+	{ 0x1025, 0x0965, "GeForce 820M" },
+	{ 0x1028, 0x054d, "GeForce GT 630M" },
+	{ 0x1028, 0x054e, "GeForce GT 630M" },
+	{ 0x1028, 0x0554, "GeForce GT 620M" },
+	{ 0x1028, 0x0557, "GeForce GT 620M" },
+	{ 0x1028, 0x0562, "GeForce GT625M" },
+	{ 0x1028, 0x0565, "GeForce GT 630M" },
+	{ 0x1028, 0x0568, "GeForce GT 630M" },
+	{ 0x1028, 0x0590, "GeForce GT 630M" },
+	{ 0x1028, 0x0592, "GeForce GT625M" },
+	{ 0x1028, 0x0594, "GeForce GT625M" },
+	{ 0x1028, 0x0595, "GeForce GT625M" },
+	{ 0x1028, 0x05a2, "GeForce GT625M" },
+	{ 0x1028, 0x05b1, "GeForce GT625M" },
+	{ 0x1028, 0x05b3, "GeForce GT625M" },
+	{ 0x1028, 0x05da, "GeForce GT 630M" },
+	{ 0x1028, 0x05de, "GeForce GT 720M" },
+	{ 0x1028, 0x05e0, "GeForce GT 720M" },
+	{ 0x1028, 0x05e8, "GeForce GT 630M" },
+	{ 0x1028, 0x05f4, "GeForce GT 720M" },
+	{ 0x1028, 0x060f, "GeForce GT 720M" },
+	{ 0x1028, 0x062f, "GeForce GT 720M" },
+	{ 0x1028, 0x064e, "GeForce 820M" },
+	{ 0x1028, 0x0652, "GeForce 820M" },
+	{ 0x1028, 0x0653, "GeForce 820M" },
+	{ 0x1028, 0x0655, "GeForce 820M" },
+	{ 0x1028, 0x065e, "GeForce 820M" },
+	{ 0x1028, 0x0662, "GeForce 820M" },
+	{ 0x1028, 0x068d, "GeForce 820M" },
+	{ 0x1028, 0x06ad, "GeForce 820M" },
+	{ 0x1028, 0x06ae, "GeForce 820M" },
+	{ 0x1028, 0x06af, "GeForce 820M" },
+	{ 0x1028, 0x06b0, "GeForce 820M" },
+	{ 0x1028, 0x06c0, "GeForce 820M" },
+	{ 0x1028, 0x06c1, "GeForce 820M" },
+	{ 0x103c, 0x18ef, "GeForce GT 630M" },
+	{ 0x103c, 0x18f9, "GeForce GT 630M" },
+	{ 0x103c, 0x18fb, "GeForce GT 630M" },
+	{ 0x103c, 0x18fd, "GeForce GT 630M" },
+	{ 0x103c, 0x18ff, "GeForce GT 630M" },
+	{ 0x103c, 0x218a, "GeForce 820M" },
+	{ 0x103c, 0x21bb, "GeForce 820M" },
+	{ 0x103c, 0x21bc, "GeForce 820M" },
+	{ 0x103c, 0x220e, "GeForce 820M" },
+	{ 0x103c, 0x2210, "GeForce 820M" },
+	{ 0x103c, 0x2212, "GeForce 820M" },
+	{ 0x103c, 0x2214, "GeForce 820M" },
+	{ 0x103c, 0x2218, "GeForce 820M" },
+	{ 0x103c, 0x225b, "GeForce 820M" },
+	{ 0x103c, 0x225d, "GeForce 820M" },
+	{ 0x103c, 0x226d, "GeForce 820M" },
+	{ 0x103c, 0x226f, "GeForce 820M" },
+	{ 0x103c, 0x22d2, "GeForce 820M" },
+	{ 0x103c, 0x22d9, "GeForce 820M" },
+	{ 0x103c, 0x2335, "GeForce 820M" },
+	{ 0x103c, 0x2337, "GeForce 820M" },
+	{ 0x103c, 0x2aef, "GeForce GT 720A" },
+	{ 0x103c, 0x2af9, "GeForce 710A" },
+	{ 0x1043, 0x10dd, "NVS 5200M" },
+	{ 0x1043, 0x10ed, "NVS 5200M" },
+	{ 0x1043, 0x11fd, "GeForce GT 720M" },
+	{ 0x1043, 0x124d, "GeForce GT 720M" },
+	{ 0x1043, 0x126d, "GeForce GT 720M" },
+	{ 0x1043, 0x131d, "GeForce GT 720M" },
+	{ 0x1043, 0x13fd, "GeForce GT 720M" },
+	{ 0x1043, 0x14c7, "GeForce GT 720M" },
+	{ 0x1043, 0x1507, "GeForce GT 620M" },
+	{ 0x1043, 0x15ad, "GeForce 820M" },
+	{ 0x1043, 0x15ed, "GeForce 820M" },
+	{ 0x1043, 0x160d, "GeForce 820M" },
+	{ 0x1043, 0x163d, "GeForce 820M" },
+	{ 0x1043, 0x165d, "GeForce 820M" },
+	{ 0x1043, 0x166d, "GeForce 820M" },
+	{ 0x1043, 0x16cd, "GeForce 820M" },
+	{ 0x1043, 0x16dd, "GeForce 820M" },
+	{ 0x1043, 0x170d, "GeForce 820M" },
+	{ 0x1043, 0x176d, "GeForce 820M" },
+	{ 0x1043, 0x178d, "GeForce 820M" },
+	{ 0x1043, 0x179d, "GeForce 820M" },
+	{ 0x1043, 0x2132, "GeForce GT 620M" },
+	{ 0x1043, 0x2136, "NVS 5200M" },
+	{ 0x1043, 0x21ba, "GeForce GT 720M" },
+	{ 0x1043, 0x21fa, "GeForce GT 720M" },
+	{ 0x1043, 0x220a, "GeForce GT 720M" },
+	{ 0x1043, 0x221a, "GeForce GT 720M" },
+	{ 0x1043, 0x223a, "GeForce GT 710M" },
+	{ 0x1043, 0x224a, "GeForce GT 710M" },
+	{ 0x1043, 0x227a, "GeForce 820M" },
+	{ 0x1043, 0x228a, "GeForce 820M" },
+	{ 0x1043, 0x22fa, "GeForce 820M" },
+	{ 0x1043, 0x232a, "GeForce 820M" },
+	{ 0x1043, 0x233a, "GeForce 820M" },
+	{ 0x1043, 0x235a, "GeForce 820M" },
+	{ 0x1043, 0x236a, "GeForce 820M" },
+	{ 0x1043, 0x238a, "GeForce 820M" },
+	{ 0x1043, 0x8595, "GeForce GT 720M" },
+	{ 0x1043, 0x85ea, "GeForce GT 720M" },
+	{ 0x1043, 0x85eb, "GeForce 820M" },
+	{ 0x1043, 0x85ec, "GeForce 820M" },
+	{ 0x1043, 0x85ee, "GeForce GT 720M" },
+	{ 0x1043, 0x85f3, "GeForce 820M" },
+	{ 0x1043, 0x860e, "GeForce 820M" },
+	{ 0x1043, 0x861a, "GeForce 820M" },
+	{ 0x1043, 0x861b, "GeForce 820M" },
+	{ 0x1043, 0x8628, "GeForce 820M" },
+	{ 0x1043, 0x8643, "GeForce 820M" },
+	{ 0x1043, 0x864c, "GeForce 820M" },
+	{ 0x1043, 0x8652, "GeForce 820M" },
+	{ 0x1043, 0x8660, "GeForce 820M" },
+	{ 0x1043, 0x8661, "GeForce 820M" },
+	{ 0x105b, 0x0dac, "GeForce GT 720M" },
+	{ 0x105b, 0x0dad, "GeForce GT 720M" },
+	{ 0x105b, 0x0ef3, "GeForce GT 720M" },
+	{ 0x10cf, 0x17f5, "GeForce GT 720M" },
+	{ 0x1179, 0xfa01, "GeForce 710M" },
+	{ 0x1179, 0xfa02, "GeForce 710M" },
+	{ 0x1179, 0xfa03, "GeForce 710M" },
+	{ 0x1179, 0xfa05, "GeForce 710M" },
+	{ 0x1179, 0xfa11, "GeForce 710M" },
+	{ 0x1179, 0xfa13, "GeForce 710M" },
+	{ 0x1179, 0xfa18, "GeForce 710M" },
+	{ 0x1179, 0xfa19, "GeForce 710M" },
+	{ 0x1179, 0xfa21, "GeForce 710M" },
+	{ 0x1179, 0xfa23, "GeForce 710M" },
+	{ 0x1179, 0xfa2a, "GeForce 710M" },
+	{ 0x1179, 0xfa32, "GeForce 710M" },
+	{ 0x1179, 0xfa33, "GeForce 710M" },
+	{ 0x1179, 0xfa36, "GeForce 710M" },
+	{ 0x1179, 0xfa38, "GeForce 710M" },
+	{ 0x1179, 0xfa42, "GeForce 710M" },
+	{ 0x1179, 0xfa43, "GeForce 710M" },
+	{ 0x1179, 0xfa45, "GeForce 710M" },
+	{ 0x1179, 0xfa47, "GeForce 710M" },
+	{ 0x1179, 0xfa49, "GeForce 710M" },
+	{ 0x1179, 0xfa58, "GeForce 710M" },
+	{ 0x1179, 0xfa59, "GeForce 710M" },
+	{ 0x1179, 0xfa88, "GeForce 710M" },
+	{ 0x1179, 0xfa89, "GeForce 710M" },
+	{ 0x144d, 0xb092, "GeForce GT 620M" },
+	{ 0x144d, 0xc0d5, "GeForce GT 630M" },
+	{ 0x144d, 0xc0d7, "GeForce GT 620M" },
+	{ 0x144d, 0xc0e2, "NVS 5200M" },
+	{ 0x144d, 0xc0e3, "NVS 5200M" },
+	{ 0x144d, 0xc0e4, "NVS 5200M" },
+	{ 0x144d, 0xc10d, "GeForce 820M" },
+	{ 0x144d, 0xc652, "GeForce GT 620M" },
+	{ 0x144d, 0xc709, "GeForce 710M" },
+	{ 0x144d, 0xc711, "GeForce 710M" },
+	{ 0x144d, 0xc736, "GeForce 710M" },
+	{ 0x144d, 0xc737, "GeForce 710M" },
+	{ 0x144d, 0xc745, "GeForce 820M" },
+	{ 0x144d, 0xc750, "GeForce 820M" },
+	{ 0x1462, 0x10b8, "GeForce GT 710M" },
+	{ 0x1462, 0x10e9, "GeForce GT 720M" },
+	{ 0x1462, 0x1116, "GeForce 820M" },
+	{ 0x1462, 0xaa33, "GeForce 720M" },
+	{ 0x1462, 0xaaa2, "GeForce GT 720M" },
+	{ 0x1462, 0xaaa3, "GeForce 820M" },
+	{ 0x1462, 0xacb2, "GeForce GT 720M" },
+	{ 0x1462, 0xacc1, "GeForce GT 720M" },
+	{ 0x1462, 0xae61, "GeForce 720M" },
+	{ 0x1462, 0xae65, "GeForce GT 720M" },
+	{ 0x1462, 0xae6a, "GeForce 820M" },
+	{ 0x1462, 0xae71, "GeForce GT 720M" },
+	{ 0x14c0, 0x0083, "GeForce 820M" },
+	{ 0x152d, 0x0926, "GeForce 620M" },
+	{ 0x152d, 0x0982, "GeForce GT 630M" },
+	{ 0x152d, 0x0983, "GeForce GT 630M" },
+	{ 0x152d, 0x1005, "GeForce GT820M" },
+	{ 0x152d, 0x1012, "GeForce 710M" },
+	{ 0x152d, 0x1019, "GeForce 820M" },
+	{ 0x152d, 0x1030, "GeForce GT 630M" },
+	{ 0x152d, 0x1055, "GeForce 710M" },
+	{ 0x152d, 0x1067, "GeForce GT 720M" },
+	{ 0x152d, 0x1092, "GeForce 820M" },
+	{ 0x17aa, 0x2200, "NVS 5200M" },
+	{ 0x17aa, 0x2213, "GeForce GT 720M" },
+	{ 0x17aa, 0x2220, "GeForce GT 720M" },
+	{ 0x17aa, 0x309c, "GeForce GT 720A" },
+	{ 0x17aa, 0x30b4, "GeForce 820A" },
+	{ 0x17aa, 0x30b7, "GeForce 720A" },
+	{ 0x17aa, 0x30e4, "GeForce 820A" },
+	{ 0x17aa, 0x361b, "GeForce 820A" },
+	{ 0x17aa, 0x361c, "GeForce 820A" },
+	{ 0x17aa, 0x361d, "GeForce 820A" },
+	{ 0x17aa, 0x3656, "GeForce GT620M" },
+	{ 0x17aa, 0x365a, "GeForce 705M" },
+	{ 0x17aa, 0x365e, "GeForce 800M" },
+	{ 0x17aa, 0x3661, "GeForce 820A" },
+	{ 0x17aa, 0x366c, "GeForce 800M" },
+	{ 0x17aa, 0x3685, "GeForce 800M" },
+	{ 0x17aa, 0x3686, "GeForce 800M" },
+	{ 0x17aa, 0x3687, "GeForce 705A" },
+	{ 0x17aa, 0x3696, "GeForce 820A" },
+	{ 0x17aa, 0x369b, "GeForce 820A" },
+	{ 0x17aa, 0x369c, "GeForce 820A" },
+	{ 0x17aa, 0x369d, "GeForce 820A" },
+	{ 0x17aa, 0x369e, "GeForce 820A" },
+	{ 0x17aa, 0x36a6, "GeForce 820A" },
+	{ 0x17aa, 0x36a7, "GeForce 820A" },
+	{ 0x17aa, 0x36a9, "GeForce 820A" },
+	{ 0x17aa, 0x36af, "GeForce 820A" },
+	{ 0x17aa, 0x36b0, "GeForce 820A" },
+	{ 0x17aa, 0x36b6, "GeForce 820A" },
+	{ 0x17aa, 0x3800, "GeForce GT 720M" },
+	{ 0x17aa, 0x3801, "GeForce GT 720M" },
+	{ 0x17aa, 0x3802, "GeForce GT 720M" },
+	{ 0x17aa, 0x3803, "GeForce GT 720M" },
+	{ 0x17aa, 0x3804, "GeForce GT 720M" },
+	{ 0x17aa, 0x3806, "GeForce GT 720M" },
+	{ 0x17aa, 0x3808, "GeForce GT 720M" },
+	{ 0x17aa, 0x380d, "GeForce 820M" },
+	{ 0x17aa, 0x380e, "GeForce 820M" },
+	{ 0x17aa, 0x380f, "GeForce 820M" },
+	{ 0x17aa, 0x3811, "GeForce 820M" },
+	{ 0x17aa, 0x3812, "GeForce 820M" },
+	{ 0x17aa, 0x3813, "GeForce 820M" },
+	{ 0x17aa, 0x3816, "GeForce 820M" },
+	{ 0x17aa, 0x3817, "GeForce 820M" },
+	{ 0x17aa, 0x3818, "GeForce 820M" },
+	{ 0x17aa, 0x381a, "GeForce 820M" },
+	{ 0x17aa, 0x381c, "GeForce 820M" },
+	{ 0x17aa, 0x381d, "GeForce 820M" },
+	{ 0x17aa, 0x3901, "GeForce 610M" },
+	{ 0x17aa, 0x3902, "GeForce 710M" },
+	{ 0x17aa, 0x3903, "GeForce 710M" },
+	{ 0x17aa, 0x3904, "GeForce GT 625M" },
+	{ 0x17aa, 0x3905, "GeForce GT 720M" },
+	{ 0x17aa, 0x3907, "GeForce 820M" },
+	{ 0x17aa, 0x3910, "GeForce GT 720M" },
+	{ 0x17aa, 0x3912, "GeForce GT 720M" },
+	{ 0x17aa, 0x3913, "GeForce 820M" },
+	{ 0x17aa, 0x3915, "GeForce 820M" },
+	{ 0x17aa, 0x3983, "GeForce 610M" },
+	{ 0x17aa, 0x5001, "GeForce 610M" },
+	{ 0x17aa, 0x5003, "GeForce GT 720M" },
+	{ 0x17aa, 0x5005, "GeForce 705M" },
+	{ 0x17aa, 0x500d, "GeForce GT 620M" },
+	{ 0x17aa, 0x5014, "GeForce 710M" },
+	{ 0x17aa, 0x5017, "GeForce 710M" },
+	{ 0x17aa, 0x5019, "GeForce 710M" },
+	{ 0x17aa, 0x501a, "GeForce 710M" },
+	{ 0x17aa, 0x501f, "GeForce GT 720M" },
+	{ 0x17aa, 0x5025, "GeForce 710M" },
+	{ 0x17aa, 0x5027, "GeForce 710M" },
+	{ 0x17aa, 0x502a, "GeForce 710M" },
+	{ 0x17aa, 0x502b, "GeForce GT 720M" },
+	{ 0x17aa, 0x502d, "GeForce 710M" },
+	{ 0x17aa, 0x502e, "GeForce GT 720M" },
+	{ 0x17aa, 0x502f, "GeForce GT 720M" },
+	{ 0x17aa, 0x5030, "GeForce 705M" },
+	{ 0x17aa, 0x5031, "GeForce 705M" },
+	{ 0x17aa, 0x5032, "GeForce 820M" },
+	{ 0x17aa, 0x5033, "GeForce 820M" },
+	{ 0x17aa, 0x503e, "GeForce 710M" },
+	{ 0x17aa, 0x503f, "GeForce 820M" },
+	{ 0x17aa, 0x5040, "GeForce 820M" },
+	{ 0x1854, 0x0177, "GeForce 710M" },
+	{ 0x1854, 0x0180, "GeForce 710M" },
+	{ 0x1854, 0x0190, "GeForce GT 720M" },
+	{ 0x1854, 0x0192, "GeForce GT 720M" },
+	{ 0x1854, 0x0224, "GeForce 820M" },
+	{ 0x1b0a, 0x20dd, "GeForce GT 620M" },
+	{ 0x1b0a, 0x20df, "GeForce GT 620M" },
+	{ 0x1b0a, 0x210e, "GeForce 820M" },
+	{ 0x1b0a, 0x2202, "GeForce GT 720M" },
+	{ 0x1b0a, 0x90d7, "GeForce 820M" },
+	{ 0x1b0a, 0x90dd, "GeForce 820M" },
+	{ 0x1b50, 0x5530, "GeForce 820M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1185[] = {
+	{ 0x10de, 0x106f, "GeForce GTX 760" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1189[] = {
+	{ 0x10de, 0x1074, "GeForce GTX 760 Ti OEM" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1199[] = {
+	{ 0x1458, 0xd001, "GeForce GTX 760" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11e3[] = {
+	{ 0x17aa, 0x3683, "GeForce GTX 760A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11fc[] = {
+	{ 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
+	{ 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1247[] = {
+	{ 0x1043, 0x212a, "GeForce GT 635M" },
+	{ 0x1043, 0x212b, "GeForce GT 635M" },
+	{ 0x1043, 0x212c, "GeForce GT 635M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_124d[] = {
+	{ 0x1462, 0x10cc, "GeForce GT 635M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1290[] = {
+	{ 0x103c, 0x2afa, "GeForce 730A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1292[] = {
+	{ 0x17aa, 0x3675, "GeForce GT 740A" },
+	{ 0x17aa, 0x367c, "GeForce GT 740A" },
+	{ 0x17aa, 0x3684, "GeForce GT 740A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1295[] = {
+	{ 0x103c, 0x2b0d, "GeForce 710A" },
+	{ 0x103c, 0x2b0f, "GeForce 710A" },
+	{ 0x103c, 0x2b20, "GeForce 810A" },
+	{ 0x103c, 0x2b21, "GeForce 810A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1299[] = {
+	{ 0x17aa, 0x369b, "GeForce 920A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1340[] = {
+	{ 0x103c, 0x2b2b, "GeForce 830A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1341[] = {
+	{ 0x17aa, 0x3697, "GeForce 840A" },
+	{ 0x17aa, 0x3699, "GeForce 840A" },
+	{ 0x17aa, 0x369c, "GeForce 840A" },
+	{ 0x17aa, 0x36af, "GeForce 840A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1346[] = {
+	{ 0x17aa, 0x30ba, "GeForce 930A" },
+	{ 0x17aa, 0x362c, "GeForce 930A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1347[] = {
+	{ 0x17aa, 0x36b9, "GeForce 940A" },
+	{ 0x17aa, 0x36ba, "GeForce 940A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_137a[] = {
+	{ 0x17aa, 0x2225, "Quadro K620M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_137d[] = {
+	{ 0x17aa, 0x3699, "GeForce 940A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1391[] = {
+	{ 0x17aa, 0x3697, "GeForce GTX 850A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1392[] = {
+	{ 0x1028, 0x066a, "GeForce GPU" },
+	{ 0x1043, 0x861e, "GeForce GTX 750 Ti" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_139a[] = {
+	{ 0x17aa, 0x36b9, "GeForce GTX 950A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_139b[] = {
+	{ 0x1028, 0x06a3, "GeForce GTX 860M" },
+	{ 0x19da, 0xc248, "GeForce GTX 750 Ti" },
+	{}
+};
+
+static const struct nvkm_device_pci_device
+nvkm_device_pci_10de[] = {
+	{ 0x0020, "RIVA TNT" },
+	{ 0x0028, "RIVA TNT2/TNT2 Pro" },
+	{ 0x0029, "RIVA TNT2 Ultra" },
+	{ 0x002c, "Vanta/Vanta LT" },
+	{ 0x002d, "RIVA TNT2 Model 64/Model 64 Pro" },
+	{ 0x0040, "GeForce 6800 Ultra" },
+	{ 0x0041, "GeForce 6800" },
+	{ 0x0042, "GeForce 6800 LE" },
+	{ 0x0043, "GeForce 6800 XE" },
+	{ 0x0044, "GeForce 6800 XT" },
+	{ 0x0045, "GeForce 6800 GT" },
+	{ 0x0046, "GeForce 6800 GT" },
+	{ 0x0047, "GeForce 6800 GS" },
+	{ 0x0048, "GeForce 6800 XT" },
+	{ 0x004e, "Quadro FX 4000" },
+	{ 0x0090, "GeForce 7800 GTX" },
+	{ 0x0091, "GeForce 7800 GTX" },
+	{ 0x0092, "GeForce 7800 GT" },
+	{ 0x0093, "GeForce 7800 GS" },
+	{ 0x0095, "GeForce 7800 SLI" },
+	{ 0x0098, "GeForce Go 7800" },
+	{ 0x0099, "GeForce Go 7800 GTX" },
+	{ 0x009d, "Quadro FX 4500" },
+	{ 0x00a0, "Aladdin TNT2" },
+	{ 0x00c0, "GeForce 6800 GS" },
+	{ 0x00c1, "GeForce 6800" },
+	{ 0x00c2, "GeForce 6800 LE" },
+	{ 0x00c3, "GeForce 6800 XT" },
+	{ 0x00c8, "GeForce Go 6800" },
+	{ 0x00c9, "GeForce Go 6800 Ultra" },
+	{ 0x00cc, "Quadro FX Go1400" },
+	{ 0x00cd, "Quadro FX 3450/4000 SDI" },
+	{ 0x00ce, "Quadro FX 1400" },
+	{ 0x00f1, "GeForce 6600 GT" },
+	{ 0x00f2, "GeForce 6600" },
+	{ 0x00f3, "GeForce 6200" },
+	{ 0x00f4, "GeForce 6600 LE" },
+	{ 0x00f5, "GeForce 7800 GS" },
+	{ 0x00f6, "GeForce 6800 GS" },
+	{ 0x00f8, "Quadro FX 3400/Quadro FX 4000" },
+	{ 0x00f9, "GeForce 6800 Ultra" },
+	{ 0x00fa, "GeForce PCX 5750" },
+	{ 0x00fb, "GeForce PCX 5900" },
+	{ 0x00fc, "Quadro FX 330/GeForce PCX 5300" },
+	{ 0x00fd, "Quadro FX 330/Quadro NVS 280 PCI-E" },
+	{ 0x00fe, "Quadro FX 1300" },
+	{ 0x0100, "GeForce 256" },
+	{ 0x0101, "GeForce DDR" },
+	{ 0x0103, "Quadro" },
+	{ 0x0110, "GeForce2 MX/MX 400" },
+	{ 0x0111, "GeForce2 MX 100/200" },
+	{ 0x0112, "GeForce2 Go" },
+	{ 0x0113, "Quadro2 MXR/EX/Go" },
+	{ 0x0140, "GeForce 6600 GT" },
+	{ 0x0141, "GeForce 6600" },
+	{ 0x0142, "GeForce 6600 LE" },
+	{ 0x0143, "GeForce 6600 VE" },
+	{ 0x0144, "GeForce Go 6600" },
+	{ 0x0145, "GeForce 6610 XL" },
+	{ 0x0146, "GeForce Go 6600 TE/6200 TE" },
+	{ 0x0147, "GeForce 6700 XL" },
+	{ 0x0148, "GeForce Go 6600" },
+	{ 0x0149, "GeForce Go 6600 GT" },
+	{ 0x014a, "Quadro NVS 440" },
+	{ 0x014c, "Quadro FX 540M" },
+	{ 0x014d, "Quadro FX 550" },
+	{ 0x014e, "Quadro FX 540" },
+	{ 0x014f, "GeForce 6200" },
+	{ 0x0150, "GeForce2 GTS/GeForce2 Pro" },
+	{ 0x0151, "GeForce2 Ti" },
+	{ 0x0152, "GeForce2 Ultra" },
+	{ 0x0153, "Quadro2 Pro" },
+	{ 0x0160, "GeForce 6500" },
+	{ 0x0161, "GeForce 6200 TurboCache(TM)" },
+	{ 0x0162, "GeForce 6200SE TurboCache(TM)" },
+	{ 0x0163, "GeForce 6200 LE" },
+	{ 0x0164, "GeForce Go 6200" },
+	{ 0x0165, "Quadro NVS 285" },
+	{ 0x0166, "GeForce Go 6400" },
+	{ 0x0167, "GeForce Go 6200" },
+	{ 0x0168, "GeForce Go 6400" },
+	{ 0x0169, "GeForce 6250" },
+	{ 0x016a, "GeForce 7100 GS" },
+	{ 0x0170, "GeForce4 MX 460" },
+	{ 0x0171, "GeForce4 MX 440" },
+	{ 0x0172, "GeForce4 MX 420" },
+	{ 0x0173, "GeForce4 MX 440-SE" },
+	{ 0x0174, "GeForce4 440 Go" },
+	{ 0x0175, "GeForce4 420 Go" },
+	{ 0x0176, "GeForce4 420 Go 32M" },
+	{ 0x0177, "GeForce4 460 Go" },
+	{ 0x0178, "Quadro4 550 XGL" },
+	{ 0x0179, "GeForce4 440 Go 64M" },
+	{ 0x017a, "Quadro NVS 400" },
+	{ 0x017c, "Quadro4 500 GoGL" },
+	{ 0x017d, "GeForce4 410 Go 16M" },
+	{ 0x0181, "GeForce4 MX 440 with AGP8X" },
+	{ 0x0182, "GeForce4 MX 440SE with AGP8X" },
+	{ 0x0183, "GeForce4 MX 420 with AGP8X" },
+	{ 0x0185, "GeForce4 MX 4000" },
+	{ 0x0188, "Quadro4 580 XGL" },
+	{ 0x0189, "GeForce4 MX with AGP8X (Mac)", nvkm_device_pci_10de_0189 },
+	{ 0x018a, "Quadro NVS 280 SD" },
+	{ 0x018b, "Quadro4 380 XGL" },
+	{ 0x018c, "Quadro NVS 50 PCI" },
+	{ 0x0191, "GeForce 8800 GTX" },
+	{ 0x0193, "GeForce 8800 GTS" },
+	{ 0x0194, "GeForce 8800 Ultra" },
+	{ 0x0197, "Tesla C870" },
+	{ 0x019d, "Quadro FX 5600" },
+	{ 0x019e, "Quadro FX 4600" },
+	{ 0x01a0, "GeForce2 Integrated GPU" },
+	{ 0x01d0, "GeForce 7350 LE" },
+	{ 0x01d1, "GeForce 7300 LE" },
+	{ 0x01d2, "GeForce 7550 LE" },
+	{ 0x01d3, "GeForce 7300 SE/7200 GS" },
+	{ 0x01d6, "GeForce Go 7200" },
+	{ 0x01d7, "GeForce Go 7300" },
+	{ 0x01d8, "GeForce Go 7400" },
+	{ 0x01da, "Quadro NVS 110M" },
+	{ 0x01db, "Quadro NVS 120M" },
+	{ 0x01dc, "Quadro FX 350M" },
+	{ 0x01dd, "GeForce 7500 LE" },
+	{ 0x01de, "Quadro FX 350" },
+	{ 0x01df, "GeForce 7300 GS" },
+	{ 0x01f0, "GeForce4 MX Integrated GPU", nvkm_device_pci_10de_01f0 },
+	{ 0x0200, "GeForce3" },
+	{ 0x0201, "GeForce3 Ti 200" },
+	{ 0x0202, "GeForce3 Ti 500" },
+	{ 0x0203, "Quadro DCC" },
+	{ 0x0211, "GeForce 6800" },
+	{ 0x0212, "GeForce 6800 LE" },
+	{ 0x0215, "GeForce 6800 GT" },
+	{ 0x0218, "GeForce 6800 XT" },
+	{ 0x0221, "GeForce 6200" },
+	{ 0x0222, "GeForce 6200 A-LE" },
+	{ 0x0240, "GeForce 6150" },
+	{ 0x0241, "GeForce 6150 LE" },
+	{ 0x0242, "GeForce 6100" },
+	{ 0x0244, "GeForce Go 6150" },
+	{ 0x0245, "Quadro NVS 210S / GeForce 6150LE" },
+	{ 0x0247, "GeForce Go 6100" },
+	{ 0x0250, "GeForce4 Ti 4600" },
+	{ 0x0251, "GeForce4 Ti 4400" },
+	{ 0x0253, "GeForce4 Ti 4200" },
+	{ 0x0258, "Quadro4 900 XGL" },
+	{ 0x0259, "Quadro4 750 XGL" },
+	{ 0x025b, "Quadro4 700 XGL" },
+	{ 0x0280, "GeForce4 Ti 4800" },
+	{ 0x0281, "GeForce4 Ti 4200 with AGP8X" },
+	{ 0x0282, "GeForce4 Ti 4800 SE" },
+	{ 0x0286, "GeForce4 4200 Go" },
+	{ 0x0288, "Quadro4 980 XGL" },
+	{ 0x0289, "Quadro4 780 XGL" },
+	{ 0x028c, "Quadro4 700 GoGL" },
+	{ 0x0290, "GeForce 7900 GTX" },
+	{ 0x0291, "GeForce 7900 GT/GTO" },
+	{ 0x0292, "GeForce 7900 GS" },
+	{ 0x0293, "GeForce 7950 GX2" },
+	{ 0x0294, "GeForce 7950 GX2" },
+	{ 0x0295, "GeForce 7950 GT" },
+	{ 0x0297, "GeForce Go 7950 GTX" },
+	{ 0x0298, "GeForce Go 7900 GS" },
+	{ 0x0299, "Quadro NVS 510M" },
+	{ 0x029a, "Quadro FX 2500M" },
+	{ 0x029b, "Quadro FX 1500M" },
+	{ 0x029c, "Quadro FX 5500" },
+	{ 0x029d, "Quadro FX 3500" },
+	{ 0x029e, "Quadro FX 1500" },
+	{ 0x029f, "Quadro FX 4500 X2" },
+	{ 0x02e0, "GeForce 7600 GT" },
+	{ 0x02e1, "GeForce 7600 GS" },
+	{ 0x02e2, "GeForce 7300 GT" },
+	{ 0x02e3, "GeForce 7900 GS" },
+	{ 0x02e4, "GeForce 7950 GT" },
+	{ 0x0301, "GeForce FX 5800 Ultra" },
+	{ 0x0302, "GeForce FX 5800" },
+	{ 0x0308, "Quadro FX 2000" },
+	{ 0x0309, "Quadro FX 1000" },
+	{ 0x0311, "GeForce FX 5600 Ultra" },
+	{ 0x0312, "GeForce FX 5600" },
+	{ 0x0314, "GeForce FX 5600XT" },
+	{ 0x031a, "GeForce FX Go5600" },
+	{ 0x031b, "GeForce FX Go5650" },
+	{ 0x031c, "Quadro FX Go700" },
+	{ 0x0320, "GeForce FX 5200" },
+	{ 0x0321, "GeForce FX 5200 Ultra" },
+	{ 0x0322, "GeForce FX 5200", nvkm_device_pci_10de_0322 },
+	{ 0x0323, "GeForce FX 5200LE" },
+	{ 0x0324, "GeForce FX Go5200" },
+	{ 0x0325, "GeForce FX Go5250" },
+	{ 0x0326, "GeForce FX 5500" },
+	{ 0x0327, "GeForce FX 5100" },
+	{ 0x0328, "GeForce FX Go5200 32M/64M" },
+	{ 0x032a, "Quadro NVS 55/280 PCI" },
+	{ 0x032b, "Quadro FX 500/FX 600" },
+	{ 0x032c, "GeForce FX Go53xx" },
+	{ 0x032d, "GeForce FX Go5100" },
+	{ 0x0330, "GeForce FX 5900 Ultra" },
+	{ 0x0331, "GeForce FX 5900" },
+	{ 0x0332, "GeForce FX 5900XT" },
+	{ 0x0333, "GeForce FX 5950 Ultra" },
+	{ 0x0334, "GeForce FX 5900ZT" },
+	{ 0x0338, "Quadro FX 3000" },
+	{ 0x033f, "Quadro FX 700" },
+	{ 0x0341, "GeForce FX 5700 Ultra" },
+	{ 0x0342, "GeForce FX 5700" },
+	{ 0x0343, "GeForce FX 5700LE" },
+	{ 0x0344, "GeForce FX 5700VE" },
+	{ 0x0347, "GeForce FX Go5700" },
+	{ 0x0348, "GeForce FX Go5700" },
+	{ 0x034c, "Quadro FX Go1000" },
+	{ 0x034e, "Quadro FX 1100" },
+	{ 0x038b, "GeForce 7650 GS" },
+	{ 0x0390, "GeForce 7650 GS" },
+	{ 0x0391, "GeForce 7600 GT" },
+	{ 0x0392, "GeForce 7600 GS" },
+	{ 0x0393, "GeForce 7300 GT" },
+	{ 0x0394, "GeForce 7600 LE" },
+	{ 0x0395, "GeForce 7300 GT" },
+	{ 0x0397, "GeForce Go 7700" },
+	{ 0x0398, "GeForce Go 7600" },
+	{ 0x0399, "GeForce Go 7600 GT" },
+	{ 0x039c, "Quadro FX 560M" },
+	{ 0x039e, "Quadro FX 560" },
+	{ 0x03d0, "GeForce 6150SE nForce 430" },
+	{ 0x03d1, "GeForce 6100 nForce 405" },
+	{ 0x03d2, "GeForce 6100 nForce 400" },
+	{ 0x03d5, "GeForce 6100 nForce 420" },
+	{ 0x03d6, "GeForce 7025 / nForce 630a" },
+	{ 0x0400, "GeForce 8600 GTS" },
+	{ 0x0401, "GeForce 8600 GT" },
+	{ 0x0402, "GeForce 8600 GT" },
+	{ 0x0403, "GeForce 8600 GS" },
+	{ 0x0404, "GeForce 8400 GS" },
+	{ 0x0405, "GeForce 9500M GS" },
+	{ 0x0406, "GeForce 8300 GS" },
+	{ 0x0407, "GeForce 8600M GT" },
+	{ 0x0408, "GeForce 9650M GS" },
+	{ 0x0409, "GeForce 8700M GT" },
+	{ 0x040a, "Quadro FX 370" },
+	{ 0x040b, "Quadro NVS 320M" },
+	{ 0x040c, "Quadro FX 570M" },
+	{ 0x040d, "Quadro FX 1600M" },
+	{ 0x040e, "Quadro FX 570" },
+	{ 0x040f, "Quadro FX 1700" },
+	{ 0x0410, "GeForce GT 330" },
+	{ 0x0420, "GeForce 8400 SE" },
+	{ 0x0421, "GeForce 8500 GT" },
+	{ 0x0422, "GeForce 8400 GS" },
+	{ 0x0423, "GeForce 8300 GS" },
+	{ 0x0424, "GeForce 8400 GS" },
+	{ 0x0425, "GeForce 8600M GS" },
+	{ 0x0426, "GeForce 8400M GT" },
+	{ 0x0427, "GeForce 8400M GS" },
+	{ 0x0428, "GeForce 8400M G" },
+	{ 0x0429, "Quadro NVS 140M" },
+	{ 0x042a, "Quadro NVS 130M" },
+	{ 0x042b, "Quadro NVS 135M" },
+	{ 0x042c, "GeForce 9400 GT" },
+	{ 0x042d, "Quadro FX 360M" },
+	{ 0x042e, "GeForce 9300M G" },
+	{ 0x042f, "Quadro NVS 290" },
+	{ 0x0531, "GeForce 7150M / nForce 630M" },
+	{ 0x0533, "GeForce 7000M / nForce 610M" },
+	{ 0x053a, "GeForce 7050 PV / nForce 630a" },
+	{ 0x053b, "GeForce 7050 PV / nForce 630a" },
+	{ 0x053e, "GeForce 7025 / nForce 630a" },
+	{ 0x05e0, "GeForce GTX 295" },
+	{ 0x05e1, "GeForce GTX 280" },
+	{ 0x05e2, "GeForce GTX 260" },
+	{ 0x05e3, "GeForce GTX 285" },
+	{ 0x05e6, "GeForce GTX 275" },
+	{ 0x05e7, "Tesla C1060", nvkm_device_pci_10de_05e7 },
+	{ 0x05ea, "GeForce GTX 260" },
+	{ 0x05eb, "GeForce GTX 295" },
+	{ 0x05ed, "Quadroplex 2200 D2" },
+	{ 0x05f8, "Quadroplex 2200 S4" },
+	{ 0x05f9, "Quadro CX" },
+	{ 0x05fd, "Quadro FX 5800" },
+	{ 0x05fe, "Quadro FX 4800" },
+	{ 0x05ff, "Quadro FX 3800" },
+	{ 0x0600, "GeForce 8800 GTS 512" },
+	{ 0x0601, "GeForce 9800 GT" },
+	{ 0x0602, "GeForce 8800 GT" },
+	{ 0x0603, "GeForce GT 230" },
+	{ 0x0604, "GeForce 9800 GX2" },
+	{ 0x0605, "GeForce 9800 GT" },
+	{ 0x0606, "GeForce 8800 GS" },
+	{ 0x0607, "GeForce GTS 240" },
+	{ 0x0608, "GeForce 9800M GTX" },
+	{ 0x0609, "GeForce 8800M GTS", nvkm_device_pci_10de_0609 },
+	{ 0x060a, "GeForce GTX 280M" },
+	{ 0x060b, "GeForce 9800M GT" },
+	{ 0x060c, "GeForce 8800M GTX" },
+	{ 0x060d, "GeForce 8800 GS" },
+	{ 0x060f, "GeForce GTX 285M" },
+	{ 0x0610, "GeForce 9600 GSO" },
+	{ 0x0611, "GeForce 8800 GT" },
+	{ 0x0612, "GeForce 9800 GTX/9800 GTX+" },
+	{ 0x0613, "GeForce 9800 GTX+" },
+	{ 0x0614, "GeForce 9800 GT" },
+	{ 0x0615, "GeForce GTS 250" },
+	{ 0x0617, "GeForce 9800M GTX" },
+	{ 0x0618, "GeForce GTX 260M" },
+	{ 0x0619, "Quadro FX 4700 X2" },
+	{ 0x061a, "Quadro FX 3700" },
+	{ 0x061b, "Quadro VX 200" },
+	{ 0x061c, "Quadro FX 3600M" },
+	{ 0x061d, "Quadro FX 2800M" },
+	{ 0x061e, "Quadro FX 3700M" },
+	{ 0x061f, "Quadro FX 3800M" },
+	{ 0x0621, "GeForce GT 230" },
+	{ 0x0622, "GeForce 9600 GT" },
+	{ 0x0623, "GeForce 9600 GS" },
+	{ 0x0625, "GeForce 9600 GSO 512" },
+	{ 0x0626, "GeForce GT 130" },
+	{ 0x0627, "GeForce GT 140" },
+	{ 0x0628, "GeForce 9800M GTS" },
+	{ 0x062a, "GeForce 9700M GTS" },
+	{ 0x062b, "GeForce 9800M GS" },
+	{ 0x062c, "GeForce 9800M GTS" },
+	{ 0x062d, "GeForce 9600 GT" },
+	{ 0x062e, "GeForce 9600 GT", nvkm_device_pci_10de_062e },
+	{ 0x0630, "GeForce 9700 S" },
+	{ 0x0631, "GeForce GTS 160M" },
+	{ 0x0632, "GeForce GTS 150M" },
+	{ 0x0635, "GeForce 9600 GSO" },
+	{ 0x0637, "GeForce 9600 GT" },
+	{ 0x0638, "Quadro FX 1800" },
+	{ 0x063a, "Quadro FX 2700M" },
+	{ 0x0640, "GeForce 9500 GT" },
+	{ 0x0641, "GeForce 9400 GT" },
+	{ 0x0643, "GeForce 9500 GT" },
+	{ 0x0644, "GeForce 9500 GS" },
+	{ 0x0645, "GeForce 9500 GS" },
+	{ 0x0646, "GeForce GT 120" },
+	{ 0x0647, "GeForce 9600M GT" },
+	{ 0x0648, "GeForce 9600M GS" },
+	{ 0x0649, "GeForce 9600M GT", nvkm_device_pci_10de_0649 },
+	{ 0x064a, "GeForce 9700M GT" },
+	{ 0x064b, "GeForce 9500M G" },
+	{ 0x064c, "GeForce 9650M GT" },
+	{ 0x0651, "GeForce G 110M" },
+	{ 0x0652, "GeForce GT 130M", nvkm_device_pci_10de_0652 },
+	{ 0x0653, "GeForce GT 120M" },
+	{ 0x0654, "GeForce GT 220M", nvkm_device_pci_10de_0654 },
+	{ 0x0655, NULL, nvkm_device_pci_10de_0655 },
+	{ 0x0656, NULL, nvkm_device_pci_10de_0656 },
+	{ 0x0658, "Quadro FX 380" },
+	{ 0x0659, "Quadro FX 580" },
+	{ 0x065a, "Quadro FX 1700M" },
+	{ 0x065b, "GeForce 9400 GT" },
+	{ 0x065c, "Quadro FX 770M" },
+	{ 0x06c0, "GeForce GTX 480" },
+	{ 0x06c4, "GeForce GTX 465" },
+	{ 0x06ca, "GeForce GTX 480M" },
+	{ 0x06cd, "GeForce GTX 470" },
+	{ 0x06d1, "Tesla C2050 / C2070", nvkm_device_pci_10de_06d1 },
+	{ 0x06d2, "Tesla M2070", nvkm_device_pci_10de_06d2 },
+	{ 0x06d8, "Quadro 6000" },
+	{ 0x06d9, "Quadro 5000" },
+	{ 0x06da, "Quadro 5000M" },
+	{ 0x06dc, "Quadro 6000" },
+	{ 0x06dd, "Quadro 4000" },
+	{ 0x06de, "Tesla T20 Processor", nvkm_device_pci_10de_06de },
+	{ 0x06df, "Tesla M2070-Q" },
+	{ 0x06e0, "GeForce 9300 GE" },
+	{ 0x06e1, "GeForce 9300 GS" },
+	{ 0x06e2, "GeForce 8400" },
+	{ 0x06e3, "GeForce 8400 SE" },
+	{ 0x06e4, "GeForce 8400 GS" },
+	{ 0x06e5, "GeForce 9300M GS" },
+	{ 0x06e6, "GeForce G100" },
+	{ 0x06e7, "GeForce 9300 SE" },
+	{ 0x06e8, "GeForce 9200M GS", nvkm_device_pci_10de_06e8 },
+	{ 0x06e9, "GeForce 9300M GS" },
+	{ 0x06ea, "Quadro NVS 150M" },
+	{ 0x06eb, "Quadro NVS 160M" },
+	{ 0x06ec, "GeForce G 105M" },
+	{ 0x06ef, "GeForce G 103M" },
+	{ 0x06f1, "GeForce G105M" },
+	{ 0x06f8, "Quadro NVS 420" },
+	{ 0x06f9, "Quadro FX 370 LP", nvkm_device_pci_10de_06f9 },
+	{ 0x06fa, "Quadro NVS 450" },
+	{ 0x06fb, "Quadro FX 370M" },
+	{ 0x06fd, "Quadro NVS 295" },
+	{ 0x06ff, "HICx16 + Graphics", nvkm_device_pci_10de_06ff },
+	{ 0x07e0, "GeForce 7150 / nForce 630i" },
+	{ 0x07e1, "GeForce 7100 / nForce 630i" },
+	{ 0x07e2, "GeForce 7050 / nForce 630i" },
+	{ 0x07e3, "GeForce 7050 / nForce 610i" },
+	{ 0x07e5, "GeForce 7050 / nForce 620i" },
+	{ 0x0840, "GeForce 8200M" },
+	{ 0x0844, "GeForce 9100M G" },
+	{ 0x0845, "GeForce 8200M G" },
+	{ 0x0846, "GeForce 9200" },
+	{ 0x0847, "GeForce 9100" },
+	{ 0x0848, "GeForce 8300" },
+	{ 0x0849, "GeForce 8200" },
+	{ 0x084a, "nForce 730a" },
+	{ 0x084b, "GeForce 9200" },
+	{ 0x084c, "nForce 980a/780a SLI" },
+	{ 0x084d, "nForce 750a SLI" },
+	{ 0x084f, "GeForce 8100 / nForce 720a" },
+	{ 0x0860, "GeForce 9400" },
+	{ 0x0861, "GeForce 9400" },
+	{ 0x0862, "GeForce 9400M G" },
+	{ 0x0863, "GeForce 9400M" },
+	{ 0x0864, "GeForce 9300" },
+	{ 0x0865, "ION" },
+	{ 0x0866, "GeForce 9400M G", nvkm_device_pci_10de_0866 },
+	{ 0x0867, "GeForce 9400" },
+	{ 0x0868, "nForce 760i SLI" },
+	{ 0x0869, "GeForce 9400" },
+	{ 0x086a, "GeForce 9400" },
+	{ 0x086c, "GeForce 9300 / nForce 730i" },
+	{ 0x086d, "GeForce 9200" },
+	{ 0x086e, "GeForce 9100M G" },
+	{ 0x086f, "GeForce 8200M G" },
+	{ 0x0870, "GeForce 9400M" },
+	{ 0x0871, "GeForce 9200" },
+	{ 0x0872, "GeForce G102M", nvkm_device_pci_10de_0872 },
+	{ 0x0873, "GeForce G102M", nvkm_device_pci_10de_0873 },
+	{ 0x0874, "ION" },
+	{ 0x0876, "ION" },
+	{ 0x087a, "GeForce 9400" },
+	{ 0x087d, "ION" },
+	{ 0x087e, "ION LE" },
+	{ 0x087f, "ION LE" },
+	{ 0x08a0, "GeForce 320M" },
+	{ 0x08a2, "GeForce 320M" },
+	{ 0x08a3, "GeForce 320M" },
+	{ 0x08a4, "GeForce 320M" },
+	{ 0x08a5, "GeForce 320M" },
+	{ 0x0a20, "GeForce GT 220" },
+	{ 0x0a22, "GeForce 315" },
+	{ 0x0a23, "GeForce 210" },
+	{ 0x0a26, "GeForce 405" },
+	{ 0x0a27, "GeForce 405" },
+	{ 0x0a28, "GeForce GT 230M" },
+	{ 0x0a29, "GeForce GT 330M" },
+	{ 0x0a2a, "GeForce GT 230M" },
+	{ 0x0a2b, "GeForce GT 330M" },
+	{ 0x0a2c, "NVS 5100M" },
+	{ 0x0a2d, "GeForce GT 320M" },
+	{ 0x0a32, "GeForce GT 415" },
+	{ 0x0a34, "GeForce GT 240M" },
+	{ 0x0a35, "GeForce GT 325M" },
+	{ 0x0a38, "Quadro 400" },
+	{ 0x0a3c, "Quadro FX 880M" },
+	{ 0x0a60, "GeForce G210" },
+	{ 0x0a62, "GeForce 205" },
+	{ 0x0a63, "GeForce 310" },
+	{ 0x0a64, "Second Generation ION" },
+	{ 0x0a65, "GeForce 210" },
+	{ 0x0a66, "GeForce 310" },
+	{ 0x0a67, "GeForce 315" },
+	{ 0x0a68, "GeForce G105M" },
+	{ 0x0a69, "GeForce G105M" },
+	{ 0x0a6a, "NVS 2100M" },
+	{ 0x0a6c, "NVS 3100M" },
+	{ 0x0a6e, "GeForce 305M", nvkm_device_pci_10de_0a6e },
+	{ 0x0a6f, "Second Generation ION" },
+	{ 0x0a70, "GeForce 310M", nvkm_device_pci_10de_0a70 },
+	{ 0x0a71, "GeForce 305M" },
+	{ 0x0a72, "GeForce 310M" },
+	{ 0x0a73, "GeForce 305M", nvkm_device_pci_10de_0a73 },
+	{ 0x0a74, "GeForce G210M", nvkm_device_pci_10de_0a74 },
+	{ 0x0a75, "GeForce 310M", nvkm_device_pci_10de_0a75 },
+	{ 0x0a76, "Second Generation ION" },
+	{ 0x0a78, "Quadro FX 380 LP" },
+	{ 0x0a7a, "GeForce 315M", nvkm_device_pci_10de_0a7a },
+	{ 0x0a7c, "Quadro FX 380M" },
+	{ 0x0ca0, "GeForce GT 330" },
+	{ 0x0ca2, "GeForce GT 320" },
+	{ 0x0ca3, "GeForce GT 240" },
+	{ 0x0ca4, "GeForce GT 340" },
+	{ 0x0ca5, "GeForce GT 220" },
+	{ 0x0ca7, "GeForce GT 330" },
+	{ 0x0ca8, "GeForce GTS 260M" },
+	{ 0x0ca9, "GeForce GTS 250M" },
+	{ 0x0cac, "GeForce GT 220" },
+	{ 0x0caf, "GeForce GT 335M" },
+	{ 0x0cb0, "GeForce GTS 350M" },
+	{ 0x0cb1, "GeForce GTS 360M" },
+	{ 0x0cbc, "Quadro FX 1800M" },
+	{ 0x0dc0, "GeForce GT 440" },
+	{ 0x0dc4, "GeForce GTS 450" },
+	{ 0x0dc5, "GeForce GTS 450" },
+	{ 0x0dc6, "GeForce GTS 450" },
+	{ 0x0dcd, "GeForce GT 555M" },
+	{ 0x0dce, "GeForce GT 555M" },
+	{ 0x0dd1, "GeForce GTX 460M" },
+	{ 0x0dd2, "GeForce GT 445M" },
+	{ 0x0dd3, "GeForce GT 435M" },
+	{ 0x0dd6, "GeForce GT 550M" },
+	{ 0x0dd8, "Quadro 2000", nvkm_device_pci_10de_0dd8 },
+	{ 0x0dda, "Quadro 2000M" },
+	{ 0x0de0, "GeForce GT 440" },
+	{ 0x0de1, "GeForce GT 430" },
+	{ 0x0de2, "GeForce GT 420" },
+	{ 0x0de3, "GeForce GT 635M" },
+	{ 0x0de4, "GeForce GT 520" },
+	{ 0x0de5, "GeForce GT 530" },
+	{ 0x0de7, "GeForce GT 610" },
+	{ 0x0de8, "GeForce GT 620M" },
+	{ 0x0de9, "GeForce GT 630M", nvkm_device_pci_10de_0de9 },
+	{ 0x0dea, "GeForce 610M", nvkm_device_pci_10de_0dea },
+	{ 0x0deb, "GeForce GT 555M" },
+	{ 0x0dec, "GeForce GT 525M" },
+	{ 0x0ded, "GeForce GT 520M" },
+	{ 0x0dee, "GeForce GT 415M" },
+	{ 0x0def, "NVS 5400M" },
+	{ 0x0df0, "GeForce GT 425M" },
+	{ 0x0df1, "GeForce GT 420M" },
+	{ 0x0df2, "GeForce GT 435M" },
+	{ 0x0df3, "GeForce GT 420M" },
+	{ 0x0df4, "GeForce GT 540M", nvkm_device_pci_10de_0df4 },
+	{ 0x0df5, "GeForce GT 525M" },
+	{ 0x0df6, "GeForce GT 550M" },
+	{ 0x0df7, "GeForce GT 520M" },
+	{ 0x0df8, "Quadro 600" },
+	{ 0x0df9, "Quadro 500M" },
+	{ 0x0dfa, "Quadro 1000M" },
+	{ 0x0dfc, "NVS 5200M" },
+	{ 0x0e22, "GeForce GTX 460" },
+	{ 0x0e23, "GeForce GTX 460 SE" },
+	{ 0x0e24, "GeForce GTX 460" },
+	{ 0x0e30, "GeForce GTX 470M" },
+	{ 0x0e31, "GeForce GTX 485M" },
+	{ 0x0e3a, "Quadro 3000M" },
+	{ 0x0e3b, "Quadro 4000M" },
+	{ 0x0f00, "GeForce GT 630" },
+	{ 0x0f01, "GeForce GT 620" },
+	{ 0x0f02, "GeForce GT 730" },
+	{ 0x0fc0, "GeForce GT 640" },
+	{ 0x0fc1, "GeForce GT 640" },
+	{ 0x0fc2, "GeForce GT 630" },
+	{ 0x0fc6, "GeForce GTX 650" },
+	{ 0x0fc8, "GeForce GT 740" },
+	{ 0x0fc9, "GeForce GT 730" },
+	{ 0x0fcd, "GeForce GT 755M" },
+	{ 0x0fce, "GeForce GT 640M LE" },
+	{ 0x0fd1, "GeForce GT 650M" },
+	{ 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
+	{ 0x0fd3, "GeForce GT 640M LE" },
+	{ 0x0fd4, "GeForce GTX 660M" },
+	{ 0x0fd5, "GeForce GT 650M" },
+	{ 0x0fd8, "GeForce GT 640M" },
+	{ 0x0fd9, "GeForce GT 645M" },
+	{ 0x0fdf, "GeForce GT 740M" },
+	{ 0x0fe0, "GeForce GTX 660M" },
+	{ 0x0fe1, "GeForce GT 730M" },
+	{ 0x0fe2, "GeForce GT 745M" },
+	{ 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
+	{ 0x0fe4, "GeForce GT 750M" },
+	{ 0x0fe9, "GeForce GT 750M" },
+	{ 0x0fea, "GeForce GT 755M" },
+	{ 0x0fec, "GeForce 710A" },
+	{ 0x0fef, "GRID K340" },
+	{ 0x0ff2, "GRID K1" },
+	{ 0x0ff3, "Quadro K420" },
+	{ 0x0ff6, "Quadro K1100M" },
+	{ 0x0ff8, "Quadro K500M" },
+	{ 0x0ff9, "Quadro K2000D" },
+	{ 0x0ffa, "Quadro K600" },
+	{ 0x0ffb, "Quadro K2000M" },
+	{ 0x0ffc, "Quadro K1000M" },
+	{ 0x0ffd, "NVS 510" },
+	{ 0x0ffe, "Quadro K2000" },
+	{ 0x0fff, "Quadro 410" },
+	{ 0x1001, "GeForce GTX TITAN Z" },
+	{ 0x1004, "GeForce GTX 780" },
+	{ 0x1005, "GeForce GTX TITAN" },
+	{ 0x1007, "GeForce GTX 780" },
+	{ 0x1008, "GeForce GTX 780 Ti" },
+	{ 0x100a, "GeForce GTX 780 Ti" },
+	{ 0x100c, "GeForce GTX TITAN Black" },
+	{ 0x1021, "Tesla K20Xm" },
+	{ 0x1022, "Tesla K20c" },
+	{ 0x1023, "Tesla K40m" },
+	{ 0x1024, "Tesla K40c" },
+	{ 0x1026, "Tesla K20s" },
+	{ 0x1027, "Tesla K40st" },
+	{ 0x1028, "Tesla K20m" },
+	{ 0x1029, "Tesla K40s" },
+	{ 0x102a, "Tesla K40t" },
+	{ 0x102d, "Tesla K80" },
+	{ 0x103a, "Quadro K6000" },
+	{ 0x103c, "Quadro K5200" },
+	{ 0x1040, "GeForce GT 520" },
+	{ 0x1042, "GeForce 510" },
+	{ 0x1048, "GeForce 605" },
+	{ 0x1049, "GeForce GT 620" },
+	{ 0x104a, "GeForce GT 610" },
+	{ 0x104b, "GeForce GT 625 (OEM)", nvkm_device_pci_10de_104b },
+	{ 0x104c, "GeForce GT 705" },
+	{ 0x1050, "GeForce GT 520M" },
+	{ 0x1051, "GeForce GT 520MX" },
+	{ 0x1052, "GeForce GT 520M" },
+	{ 0x1054, "GeForce 410M" },
+	{ 0x1055, "GeForce 410M" },
+	{ 0x1056, "NVS 4200M" },
+	{ 0x1057, "NVS 4200M" },
+	{ 0x1058, "GeForce 610M", nvkm_device_pci_10de_1058 },
+	{ 0x1059, "GeForce 610M" },
+	{ 0x105a, "GeForce 610M" },
+	{ 0x105b, "GeForce 705M", nvkm_device_pci_10de_105b },
+	{ 0x107c, "NVS 315" },
+	{ 0x107d, "NVS 310" },
+	{ 0x1080, "GeForce GTX 580" },
+	{ 0x1081, "GeForce GTX 570" },
+	{ 0x1082, "GeForce GTX 560 Ti" },
+	{ 0x1084, "GeForce GTX 560" },
+	{ 0x1086, "GeForce GTX 570" },
+	{ 0x1087, "GeForce GTX 560 Ti" },
+	{ 0x1088, "GeForce GTX 590" },
+	{ 0x1089, "GeForce GTX 580" },
+	{ 0x108b, "GeForce GTX 580" },
+	{ 0x1091, "Tesla M2090", nvkm_device_pci_10de_1091 },
+	{ 0x1094, "Tesla M2075" },
+	{ 0x1096, "Tesla C2075", nvkm_device_pci_10de_1096 },
+	{ 0x109a, "Quadro 5010M" },
+	{ 0x109b, "Quadro 7000" },
+	{ 0x10c0, "GeForce 9300 GS" },
+	{ 0x10c3, "GeForce 8400GS" },
+	{ 0x10c5, "GeForce 405" },
+	{ 0x10d8, "NVS 300" },
+	{ 0x1140, NULL, nvkm_device_pci_10de_1140 },
+	{ 0x1180, "GeForce GTX 680" },
+	{ 0x1183, "GeForce GTX 660 Ti" },
+	{ 0x1184, "GeForce GTX 770" },
+	{ 0x1185, "GeForce GTX 660", nvkm_device_pci_10de_1185 },
+	{ 0x1187, "GeForce GTX 760" },
+	{ 0x1188, "GeForce GTX 690" },
+	{ 0x1189, "GeForce GTX 670", nvkm_device_pci_10de_1189 },
+	{ 0x118a, "GRID K520" },
+	{ 0x118e, "GeForce GTX 760 (192-bit)" },
+	{ 0x118f, "Tesla K10" },
+	{ 0x1193, "GeForce GTX 760 Ti OEM" },
+	{ 0x1194, "Tesla K8" },
+	{ 0x1195, "GeForce GTX 660" },
+	{ 0x1198, "GeForce GTX 880M" },
+	{ 0x1199, "GeForce GTX 870M", nvkm_device_pci_10de_1199 },
+	{ 0x119a, "GeForce GTX 860M" },
+	{ 0x119d, "GeForce GTX 775M" },
+	{ 0x119e, "GeForce GTX 780M" },
+	{ 0x119f, "GeForce GTX 780M" },
+	{ 0x11a0, "GeForce GTX 680M" },
+	{ 0x11a1, "GeForce GTX 670MX" },
+	{ 0x11a2, "GeForce GTX 675MX" },
+	{ 0x11a3, "GeForce GTX 680MX" },
+	{ 0x11a7, "GeForce GTX 675MX" },
+	{ 0x11b4, "Quadro K4200" },
+	{ 0x11b6, "Quadro K3100M" },
+	{ 0x11b7, "Quadro K4100M" },
+	{ 0x11b8, "Quadro K5100M" },
+	{ 0x11ba, "Quadro K5000" },
+	{ 0x11bc, "Quadro K5000M" },
+	{ 0x11bd, "Quadro K4000M" },
+	{ 0x11be, "Quadro K3000M" },
+	{ 0x11bf, "GRID K2" },
+	{ 0x11c0, "GeForce GTX 660" },
+	{ 0x11c2, "GeForce GTX 650 Ti BOOST" },
+	{ 0x11c3, "GeForce GTX 650 Ti" },
+	{ 0x11c4, "GeForce GTX 645" },
+	{ 0x11c5, "GeForce GT 740" },
+	{ 0x11c6, "GeForce GTX 650 Ti" },
+	{ 0x11c8, "GeForce GTX 650" },
+	{ 0x11cb, "GeForce GT 740" },
+	{ 0x11e0, "GeForce GTX 770M" },
+	{ 0x11e1, "GeForce GTX 765M" },
+	{ 0x11e2, "GeForce GTX 765M" },
+	{ 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
+	{ 0x11fa, "Quadro K4000" },
+	{ 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc },
+	{ 0x1200, "GeForce GTX 560 Ti" },
+	{ 0x1201, "GeForce GTX 560" },
+	{ 0x1203, "GeForce GTX 460 SE v2" },
+	{ 0x1205, "GeForce GTX 460 v2" },
+	{ 0x1206, "GeForce GTX 555" },
+	{ 0x1207, "GeForce GT 645" },
+	{ 0x1208, "GeForce GTX 560 SE" },
+	{ 0x1210, "GeForce GTX 570M" },
+	{ 0x1211, "GeForce GTX 580M" },
+	{ 0x1212, "GeForce GTX 675M" },
+	{ 0x1213, "GeForce GTX 670M" },
+	{ 0x1241, "GeForce GT 545" },
+	{ 0x1243, "GeForce GT 545" },
+	{ 0x1244, "GeForce GTX 550 Ti" },
+	{ 0x1245, "GeForce GTS 450" },
+	{ 0x1246, "GeForce GT 550M" },
+	{ 0x1247, "GeForce GT 555M", nvkm_device_pci_10de_1247 },
+	{ 0x1248, "GeForce GT 555M" },
+	{ 0x1249, "GeForce GTS 450" },
+	{ 0x124b, "GeForce GT 640" },
+	{ 0x124d, "GeForce GT 555M", nvkm_device_pci_10de_124d },
+	{ 0x1251, "GeForce GTX 560M" },
+	{ 0x1280, "GeForce GT 635" },
+	{ 0x1281, "GeForce GT 710" },
+	{ 0x1282, "GeForce GT 640" },
+	{ 0x1284, "GeForce GT 630" },
+	{ 0x1286, "GeForce GT 720" },
+	{ 0x1287, "GeForce GT 730" },
+	{ 0x1288, "GeForce GT 720" },
+	{ 0x1289, "GeForce GT 710" },
+	{ 0x1290, "GeForce GT 730M", nvkm_device_pci_10de_1290 },
+	{ 0x1291, "GeForce GT 735M" },
+	{ 0x1292, "GeForce GT 740M", nvkm_device_pci_10de_1292 },
+	{ 0x1293, "GeForce GT 730M" },
+	{ 0x1295, "GeForce 710M", nvkm_device_pci_10de_1295 },
+	{ 0x1296, "GeForce 825M" },
+	{ 0x1298, "GeForce GT 720M" },
+	{ 0x1299, "GeForce 920M", nvkm_device_pci_10de_1299 },
+	{ 0x129a, "GeForce 910M" },
+	{ 0x12b9, "Quadro K610M" },
+	{ 0x12ba, "Quadro K510M" },
+	{ 0x1340, "GeForce 830M", nvkm_device_pci_10de_1340 },
+	{ 0x1341, "GeForce 840M", nvkm_device_pci_10de_1341 },
+	{ 0x1344, "GeForce 845M" },
+	{ 0x1346, "GeForce 930M", nvkm_device_pci_10de_1346 },
+	{ 0x1347, "GeForce 940M", nvkm_device_pci_10de_1347 },
+	{ 0x137a, NULL, nvkm_device_pci_10de_137a },
+	{ 0x137d, NULL, nvkm_device_pci_10de_137d },
+	{ 0x1380, "GeForce GTX 750 Ti" },
+	{ 0x1381, "GeForce GTX 750" },
+	{ 0x1382, "GeForce GTX 745" },
+	{ 0x1390, "GeForce 845M" },
+	{ 0x1391, "GeForce GTX 850M", nvkm_device_pci_10de_1391 },
+	{ 0x1392, "GeForce GTX 860M", nvkm_device_pci_10de_1392 },
+	{ 0x1393, "GeForce 840M" },
+	{ 0x1398, "GeForce 845M" },
+	{ 0x139a, "GeForce GTX 950M", nvkm_device_pci_10de_139a },
+	{ 0x139b, "GeForce GTX 960M", nvkm_device_pci_10de_139b },
+	{ 0x139c, "GeForce 940M" },
+	{ 0x13b3, "Quadro K2200M" },
+	{ 0x13ba, "Quadro K2200" },
+	{ 0x13bb, "Quadro K620" },
+	{ 0x13bc, "Quadro K1200" },
+	{ 0x13c0, "GeForce GTX 980" },
+	{ 0x13c2, "GeForce GTX 970" },
+	{ 0x13d7, "GeForce GTX 980M" },
+	{ 0x13d8, "GeForce GTX 970M" },
+	{ 0x13d9, "GeForce GTX 965M" },
+	{ 0x1401, "GeForce GTX 960" },
+	{ 0x1617, "GeForce GTX 980M" },
+	{ 0x1618, "GeForce GTX 970M" },
+	{ 0x1619, "GeForce GTX 965M" },
+	{ 0x17c2, "GeForce GTX TITAN X" },
+	{ 0x17c8, "GeForce GTX 980 Ti" },
+	{ 0x17f0, "Quadro M6000" },
+	{}
+};
+
+static struct nvkm_device_pci *
+nvkm_device_pci(struct nvkm_device *device)
+{
+	return container_of(device, struct nvkm_device_pci, device);
+}
+
+static resource_size_t
+nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	return pci_resource_start(pdev->pdev, bar);
+}
+
+static resource_size_t
+nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	return pci_resource_len(pdev->pdev, bar);
+}
+
+static void
+nvkm_device_pci_fini(struct nvkm_device *device, bool suspend)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	if (suspend) {
+		pci_disable_device(pdev->pdev);
+		pdev->suspend = true;
+	}
+}
+
+static int
+nvkm_device_pci_preinit(struct nvkm_device *device)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	if (pdev->suspend) {
+		int ret = pci_enable_device(pdev->pdev);
+		if (ret)
+			return ret;
+		pci_set_master(pdev->pdev);
+		pdev->suspend = false;
+	}
+	return 0;
+}
+
+static void *
+nvkm_device_pci_dtor(struct nvkm_device *device)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	pci_disable_device(pdev->pdev);
+	return pdev;
+}
+
+static const struct nvkm_device_func
+nvkm_device_pci_func = {
+	.pci = nvkm_device_pci,
+	.dtor = nvkm_device_pci_dtor,
+	.preinit = nvkm_device_pci_preinit,
+	.fini = nvkm_device_pci_fini,
+	.resource_addr = nvkm_device_pci_resource_addr,
+	.resource_size = nvkm_device_pci_resource_size,
+	.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
+};
+
+int
+nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
+		    bool detect, bool mmio, u64 subdev_mask,
+		    struct nvkm_device **pdevice)
+{
+	const struct nvkm_device_quirk *quirk = NULL;
+	const struct nvkm_device_pci_device *pcid;
+	const struct nvkm_device_pci_vendor *pciv;
+	const char *name = NULL;
+	struct nvkm_device_pci *pdev;
+	int ret;
+
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		return ret;
+
+	switch (pci_dev->vendor) {
+	case 0x10de: pcid = nvkm_device_pci_10de; break;
+	default:
+		pcid = NULL;
+		break;
+	}
+
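+	/* Look the device ID up in the vendor table and, where a
+	 * subsystem match exists, prefer the board-specific name
+	 * and quirks.
+	 */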
+	while (pcid && pcid->device) {
+		if (pcid->device == pci_dev->device) {
+			pciv = pcid->vendor;
+			while (pciv && pciv->vendor) {
+				if (pciv->vendor == pci_dev->subsystem_vendor &&
+				    pciv->device == pci_dev->subsystem_device) {
+					quirk = &pciv->quirk;
+					name  =  pciv->name;
+					break;
+				}
+				pciv++;
+			}
+			if (!name)
+				name = pcid->name;
+			break;
+		}
+		pcid++;
+	}
+
+	if (!(pdev = kzalloc(sizeof(*pdev), GFP_KERNEL))) {
+		pci_disable_device(pci_dev);
+		return -ENOMEM;
+	}
+	*pdevice = &pdev->device;
+	pdev->pdev = pci_dev;
+
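+	/* The 64-bit handle packs the PCI domain, bus, slot and
+	 * function into a unique per-device identifier.
+	 */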
+	return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+				pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+				pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+				NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
+				(u64)pci_domain_nr(pci_dev->bus) << 32 |
+				     pci_dev->bus->number << 16 |
+				     PCI_SLOT(pci_dev->devfn) << 8 |
+				     PCI_FUNC(pci_dev->devfn), name,
+				cfg, dbg, detect, mmio, subdev_mask,
+				&pdev->device);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 8d3590e7bd87..ed3ad2c30e17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -2,15 +2,49 @@
 #define __NVKM_DEVICE_PRIV_H__
 #include <core/device.h>
 
-extern struct nvkm_oclass nvkm_control_oclass[];
+#include <subdev/bar.h>
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/clk.h>
+#include <subdev/devinit.h>
+#include <subdev/fb.h>
+#include <subdev/fuse.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/ltc.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <subdev/mxm.h>
+#include <subdev/pci.h>
+#include <subdev/pmu.h>
+#include <subdev/therm.h>
+#include <subdev/timer.h>
+#include <subdev/volt.h>
 
-int nv04_identify(struct nvkm_device *);
-int nv10_identify(struct nvkm_device *);
-int nv20_identify(struct nvkm_device *);
-int nv30_identify(struct nvkm_device *);
-int nv40_identify(struct nvkm_device *);
-int nv50_identify(struct nvkm_device *);
-int gf100_identify(struct nvkm_device *);
-int gk104_identify(struct nvkm_device *);
-int gm100_identify(struct nvkm_device *);
+#include <engine/bsp.h>
+#include <engine/ce.h>
+#include <engine/cipher.h>
+#include <engine/disp.h>
+#include <engine/dma.h>
+#include <engine/fifo.h>
+#include <engine/gr.h>
+#include <engine/mpeg.h>
+#include <engine/mspdec.h>
+#include <engine/msppp.h>
+#include <engine/msvld.h>
+#include <engine/pm.h>
+#include <engine/sec.h>
+#include <engine/sw.h>
+#include <engine/vp.h>
+
+int  nvkm_device_ctor(const struct nvkm_device_func *,
+		      const struct nvkm_device_quirk *,
+		      struct device *, enum nvkm_device_type, u64 handle,
+		      const char *name, const char *cfg, const char *dbg,
+		      bool detect, bool mmio, u64 subdev_mask,
+		      struct nvkm_device *);
+int  nvkm_device_init(struct nvkm_device *);
+int  nvkm_device_fini(struct nvkm_device *, bool suspend);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
new file mode 100644
index 000000000000..da57c8a60608
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <core/tegra.h>
+#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
+#include "priv.h"
+
+static int
+nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
+{
+	int ret;
+
+	ret = regulator_enable(tdev->vdd);
+	if (ret)
+		goto err_power;
+
+	ret = clk_prepare_enable(tdev->clk);
+	if (ret)
+		goto err_clk;
+	ret = clk_prepare_enable(tdev->clk_pwr);
+	if (ret)
+		goto err_clk_pwr;
+	clk_set_rate(tdev->clk_pwr, 204000000);
+	udelay(10);
+
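+	/* Ungate the GPU: assert reset, remove the powergate clamp,
+	 * then release reset, with settle delays in between.
+	 */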
+	reset_control_assert(tdev->rst);
+	udelay(10);
+
+	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
+	if (ret)
+		goto err_clamp;
+	udelay(10);
+
+	reset_control_deassert(tdev->rst);
+	udelay(10);
+
+	return 0;
+
+err_clamp:
+	clk_disable_unprepare(tdev->clk_pwr);
+err_clk_pwr:
+	clk_disable_unprepare(tdev->clk);
+err_clk:
+	regulator_disable(tdev->vdd);
+err_power:
+	return ret;
+}
+
+static int
+nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
+{
+	reset_control_assert(tdev->rst);
+	udelay(10);
+
+	clk_disable_unprepare(tdev->clk_pwr);
+	clk_disable_unprepare(tdev->clk);
+	udelay(10);
+
+	return regulator_disable(tdev->vdd);
+}
+
+static void
+nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+	struct device *dev = &tdev->pdev->dev;
+	unsigned long pgsize_bitmap;
+	int ret;
+
+	mutex_init(&tdev->iommu.mutex);
+
+	if (iommu_present(&platform_bus_type)) {
+		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
+		/* iommu_domain_alloc() returns NULL on failure */
+		if (!tdev->iommu.domain)
+			goto error;
+
+		/*
+		 * An IOMMU is only usable if it supports page sizes smaller
+		 * than or equal to the system's PAGE_SIZE, with a preference
+		 * for the case where both are equal.
+		 */
+		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
+		if (pgsize_bitmap & PAGE_SIZE) {
+			tdev->iommu.pgshift = PAGE_SHIFT;
+		} else {
+			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
+			if (tdev->iommu.pgshift == 0) {
+				dev_warn(dev, "unsupported IOMMU page size\n");
+				goto free_domain;
+			}
+			tdev->iommu.pgshift -= 1;
+		}
+
+		ret = iommu_attach_device(tdev->iommu.domain, dev);
+		if (ret)
+			goto free_domain;
+
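+		/* Manage a 40-bit IOVA space in IOMMU-page-sized units. */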
+		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+				   (1ULL << 40) >> tdev->iommu.pgshift, 1);
+		if (ret)
+			goto detach_device;
+	}
+
+	return;
+
+detach_device:
+	iommu_detach_device(tdev->iommu.domain, dev);
+
+free_domain:
+	iommu_domain_free(tdev->iommu.domain);
+
+error:
+	tdev->iommu.domain = NULL;
+	tdev->iommu.pgshift = 0;
+	dev_err(dev, "cannot initialize IOMMU MM\n");
+#endif
+}
+
+static void
+nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+	if (tdev->iommu.domain) {
+		nvkm_mm_fini(&tdev->iommu.mm);
+		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
+		iommu_domain_free(tdev->iommu.domain);
+	}
+#endif
+}
+
+static struct nvkm_device_tegra *
+nvkm_device_tegra(struct nvkm_device *device)
+{
+	return container_of(device, struct nvkm_device_tegra, device);
+}
+
+static struct resource *
+nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+}
+
+static resource_size_t
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+{
+	struct resource *res = nvkm_device_tegra_resource(device, bar);
+	return res ? res->start : 0;
+}
+
+static resource_size_t
+nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+{
+	struct resource *res = nvkm_device_tegra_resource(device, bar);
+	return res ? resource_size(res) : 0;
+}
+
+static irqreturn_t
+nvkm_device_tegra_intr(int irq, void *arg)
+{
+	struct nvkm_device_tegra *tdev = arg;
+	struct nvkm_mc *mc = tdev->device.mc;
+	bool handled = false;
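+	/* Route the stall interrupt through the MC subdev: disarm,
+	 * dispatch, then re-arm.
+	 */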
+	if (likely(mc)) {
+		nvkm_mc_intr_unarm(mc);
+		nvkm_mc_intr(mc, &handled);
+		nvkm_mc_intr_rearm(mc);
+	}
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void
+nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	if (tdev->irq) {
+		free_irq(tdev->irq, tdev);
+		tdev->irq = 0;
+	}
+}
+
+static int
+nvkm_device_tegra_init(struct nvkm_device *device)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	int irq, ret;
+
+	irq = platform_get_irq_byname(tdev->pdev, "stall");
+	if (irq < 0)
+		return irq;
+
+	ret = request_irq(irq, nvkm_device_tegra_intr,
+			  IRQF_SHARED, "nvkm", tdev);
+	if (ret)
+		return ret;
+
+	tdev->irq = irq;
+	return 0;
+}
+
+static void *
+nvkm_device_tegra_dtor(struct nvkm_device *device)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	nvkm_device_tegra_power_down(tdev);
+	nvkm_device_tegra_remove_iommu(tdev);
+	return tdev;
+}
+
+static const struct nvkm_device_func
+nvkm_device_tegra_func = {
+	.tegra = nvkm_device_tegra,
+	.dtor = nvkm_device_tegra_dtor,
+	.init = nvkm_device_tegra_init,
+	.fini = nvkm_device_tegra_fini,
+	.resource_addr = nvkm_device_tegra_resource_addr,
+	.resource_size = nvkm_device_tegra_resource_size,
+	.cpu_coherent = false,
+};
+
+int
+nvkm_device_tegra_new(struct platform_device *pdev,
+		      const char *cfg, const char *dbg,
+		      bool detect, bool mmio, u64 subdev_mask,
+		      struct nvkm_device **pdevice)
+{
+	struct nvkm_device_tegra *tdev;
+	int ret;
+
+	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdevice = &tdev->device;
+	tdev->pdev = pdev;
+	tdev->irq = -1;
+
+	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(tdev->vdd))
+		return PTR_ERR(tdev->vdd);
+
+	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
+	if (IS_ERR(tdev->rst))
+		return PTR_ERR(tdev->rst);
+
+	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
+	if (IS_ERR(tdev->clk))
+		return PTR_ERR(tdev->clk);
+
+	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
+	if (IS_ERR(tdev->clk_pwr))
+		return PTR_ERR(tdev->clk_pwr);
+
+	nvkm_device_tegra_probe_iommu(tdev);
+
+	ret = nvkm_device_tegra_power_up(tdev);
+	if (ret)
+		return ret;
+
+	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
+			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
+			       cfg, dbg, detect, mmio, subdev_mask,
+			       &tdev->device);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+#else
+int
+nvkm_device_tegra_new(struct platform_device *pdev,
+		      const char *cfg, const char *dbg,
+		      bool detect, bool mmio, u64 subdev_mask,
+		      struct nvkm_device **pdevice)
+{
+	return -ENOSYS;
+}
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
new file mode 100644
index 000000000000..1ae48f27029d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nvkm_udevice(p) container_of((p), struct nvkm_udevice, object)
+#include "priv.h"
+#include "ctrl.h"
+
+#include <core/client.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nvkm_udevice {
+	struct nvkm_object object;
+	struct nvkm_device *device;
+};
+
+static int
+nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
+{
+	struct nvkm_object *object = &udev->object;
+	struct nvkm_device *device = udev->device;
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_instmem *imem = device->imem;
+	union {
+		struct nv_device_info_v0 v0;
+	} *args = data;
+	int ret;
+
+	nvif_ioctl(object, "device info size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "device info vers %d\n", args->v0.version);
+	} else
+		return ret;
+
+	switch (device->chipset) {
+	case 0x01a:
+	case 0x01f:
+	case 0x04c:
+	case 0x04e:
+	case 0x063:
+	case 0x067:
+	case 0x068:
+	case 0x0aa:
+	case 0x0ac:
+	case 0x0af:
+		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
+		break;
+	default:
+		switch (device->type) {
+		case NVKM_DEVICE_PCI:
+			args->v0.platform = NV_DEVICE_INFO_V0_PCI;
+			break;
+		case NVKM_DEVICE_AGP:
+			args->v0.platform = NV_DEVICE_INFO_V0_AGP;
+			break;
+		case NVKM_DEVICE_PCIE:
+			args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
+			break;
+		case NVKM_DEVICE_TEGRA:
+			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
+			break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		break;
+	}
+
+	switch (device->card_type) {
+	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
+	case NV_10:
+	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
+	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
+	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
+	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
+	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
+	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
+	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
+	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+	default:
+		args->v0.family = 0;
+		break;
+	}
+
+	args->v0.chipset  = device->chipset;
+	args->v0.revision = device->chiprev;
+	if (fb && fb->ram)
+		args->v0.ram_size = args->v0.ram_user = fb->ram->size;
+	else
+		args->v0.ram_size = args->v0.ram_user = 0;
+	if (imem && args->v0.ram_size > 0)
+		args->v0.ram_user = args->v0.ram_user - imem->reserved;
+
+	strncpy(args->v0.chip, device->chip->name, sizeof(args->v0.chip));
+	strncpy(args->v0.name, device->name, sizeof(args->v0.name));
+	return 0;
+}
+
+static int
+nvkm_udevice_time(struct nvkm_udevice *udev, void *data, u32 size)
+{
+	struct nvkm_device *device = udev->device;
+	union {
+		struct nv_device_time_v0 v0;
+	} *args = data;
+	int ret;
+
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		args->v0.time = nvkm_timer_read(device->timer);
+	}
+
+	return ret;
+}
+
+static int
+nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	switch (mthd) {
+	case NV_DEVICE_V0_INFO:
+		return nvkm_udevice_info(udev, data, size);
+	case NV_DEVICE_V0_TIME:
+		return nvkm_udevice_time(udev, data, size);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int
+nvkm_udevice_rd08(struct nvkm_object *object, u64 addr, u8 *data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	*data = nvkm_rd08(udev->device, addr);
+	return 0;
+}
+
+static int
+nvkm_udevice_rd16(struct nvkm_object *object, u64 addr, u16 *data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	*data = nvkm_rd16(udev->device, addr);
+	return 0;
+}
+
+static int
+nvkm_udevice_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	*data = nvkm_rd32(udev->device, addr);
+	return 0;
+}
+
+static int
+nvkm_udevice_wr08(struct nvkm_object *object, u64 addr, u8 data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	nvkm_wr08(udev->device, addr, data);
+	return 0;
+}
+
+static int
+nvkm_udevice_wr16(struct nvkm_object *object, u64 addr, u16 data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	nvkm_wr16(udev->device, addr, data);
+	return 0;
+}
+
+static int
+nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	nvkm_wr32(udev->device, addr, data);
+	return 0;
+}
+
+static int
+nvkm_udevice_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	*addr = device->func->resource_addr(device, 0);
+	*size = device->func->resource_size(device, 0);
+	return 0;
+}
+
+static int
+nvkm_udevice_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	int ret = 0;
+
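+	/* Only the last reference actually suspends the device; a
+	 * suspend-time failure restores the reference.
+	 */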
+	mutex_lock(&device->mutex);
+	if (!--device->refcount) {
+		ret = nvkm_device_fini(device, suspend);
+		if (ret && suspend) {
+			device->refcount++;
+			goto done;
+		}
+	}
+
+done:
+	mutex_unlock(&device->mutex);
+	return ret;
+}
+
+static int
+nvkm_udevice_init(struct nvkm_object *object)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	int ret = 0;
+
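+	/* The first reference powers the device up; drop it again
+	 * if initialisation fails.
+	 */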
+	mutex_lock(&device->mutex);
+	if (!device->refcount++) {
+		ret = nvkm_device_init(device);
+		if (ret) {
+			device->refcount--;
+			goto done;
+		}
+	}
+
+done:
+	mutex_unlock(&device->mutex);
+	return ret;
+}
+
+static int
+nvkm_udevice_child_new(const struct nvkm_oclass *oclass,
+		       void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(oclass->parent);
+	const struct nvkm_device_oclass *sclass = oclass->priv;
+	return sclass->ctor(udev->device, oclass, data, size, pobject);
+}
+
+static int
+nvkm_udevice_child_get(struct nvkm_object *object, int index,
+		       struct nvkm_oclass *oclass)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	struct nvkm_engine *engine;
+	u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
+		   (1ULL << NVKM_ENGINE_FIFO) |
+		   (1ULL << NVKM_ENGINE_DISP) |
+		   (1ULL << NVKM_ENGINE_PM);
+	const struct nvkm_device_oclass *sclass = NULL;
+	int i;
+
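+	/* Walk the exposed engines from the lowest set bit upwards,
+	 * asking each for the class at the requested index.
+	 */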
+	for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
+		if (!(engine = nvkm_device_engine(device, i)) ||
+		    !(engine->func->base.sclass))
+			continue;
+		oclass->engine = engine;
+
+		index -= engine->func->base.sclass(oclass, index, &sclass);
+	}
+
+	if (!sclass) {
+		switch (index) {
+		case 0: sclass = &nvkm_control_oclass; break;
+		default:
+			return -EINVAL;
+		}
+		oclass->base = sclass->base;
+	}
+
+	oclass->ctor = nvkm_udevice_child_new;
+	oclass->priv = sclass;
+	return 0;
+}
+
+static const struct nvkm_object_func
+nvkm_udevice_super = {
+	.init = nvkm_udevice_init,
+	.fini = nvkm_udevice_fini,
+	.mthd = nvkm_udevice_mthd,
+	.map = nvkm_udevice_map,
+	.rd08 = nvkm_udevice_rd08,
+	.rd16 = nvkm_udevice_rd16,
+	.rd32 = nvkm_udevice_rd32,
+	.wr08 = nvkm_udevice_wr08,
+	.wr16 = nvkm_udevice_wr16,
+	.wr32 = nvkm_udevice_wr32,
+	.sclass = nvkm_udevice_child_get,
+};
+
+static const struct nvkm_object_func
+nvkm_udevice = {
+	.init = nvkm_udevice_init,
+	.fini = nvkm_udevice_fini,
+	.mthd = nvkm_udevice_mthd,
+	.sclass = nvkm_udevice_child_get,
+};
+
+int
+nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		 struct nvkm_object **pobject)
+{
+	union {
+		struct nv_device_v0 v0;
+	} *args = data;
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_object *parent = &client->object;
+	const struct nvkm_object_func *func;
+	struct nvkm_udevice *udev;
+	int ret;
+
+	nvif_ioctl(parent, "create device size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create device v%d device %016llx\n",
+			   args->v0.version, args->v0.device);
+	} else
+		return ret;
+
+	/* give privileged clients register access */
+	if (client->super)
+		func = &nvkm_udevice_super;
+	else
+		func = &nvkm_udevice;
+
+	if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(func, oclass, &udev->object);
+	*pobject = &udev->object;
+
+	/* find the device that matches what the client requested */
+	if (args->v0.device != ~0)
+		udev->device = nvkm_device_find(args->v0.device);
+	else
+		udev->device = nvkm_device_find(client->device);
+	if (!udev->device)
+		return -ENODEV;
+
+	return 0;
+}
+
+const struct nvkm_sclass
+nvkm_udevice_sclass = {
+	.oclass = NV_DEVICE,
+	.minver = 0,
+	.maxver = 0,
+	.ctor = nvkm_udevice_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 16a4e2a37008..04f60452011e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -1,29 +1,93 @@
 nvkm-y += nvkm/engine/disp/base.o
-nvkm-y += nvkm/engine/disp/conn.o
-nvkm-y += nvkm/engine/disp/outp.o
-nvkm-y += nvkm/engine/disp/outpdp.o
 nvkm-y += nvkm/engine/disp/nv04.o
 nvkm-y += nvkm/engine/disp/nv50.o
 nvkm-y += nvkm/engine/disp/g84.o
 nvkm-y += nvkm/engine/disp/g94.o
 nvkm-y += nvkm/engine/disp/gt200.o
 nvkm-y += nvkm/engine/disp/gt215.o
-nvkm-y += nvkm/engine/disp/gf110.o
+nvkm-y += nvkm/engine/disp/gf119.o
 nvkm-y += nvkm/engine/disp/gk104.o
 nvkm-y += nvkm/engine/disp/gk110.o
 nvkm-y += nvkm/engine/disp/gm107.o
 nvkm-y += nvkm/engine/disp/gm204.o
+
+nvkm-y += nvkm/engine/disp/outp.o
+nvkm-y += nvkm/engine/disp/outpdp.o
 nvkm-y += nvkm/engine/disp/dacnv50.o
+nvkm-y += nvkm/engine/disp/piornv50.o
+nvkm-y += nvkm/engine/disp/sornv50.o
+nvkm-y += nvkm/engine/disp/sorg94.o
+nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm204.o
 nvkm-y += nvkm/engine/disp/dport.o
+
+nvkm-y += nvkm/engine/disp/conn.o
+
 nvkm-y += nvkm/engine/disp/hdagt215.o
-nvkm-y += nvkm/engine/disp/hdagf110.o
+nvkm-y += nvkm/engine/disp/hdagf119.o
+
 nvkm-y += nvkm/engine/disp/hdmig84.o
 nvkm-y += nvkm/engine/disp/hdmigt215.o
-nvkm-y += nvkm/engine/disp/hdmigf110.o
+nvkm-y += nvkm/engine/disp/hdmigf119.o
 nvkm-y += nvkm/engine/disp/hdmigk104.o
-nvkm-y += nvkm/engine/disp/piornv50.o
-nvkm-y += nvkm/engine/disp/sornv50.o
-nvkm-y += nvkm/engine/disp/sorg94.o
-nvkm-y += nvkm/engine/disp/sorgf110.o
-nvkm-y += nvkm/engine/disp/sorgm204.o
+
 nvkm-y += nvkm/engine/disp/vga.o
+
+nvkm-y += nvkm/engine/disp/rootnv04.o
+nvkm-y += nvkm/engine/disp/rootnv50.o
+nvkm-y += nvkm/engine/disp/rootg84.o
+nvkm-y += nvkm/engine/disp/rootg94.o
+nvkm-y += nvkm/engine/disp/rootgt200.o
+nvkm-y += nvkm/engine/disp/rootgt215.o
+nvkm-y += nvkm/engine/disp/rootgf119.o
+nvkm-y += nvkm/engine/disp/rootgk104.o
+nvkm-y += nvkm/engine/disp/rootgk110.o
+nvkm-y += nvkm/engine/disp/rootgm107.o
+nvkm-y += nvkm/engine/disp/rootgm204.o
+
+nvkm-y += nvkm/engine/disp/channv50.o
+nvkm-y += nvkm/engine/disp/changf119.o
+
+nvkm-y += nvkm/engine/disp/dmacnv50.o
+nvkm-y += nvkm/engine/disp/dmacgf119.o
+
+nvkm-y += nvkm/engine/disp/basenv50.o
+nvkm-y += nvkm/engine/disp/baseg84.o
+nvkm-y += nvkm/engine/disp/basegt200.o
+nvkm-y += nvkm/engine/disp/basegt215.o
+nvkm-y += nvkm/engine/disp/basegf119.o
+nvkm-y += nvkm/engine/disp/basegk104.o
+nvkm-y += nvkm/engine/disp/basegk110.o
+
+nvkm-y += nvkm/engine/disp/corenv50.o
+nvkm-y += nvkm/engine/disp/coreg84.o
+nvkm-y += nvkm/engine/disp/coreg94.o
+nvkm-y += nvkm/engine/disp/coregt200.o
+nvkm-y += nvkm/engine/disp/coregt215.o
+nvkm-y += nvkm/engine/disp/coregf119.o
+nvkm-y += nvkm/engine/disp/coregk104.o
+nvkm-y += nvkm/engine/disp/coregk110.o
+nvkm-y += nvkm/engine/disp/coregm107.o
+nvkm-y += nvkm/engine/disp/coregm204.o
+
+nvkm-y += nvkm/engine/disp/ovlynv50.o
+nvkm-y += nvkm/engine/disp/ovlyg84.o
+nvkm-y += nvkm/engine/disp/ovlygt200.o
+nvkm-y += nvkm/engine/disp/ovlygt215.o
+nvkm-y += nvkm/engine/disp/ovlygf119.o
+nvkm-y += nvkm/engine/disp/ovlygk104.o
+
+nvkm-y += nvkm/engine/disp/piocnv50.o
+nvkm-y += nvkm/engine/disp/piocgf119.o
+
+nvkm-y += nvkm/engine/disp/cursnv50.o
+nvkm-y += nvkm/engine/disp/cursg84.o
+nvkm-y += nvkm/engine/disp/cursgt215.o
+nvkm-y += nvkm/engine/disp/cursgf119.o
+nvkm-y += nvkm/engine/disp/cursgk104.o
+
+nvkm-y += nvkm/engine/disp/oimmnv50.o
+nvkm-y += nvkm/engine/disp/oimmg84.o
+nvkm-y += nvkm/engine/disp/oimmgt215.o
+nvkm-y += nvkm/engine/disp/oimmgf119.o
+nvkm-y += nvkm/engine/disp/oimmgk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 23d1b5c0dc16..44b67719f64d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -25,7 +25,9 @@
 #include "conn.h"
 #include "outp.h"
 
+#include <core/client.h>
 #include <core/notify.h>
+#include <core/oproxy.h>
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
 
@@ -33,7 +35,21 @@
 #include <nvif/event.h>
 #include <nvif/unpack.h>
 
-int
+static void
+nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+{
+	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
+	disp->func->head.vblank_fini(disp, head);
+}
+
+static void
+nvkm_disp_vblank_init(struct nvkm_event *event, int type, int head)
+{
+	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
+	disp->func->head.vblank_init(disp, head);
+}
+
+static int
 nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
 		      struct nvkm_notify *notify)
 {
@@ -56,6 +72,13 @@ nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
 	return ret;
 }
 
+static const struct nvkm_event_func
+nvkm_disp_vblank_func = {
+	.ctor = nvkm_disp_vblank_ctor,
+	.init = nvkm_disp_vblank_init,
+	.fini = nvkm_disp_vblank_fini,
+};
+
 void
 nvkm_disp_vblank(struct nvkm_disp *disp, int head)
 {
@@ -100,7 +123,7 @@ nvkm_disp_hpd_func = {
 int
 nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
 {
-	struct nvkm_disp *disp = (void *)object->engine;
+	struct nvkm_disp *disp = nvkm_disp(object->engine);
 	switch (type) {
 	case NV04_DISP_NTFY_VBLANK:
 		*event = &disp->vblank;
@@ -114,127 +137,303 @@ nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
 	return -EINVAL;
 }
 
-int
-_nvkm_disp_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
 {
-	struct nvkm_disp *disp = (void *)object;
-	struct nvkm_output *outp;
+	struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
+	mutex_lock(&disp->engine.subdev.mutex);
+	if (disp->client == oproxy)
+		disp->client = NULL;
+	mutex_unlock(&disp->engine.subdev.mutex);
+}
+
+static const struct nvkm_oproxy_func
+nvkm_disp_class = {
+	.dtor[1] = nvkm_disp_class_del,
+};
+
+static int
+nvkm_disp_class_new(struct nvkm_device *device,
+		    const struct nvkm_oclass *oclass, void *data, u32 size,
+		    struct nvkm_object **pobject)
+{
+	const struct nvkm_disp_oclass *sclass = oclass->engn;
+	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
+	struct nvkm_oproxy *oproxy;
 	int ret;
 
-	list_for_each_entry(outp, &disp->outp, head) {
-		ret = nv_ofuncs(outp)->fini(nv_object(outp), suspend);
-		if (ret && suspend)
-			goto fail_outp;
+	ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
+	if (ret)
+		return ret;
+	*pobject = &oproxy->base;
+
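+	/* note: only one client may own the display at a time; the
+	 * oproxy dtor hook above drops ownership when that object is
+	 * destroyed.
+	 */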
+	mutex_lock(&disp->engine.subdev.mutex);
+	if (disp->client) {
+		mutex_unlock(&disp->engine.subdev.mutex);
+		return -EBUSY;
 	}
+	disp->client = oproxy;
+	mutex_unlock(&disp->engine.subdev.mutex);
 
-	return nvkm_engine_fini(&disp->base, suspend);
+	return sclass->ctor(disp, oclass, data, size, &oproxy->object);
+}
 
-fail_outp:
-	list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
-		nv_ofuncs(outp)->init(nv_object(outp));
+static const struct nvkm_device_oclass
+nvkm_disp_sclass = {
+	.ctor = nvkm_disp_class_new,
+};
+
+static int
+nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
+		    const struct nvkm_device_oclass **class)
+{
+	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
+	if (index == 0) {
+		const struct nvkm_disp_oclass *root = disp->func->root(disp);
+		oclass->base = root->base;
+		oclass->engn = root;
+		*class = &nvkm_disp_sclass;
+		return 0;
 	}
+	return 1;
+}
 
-	return ret;
+static void
+nvkm_disp_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	disp->func->intr(disp);
 }
 
-int
-_nvkm_disp_init(struct nvkm_object *object)
+static int
+nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
 {
-	struct nvkm_disp *disp = (void *)object;
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	struct nvkm_connector *conn;
 	struct nvkm_output *outp;
-	int ret;
-
-	ret = nvkm_engine_init(&disp->base);
-	if (ret)
-		return ret;
 
 	list_for_each_entry(outp, &disp->outp, head) {
-		ret = nv_ofuncs(outp)->init(nv_object(outp));
-		if (ret)
-			goto fail_outp;
+		nvkm_output_fini(outp);
 	}
 
-	return ret;
+	list_for_each_entry(conn, &disp->conn, head) {
+		nvkm_connector_fini(conn);
+	}
+
+	return 0;
+}
+
+static int
+nvkm_disp_init(struct nvkm_engine *engine)
+{
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	struct nvkm_connector *conn;
+	struct nvkm_output *outp;
 
-fail_outp:
-	list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
-		nv_ofuncs(outp)->fini(nv_object(outp), false);
+	list_for_each_entry(conn, &disp->conn, head) {
+		nvkm_connector_init(conn);
 	}
 
-	return ret;
+	list_for_each_entry(outp, &disp->outp, head) {
+		nvkm_output_init(outp);
+	}
+
+	return 0;
 }
 
-void
-_nvkm_disp_dtor(struct nvkm_object *object)
+static void *
+nvkm_disp_dtor(struct nvkm_engine *engine)
 {
-	struct nvkm_disp *disp = (void *)object;
-	struct nvkm_output *outp, *outt;
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	struct nvkm_connector *conn;
+	struct nvkm_output *outp;
+	void *data = disp;
+
+	if (disp->func->dtor)
+		data = disp->func->dtor(disp);
 
 	nvkm_event_fini(&disp->vblank);
 	nvkm_event_fini(&disp->hpd);
 
-	if (disp->outp.next) {
-		list_for_each_entry_safe(outp, outt, &disp->outp, head) {
-			nvkm_object_ref(NULL, (struct nvkm_object **)&outp);
-		}
+	while (!list_empty(&disp->outp)) {
+		outp = list_first_entry(&disp->outp, typeof(*outp), head);
+		list_del(&outp->head);
+		nvkm_output_del(&outp);
 	}
 
-	nvkm_engine_destroy(&disp->base);
+	while (!list_empty(&disp->conn)) {
+		conn = list_first_entry(&disp->conn, typeof(*conn), head);
+		list_del(&conn->head);
+		nvkm_connector_del(&conn);
+	}
+
+	return data;
 }
 
+static const struct nvkm_engine_func
+nvkm_disp = {
+	.dtor = nvkm_disp_dtor,
+	.init = nvkm_disp_init,
+	.fini = nvkm_disp_fini,
+	.intr = nvkm_disp_intr,
+	.base.sclass = nvkm_disp_class_get,
+};
+
 int
-nvkm_disp_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int heads, const char *intname,
-		  const char *extname, int length, void **pobject)
+nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
+	       int index, int heads, struct nvkm_disp *disp)
 {
-	struct nvkm_disp_impl *impl = (void *)oclass;
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_disp *disp;
-	struct nvkm_oclass **sclass;
-	struct nvkm_object *object;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_output *outp, *outt, *pair;
+	struct nvkm_connector *conn;
+	struct nvbios_connE connE;
 	struct dcb_output dcbE;
 	u8  hpd = 0, ver, hdr;
 	u32 data;
 	int ret, i;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, true, intname,
-				  extname, length, pobject);
-	disp = *pobject;
+	INIT_LIST_HEAD(&disp->outp);
+	INIT_LIST_HEAD(&disp->conn);
+	disp->func = func;
+	disp->head.nr = heads;
+
+	ret = nvkm_engine_ctor(&nvkm_disp, device, index, 0,
+			       true, &disp->engine);
 	if (ret)
 		return ret;
 
-	INIT_LIST_HEAD(&disp->outp);
-
 	/* create output objects for each display path in the vbios */
 	i = -1;
 	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+		const struct nvkm_disp_func_outp *outps;
+		int (*ctor)(struct nvkm_disp *, int, struct dcb_output *,
+			    struct nvkm_output **);
+
 		if (dcbE.type == DCB_OUTPUT_UNUSED)
 			continue;
 		if (dcbE.type == DCB_OUTPUT_EOL)
 			break;
-		data = dcbE.location << 4 | dcbE.type;
+		outp = NULL;
+
+		switch (dcbE.location) {
+		case 0: outps = &disp->func->outp.internal; break;
+		case 1: outps = &disp->func->outp.external; break;
+		default:
+			nvkm_warn(&disp->engine.subdev,
+				  "dcb %d locn %d unknown\n", i, dcbE.location);
+			continue;
+		}
 
-		oclass = nvkm_output_oclass;
-		sclass = impl->outp;
-		while (sclass && sclass[0]) {
-			if (sclass[0]->handle == data) {
-				oclass = sclass[0];
-				break;
+		switch (dcbE.type) {
+		case DCB_OUTPUT_ANALOG: ctor = outps->crt ; break;
+		case DCB_OUTPUT_TV    : ctor = outps->tv  ; break;
+		case DCB_OUTPUT_TMDS  : ctor = outps->tmds; break;
+		case DCB_OUTPUT_LVDS  : ctor = outps->lvds; break;
+		case DCB_OUTPUT_DP    : ctor = outps->dp  ; break;
+		default:
+			nvkm_warn(&disp->engine.subdev,
+				  "dcb %d type %d unknown\n", i, dcbE.type);
+			continue;
+		}
+
+		if (ctor)
+			ret = ctor(disp, i, &dcbE, &outp);
+		else
+			ret = -ENODEV;
+
+		if (ret) {
+			if (ret == -ENODEV) {
+				nvkm_debug(&disp->engine.subdev,
+					   "dcb %d %d/%d not supported\n",
+					   i, dcbE.location, dcbE.type);
+				continue;
 			}
-			sclass++;
+			nvkm_error(&disp->engine.subdev,
+				   "failed to create output %d\n", i);
+			nvkm_output_del(&outp);
+			continue;
 		}
 
-		nvkm_object_ctor(*pobject, NULL, oclass, &dcbE, i, &object);
+		list_add_tail(&outp->head, &disp->outp);
 		hpd = max(hpd, (u8)(dcbE.connector + 1));
 	}
 
+	/* create connector objects based on the outputs we support */
+	list_for_each_entry_safe(outp, outt, &disp->outp, head) {
+		/* bios data *should* give us the most useful information */
+		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
+				     &connE);
+
+		/* no bios connector data... */
+		if (!data) {
+			/* heuristic: anything with the same ccb index is
+			 * considered to be on the same connector; any
+			 * output path without an associated ccb entry will
+			 * be put on its own connector
+			 */
+			int ccb_index = outp->info.i2c_index;
+			if (ccb_index != 0xf) {
+				list_for_each_entry(pair, &disp->outp, head) {
+					if (pair->info.i2c_index == ccb_index) {
+						outp->conn = pair->conn;
+						break;
+					}
+				}
+			}
+
+			/* connector shared with another output path */
+			if (outp->conn)
+				continue;
+
+			memset(&connE, 0x00, sizeof(connE));
+			connE.type = DCB_CONNECTOR_NONE;
+			i = -1;
+		} else {
+			i = outp->info.connector;
+		}
+
+		/* check that we haven't already created this connector */
+		list_for_each_entry(conn, &disp->conn, head) {
+			if (conn->index == outp->info.connector) {
+				outp->conn = conn;
+				break;
+			}
+		}
+
+		if (outp->conn)
+			continue;
+
+		/* apparently we need to create a new one! */
+		ret = nvkm_connector_new(disp, i, &connE, &outp->conn);
+		if (ret) {
+			nvkm_error(&disp->engine.subdev,
+				   "failed to create output %d conn: %d\n",
+				   outp->index, ret);
+			nvkm_connector_del(&outp->conn);
+			list_del(&outp->head);
+			nvkm_output_del(&outp);
+			continue;
+		}
+
+		list_add_tail(&outp->conn->head, &disp->conn);
+	}
+
 	ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
 	if (ret)
 		return ret;
 
-	ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank);
+	ret = nvkm_event_init(&nvkm_disp_vblank_func, 1, heads, &disp->vblank);
 	if (ret)
 		return ret;
 
 	return 0;
 }
+
+int
+nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
+	       int index, int heads, struct nvkm_disp **pdisp)
+{
+	if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_disp_ctor(func, device, index, heads, *pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
new file mode 100644
index 000000000000..6d17630a3dee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+g84_disp_base_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0008c4 },
+		{ 0x0088, 0x0008d0 },
+		{ 0x008c, 0x0008dc },
+		{ 0x0090, 0x0008e4 },
+		{ 0x0094, 0x610884 },
+		{ 0x00a0, 0x6108a0 },
+		{ 0x00a4, 0x610878 },
+		{ 0x00c0, 0x61086c },
+		{ 0x00c4, 0x610800 },
+		{ 0x00c8, 0x61080c },
+		{ 0x00cc, 0x610818 },
+		{ 0x00e0, 0x610858 },
+		{ 0x00e4, 0x610860 },
+		{ 0x00e8, 0x6108ac },
+		{ 0x00ec, 0x6108b4 },
+		{ 0x00fc, 0x610824 },
+		{ 0x0100, 0x610894 },
+		{ 0x0104, 0x61082c },
+		{ 0x0110, 0x6108bc },
+		{ 0x0114, 0x61088c },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_base_chan_mthd = {
+	.name = "Base",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &g84_disp_base_mthd_base },
+		{  "Image", 2, &nv50_disp_base_mthd_image },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_base_oclass = {
+	.base.oclass = G82_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
new file mode 100644
index 000000000000..ebcb925e9d90
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gf119_disp_base_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x661080 },
+		{ 0x0084, 0x661084 },
+		{ 0x0088, 0x661088 },
+		{ 0x008c, 0x66108c },
+		{ 0x0090, 0x661090 },
+		{ 0x0094, 0x661094 },
+		{ 0x00a0, 0x6610a0 },
+		{ 0x00a4, 0x6610a4 },
+		{ 0x00c0, 0x6610c0 },
+		{ 0x00c4, 0x6610c4 },
+		{ 0x00c8, 0x6610c8 },
+		{ 0x00cc, 0x6610cc },
+		{ 0x00e0, 0x6610e0 },
+		{ 0x00e4, 0x6610e4 },
+		{ 0x00e8, 0x6610e8 },
+		{ 0x00ec, 0x6610ec },
+		{ 0x00fc, 0x6610fc },
+		{ 0x0100, 0x661100 },
+		{ 0x0104, 0x661104 },
+		{ 0x0108, 0x661108 },
+		{ 0x010c, 0x66110c },
+		{ 0x0110, 0x661110 },
+		{ 0x0114, 0x661114 },
+		{ 0x0118, 0x661118 },
+		{ 0x011c, 0x66111c },
+		{ 0x0130, 0x661130 },
+		{ 0x0134, 0x661134 },
+		{ 0x0138, 0x661138 },
+		{ 0x013c, 0x66113c },
+		{ 0x0140, 0x661140 },
+		{ 0x0144, 0x661144 },
+		{ 0x0148, 0x661148 },
+		{ 0x014c, 0x66114c },
+		{ 0x0150, 0x661150 },
+		{ 0x0154, 0x661154 },
+		{ 0x0158, 0x661158 },
+		{ 0x015c, 0x66115c },
+		{ 0x0160, 0x661160 },
+		{ 0x0164, 0x661164 },
+		{ 0x0168, 0x661168 },
+		{ 0x016c, 0x66116c },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+gf119_disp_base_mthd_image = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0400, 0x661400 },
+		{ 0x0404, 0x661404 },
+		{ 0x0408, 0x661408 },
+		{ 0x040c, 0x66140c },
+		{ 0x0410, 0x661410 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+gf119_disp_base_chan_mthd = {
+	.name = "Base",
+	.addr = 0x001000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_base_mthd_base },
+		{  "Image", 2, &gf119_disp_base_mthd_image },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_base_oclass = {
+	.base.oclass = GF110_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
new file mode 100644
index 000000000000..780a1d973634
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_base_oclass = {
+	.base.oclass = GK104_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
new file mode 100644
index 000000000000..d8bdd246c8ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk110_disp_base_oclass = {
+	.base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
new file mode 100644
index 000000000000..93451e46570c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_base_oclass = {
+	.base.oclass = GT200_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
new file mode 100644
index 000000000000..08e2b1fa3806
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_base_oclass = {
+	.base.oclass = GT214_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
new file mode 100644
index 000000000000..1fd89edefc26
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_base_new(const struct nv50_disp_dmac_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_base_channel_dma_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+	u64 push;
+
+	nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp base channel dma vers %d "
+				   "pushbuf %016llx head %d\n",
+			   args->v0.version, args->v0.pushbuf, args->v0.head);
+		if (args->v0.head >= disp->base.head.nr)
+			return -EINVAL;
+		push = args->v0.pushbuf;
+		head = args->v0.head;
+	} else
+		return ret;
+
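+	/* note: base channel ids are allocated per-head, so the id passed
+	 * down is the class' first chid offset by the head index.
+	 */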
+	return nv50_disp_dmac_new_(func, mthd, root, chid + head,
+				   head, push, oclass, pobject);
+}
+
+static const struct nv50_disp_mthd_list
+nv50_disp_base_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0008c4 },
+		{ 0x0088, 0x0008d0 },
+		{ 0x008c, 0x0008dc },
+		{ 0x0090, 0x0008e4 },
+		{ 0x0094, 0x610884 },
+		{ 0x00a0, 0x6108a0 },
+		{ 0x00a4, 0x610878 },
+		{ 0x00c0, 0x61086c },
+		{ 0x00e0, 0x610858 },
+		{ 0x00e4, 0x610860 },
+		{ 0x00e8, 0x6108ac },
+		{ 0x00ec, 0x6108b4 },
+		{ 0x0100, 0x610894 },
+		{ 0x0110, 0x6108bc },
+		{ 0x0114, 0x61088c },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_base_mthd_image = {
+	.mthd = 0x0400,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0800, 0x6108f0 },
+		{ 0x0804, 0x6108fc },
+		{ 0x0808, 0x61090c },
+		{ 0x080c, 0x610914 },
+		{ 0x0810, 0x610904 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_base_chan_mthd = {
+	.name = "Base",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_base_mthd_base },
+		{  "Image", 2, &nv50_disp_base_mthd_image },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_base_oclass = {
+	.base.oclass = NV50_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &nv50_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
new file mode 100644
index 000000000000..17a3d835cb42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+static void
+gf119_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000000 << index);
+	nvkm_wr32(device, 0x61008c, 0x00000001 << index);
+}
+
+static void
+gf119_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_wr32(device, 0x61008c, 0x00000001 << index);
+	nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000001 << index);
+}
+
+const struct nvkm_event_func
+gf119_disp_chan_uevent = {
+	.ctor = nv50_disp_chan_uevent_ctor,
+	.init = gf119_disp_chan_uevent_init,
+	.fini = gf119_disp_chan_uevent_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
new file mode 100644
index 000000000000..01803c0679b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <engine/dma.h>
+
+#include <nvif/class.h>
+#include <nvif/event.h>
+#include <nvif/unpack.h>
+
+static void
+nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
+		    const struct nv50_disp_mthd_list *list, int inst)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int i;
+
+	for (i = 0; list->data[i].mthd; i++) {
+		if (list->data[i].addr) {
+			u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
+			u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
+			u32 mthd = list->data[i].mthd + (list->mthd * inst);
+			const char *name = list->data[i].name;
+			char mods[16];
+
+			if (prev != next)
+				snprintf(mods, sizeof(mods), "-> %08x", next);
+			else
+				snprintf(mods, sizeof(mods), "%13c", ' ');
+
+			nvkm_printk_(subdev, debug, info,
+				     "\t%04x: %08x %s%s%s\n",
+				     mthd, prev, mods, name ? " // " : "",
+				     name ? name : "");
+		}
+	}
+}
+
+void
+nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	const struct nv50_disp_chan_mthd *mthd = chan->mthd;
+	const struct nv50_disp_mthd_list *list;
+	int i, j;
+
+	if (debug > subdev->debug)
+		return;
+
+	for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
+		u32 base = chan->head * mthd->addr;
+		for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
+			const char *cname = mthd->name;
+			const char *sname = "";
+			char cname_[16], sname_[16];
+
+			if (mthd->addr) {
+				snprintf(cname_, sizeof(cname_), "%s %d",
+					 mthd->name, chan->chid);
+				cname = cname_;
+			}
+
+			if (mthd->data[i].nr > 1) {
+				snprintf(sname_, sizeof(sname_), " - %s %d",
+					 mthd->data[i].name, j);
+				sname = sname_;
+			}
+
+			nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
+			nv50_disp_mthd_list(disp, debug, base, mthd->prev,
+					    list, j);
+		}
+	}
+}
+
+static void
+nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
+	nvkm_wr32(device, 0x610020, 0x00000001 << index);
+}
+
+static void
+nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
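+	/* note: ack any stale pending bit before (re)enabling reporting
+	 * (assumed register roles: 0x610020 pending, 0x610028 enable).
+	 */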
+	nvkm_wr32(device, 0x610020, 0x00000001 << index);
+	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
+}
+
+void
+nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
+{
+	struct nvif_notify_uevent_rep {
+	} rep;
+
+	nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
+}
+
+int
+nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
+			   struct nvkm_notify *notify)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	union {
+		struct nvif_notify_uevent_req none;
+	} *args = data;
+	int ret;
+
+	if (nvif_unvers(args->none)) {
+		notify->size  = sizeof(struct nvif_notify_uevent_rep);
+		notify->types = 1;
+		notify->index = chan->chid;
+		return 0;
+	}
+
+	return ret;
+}
+
+const struct nvkm_event_func
+nv50_disp_chan_uevent = {
+	.ctor = nv50_disp_chan_uevent_ctor,
+	.init = nv50_disp_chan_uevent_init,
+	.fini = nv50_disp_chan_uevent_fini,
+};
+
+int
+nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	*data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+	return 0;
+}
+
+int
+nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+	return 0;
+}
+
+int
+nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
+		    struct nvkm_event **pevent)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	switch (type) {
+	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
+		*pevent = &disp->uevent;
+		return 0;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+int
+nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
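+	/* note: each channel has a 0x1000-byte register window in BAR0
+	 * starting at 0x640000, indexed by channel id (matching the
+	 * rd32/wr32 accessors above).
+	 */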
+	*addr = device->func->resource_addr(device, 0) +
+		0x640000 + (chan->chid * 0x1000);
+	*size = 0x001000;
+	return 0;
+}
+
+static int
+nv50_disp_chan_child_new(const struct nvkm_oclass *oclass,
+			 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(oclass->parent);
+	return chan->func->child_new(chan, oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_chan_child_get(struct nvkm_object *object, int index,
+			 struct nvkm_oclass *oclass)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	if (chan->func->child_get) {
+		int ret = chan->func->child_get(chan, index, oclass);
+		if (ret == 0)
+			oclass->ctor = nv50_disp_chan_child_new;
+		return ret;
+	}
+	return -EINVAL;
+}
+
+static int
+nv50_disp_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	chan->func->fini(chan);
+	return 0;
+}
+
+static int
+nv50_disp_chan_init(struct nvkm_object *object)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	return chan->func->init(chan);
+}
+
+static void *
+nv50_disp_chan_dtor(struct nvkm_object *object)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	if (chan->chid >= 0)
+		disp->chan[chan->chid] = NULL;
+	return chan->func->dtor ? chan->func->dtor(chan) : chan;
+}
+
+static const struct nvkm_object_func
+nv50_disp_chan = {
+	.dtor = nv50_disp_chan_dtor,
+	.init = nv50_disp_chan_init,
+	.fini = nv50_disp_chan_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+	.ntfy = nv50_disp_chan_ntfy,
+	.map = nv50_disp_chan_map,
+	.sclass = nv50_disp_chan_child_get,
+};
+
+int
+nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp_root *root, int chid, int head,
+		    const struct nvkm_oclass *oclass,
+		    struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = root->disp;
+
+	nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
+	chan->func = func;
+	chan->mthd = mthd;
+	chan->root = root;
+	chan->chid = chid;
+	chan->head = head;
+
+	if (disp->chan[chan->chid]) {
+		chan->chid = -1;
+		return -EBUSY;
+	}
+	disp->chan[chan->chid] = chan;
+	return 0;
+}
+
+int
+nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp_root *root, int chid, int head,
+		    const struct nvkm_oclass *oclass,
+		    struct nvkm_object **pobject)
+{
+	struct nv50_disp_chan *chan;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->object;
+
+	return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
new file mode 100644
index 000000000000..aee374884c96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -0,0 +1,127 @@
+#ifndef __NV50_DISP_CHAN_H__
+#define __NV50_DISP_CHAN_H__
+#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
+#include "nv50.h"
+
+struct nv50_disp_chan {
+	const struct nv50_disp_chan_func *func;
+	const struct nv50_disp_chan_mthd *mthd;
+	struct nv50_disp_root *root;
+	int chid;
+	int head;
+
+	struct nvkm_object object;
+};
+
+struct nv50_disp_chan_func {
+	void *(*dtor)(struct nv50_disp_chan *);
+	int (*init)(struct nv50_disp_chan *);
+	void (*fini)(struct nv50_disp_chan *);
+	int (*child_get)(struct nv50_disp_chan *, int index,
+			 struct nvkm_oclass *);
+	int (*child_new)(struct nv50_disp_chan *, const struct nvkm_oclass *,
+			 void *data, u32 size, struct nvkm_object **);
+};
+
+int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp_root *, int chid, int head,
+			const struct nvkm_oclass *, struct nv50_disp_chan *);
+int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp_root *, int chid, int head,
+			const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
+extern const struct nv50_disp_chan_func gf119_disp_pioc_func;
+
+extern const struct nvkm_event_func nv50_disp_chan_uevent;
+int  nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
+				struct nvkm_notify *);
+void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
+
+extern const struct nvkm_event_func gf119_disp_chan_uevent;
+
+struct nv50_disp_mthd_list {
+	u32 mthd;
+	u32 addr;
+	struct {
+		u32 mthd;
+		u32 addr;
+		const char *name;
+	} data[];
+};
+
+struct nv50_disp_chan_mthd {
+	const char *name;
+	u32 addr;
+	s32 prev;
+	struct {
+		const char *name;
+		int nr;
+		const struct nv50_disp_mthd_list *mthd;
+	} data[];
+};
+
+void nv50_disp_chan_mthd(struct nv50_disp_chan *, int debug);
+
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
+extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
+
+extern const struct nv50_disp_chan_mthd g84_disp_core_chan_mthd;
+extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
+extern const struct nv50_disp_chan_mthd g84_disp_base_chan_mthd;
+extern const struct nv50_disp_chan_mthd g84_disp_ovly_chan_mthd;
+
+extern const struct nv50_disp_chan_mthd g94_disp_core_chan_mthd;
+
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
+extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
+
+extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
+
+struct nv50_disp_pioc_oclass {
+	int (*ctor)(const struct nv50_disp_chan_func *,
+		    const struct nv50_disp_chan_mthd *,
+		    struct nv50_disp_root *, int chid,
+		    const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const struct nv50_disp_chan_func *func;
+	const struct nv50_disp_chan_mthd *mthd;
+	int chid;
+};
+
+extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass nv50_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass g84_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass g84_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gt215_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gt215_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gf119_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
+
+
+int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *, void *data, u32 size,
+		       struct nvkm_object **);
+int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *, void *data, u32 size,
+		       struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
index cf03e0240ced..c6910d644a3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
@@ -33,15 +33,15 @@ static int
 nvkm_connector_hpd(struct nvkm_notify *notify)
 {
 	struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
-	struct nvkm_disp *disp = nvkm_disp(conn);
-	struct nvkm_gpio *gpio = nvkm_gpio(conn);
+	struct nvkm_disp *disp = conn->disp;
+	struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
 	const struct nvkm_gpio_ntfy_rep *line = notify->data;
 	struct nvif_notify_conn_rep_v0 rep;
 	int index = conn->index;
 
-	DBG("HPD: %d\n", line->mask);
+	CONN_DBG(conn, "HPD: %d", line->mask);
 
-	if (!gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
+	if (!nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
 		rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG;
 	else
 		rep.mask = NVIF_NOTIFY_CONN_V0_PLUG;
@@ -51,78 +51,58 @@ nvkm_connector_hpd(struct nvkm_notify *notify)
 	return NVKM_NOTIFY_KEEP;
 }
 
-int
-_nvkm_connector_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_connector_fini(struct nvkm_connector *conn)
 {
-	struct nvkm_connector *conn = (void *)object;
 	nvkm_notify_put(&conn->hpd);
-	return nvkm_object_fini(&conn->base, suspend);
 }
 
-int
-_nvkm_connector_init(struct nvkm_object *object)
+void
+nvkm_connector_init(struct nvkm_connector *conn)
 {
-	struct nvkm_connector *conn = (void *)object;
-	int ret = nvkm_object_init(&conn->base);
-	if (ret == 0)
-		nvkm_notify_get(&conn->hpd);
-	return ret;
+	nvkm_notify_get(&conn->hpd);
 }
 
 void
-_nvkm_connector_dtor(struct nvkm_object *object)
+nvkm_connector_del(struct nvkm_connector **pconn)
 {
-	struct nvkm_connector *conn = (void *)object;
-	nvkm_notify_fini(&conn->hpd);
-	nvkm_object_destroy(&conn->base);
+	struct nvkm_connector *conn = *pconn;
+	if (conn) {
+		nvkm_notify_fini(&conn->hpd);
+		kfree(*pconn);
+		*pconn = NULL;
+	}
 }
 
-int
-nvkm_connector_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass,
-		       struct nvbios_connE *info, int index,
-		       int length, void **pobject)
+static void
+nvkm_connector_ctor(struct nvkm_disp *disp, int index,
+		    struct nvbios_connE *info, struct nvkm_connector *conn)
 {
 	static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 };
-	struct nvkm_disp *disp = nvkm_disp(parent);
-	struct nvkm_gpio *gpio = nvkm_gpio(parent);
-	struct nvkm_connector *conn;
-	struct nvkm_output *outp;
+	struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
 	struct dcb_gpio_func func;
 	int ret;
 
-	list_for_each_entry(outp, &disp->outp, head) {
-		if (outp->conn && outp->conn->index == index) {
-			atomic_inc(&nv_object(outp->conn)->refcount);
-			*pobject = outp->conn;
-			return 1;
-		}
-	}
-
-	ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
-	conn = *pobject;
-	if (ret)
-		return ret;
-
-	conn->info = *info;
+	conn->disp = disp;
 	conn->index = index;
+	conn->info = *info;
 
-	DBG("type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x\n",
-	    info->type, info->location, info->hpd, info->dp,
-	    info->di, info->sr, info->lcdid);
+	CONN_DBG(conn, "type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x",
+		 info->type, info->location, info->hpd, info->dp,
+		 info->di, info->sr, info->lcdid);
 
 	if ((info->hpd = ffs(info->hpd))) {
 		if (--info->hpd >= ARRAY_SIZE(hpd)) {
-			ERR("hpd %02x unknown\n", info->hpd);
-			return 0;
+			CONN_ERR(conn, "hpd %02x unknown", info->hpd);
+			return;
 		}
 		info->hpd = hpd[info->hpd];
 
-		ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
+		ret = nvkm_gpio_find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
 		if (ret) {
-			ERR("func %02x lookup failed, %d\n", info->hpd, ret);
-			return 0;
+			CONN_ERR(conn, "func %02x lookup failed, %d",
+				 info->hpd, ret);
+			return;
 		}
 
 		ret = nvkm_notify_init(NULL, &gpio->event, nvkm_connector_hpd,
@@ -134,41 +114,19 @@ nvkm_connector_create_(struct nvkm_object *parent,
 				       sizeof(struct nvkm_gpio_ntfy_rep),
 				       &conn->hpd);
 		if (ret) {
-			ERR("func %02x failed, %d\n", info->hpd, ret);
+			CONN_ERR(conn, "func %02x failed, %d", info->hpd, ret);
 		} else {
-			DBG("func %02x (HPD)\n", info->hpd);
+			CONN_DBG(conn, "func %02x (HPD)", info->hpd);
 		}
 	}
-
-	return 0;
 }
 
 int
-_nvkm_connector_ctor(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *info, u32 index,
-		     struct nvkm_object **pobject)
+nvkm_connector_new(struct nvkm_disp *disp, int index,
+		   struct nvbios_connE *info, struct nvkm_connector **pconn)
 {
-	struct nvkm_connector *conn;
-	int ret;
-
-	ret = nvkm_connector_create(parent, engine, oclass, info, index, &conn);
-	*pobject = nv_object(conn);
-	if (ret)
-		return ret;
-
+	if (!(*pconn = kzalloc(sizeof(**pconn), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_connector_ctor(disp, index, info, *pconn);
 	return 0;
 }
-
-struct nvkm_oclass *
-nvkm_connector_oclass = &(struct nvkm_connector_impl) {
-	.base = {
-		.handle = 0,
-		.ofuncs = &(struct nvkm_ofuncs) {
-			.ctor = _nvkm_connector_ctor,
-			.dtor = _nvkm_connector_dtor,
-			.init = _nvkm_connector_init,
-			.fini = _nvkm_connector_fini,
-		},
-	},
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index c87a061f7f7d..ed32fe7f1864 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -1,58 +1,33 @@
 #ifndef __NVKM_DISP_CONN_H__
 #define __NVKM_DISP_CONN_H__
-#include <core/object.h>
-#include <core/notify.h>
+#include <engine/disp.h>
 
+#include <core/notify.h>
 #include <subdev/bios.h>
 #include <subdev/bios/conn.h>
 
 struct nvkm_connector {
-	struct nvkm_object base;
-	struct list_head head;
-
-	struct nvbios_connE info;
+	struct nvkm_disp *disp;
 	int index;
+	struct nvbios_connE info;
 
 	struct nvkm_notify hpd;
-};
 
-#define nvkm_connector_create(p,e,c,b,i,d)                                     \
-	nvkm_connector_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_connector_destroy(d) ({                                           \
-	struct nvkm_connector *disp = (d);                                     \
-	_nvkm_connector_dtor(nv_object(disp));                                 \
-})
-#define nvkm_connector_init(d) ({                                              \
-	struct nvkm_connector *disp = (d);                                     \
-	_nvkm_connector_init(nv_object(disp));                                 \
-})
-#define nvkm_connector_fini(d,s) ({                                            \
-	struct nvkm_connector *disp = (d);                                     \
-	_nvkm_connector_fini(nv_object(disp), (s));                            \
-})
-
-int nvkm_connector_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, struct nvbios_connE *,
-			   int, int, void **);
-
-int  _nvkm_connector_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-void _nvkm_connector_dtor(struct nvkm_object *);
-int  _nvkm_connector_init(struct nvkm_object *);
-int  _nvkm_connector_fini(struct nvkm_object *, bool);
-
-struct nvkm_connector_impl {
-	struct nvkm_oclass base;
+	struct list_head head;
 };
 
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_connector *_conn = (void *)conn;                           \
-	nv_##l(_conn, "%02x:%02x%02x: "f, _conn->index,                        \
-	       _conn->info.location, _conn->info.type, ##a);                   \
+int  nvkm_connector_new(struct nvkm_disp *, int index, struct nvbios_connE *,
+			struct nvkm_connector **);
+void nvkm_connector_del(struct nvkm_connector **);
+void nvkm_connector_init(struct nvkm_connector *);
+void nvkm_connector_fini(struct nvkm_connector *);
+
+#define CONN_MSG(c,l,f,a...) do {                                              \
+	struct nvkm_connector *_conn = (c);                                    \
+	nvkm_##l(&_conn->disp->engine.subdev, "conn %02x:%02x%02x: "f"\n",     \
+		 _conn->index, _conn->info.location, _conn->info.type, ##a);   \
 } while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define CONN_ERR(c,f,a...) CONN_MSG((c), error, f, ##a)
+#define CONN_DBG(c,f,a...) CONN_MSG((c), debug, f, ##a)
+#define CONN_TRACE(c,f,a...) CONN_MSG((c), trace, f, ##a)
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
new file mode 100644
index 000000000000..1baa5c34b327
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_mthd_list
+g84_disp_core_mthd_dac = {
+	.mthd = 0x0080,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0400, 0x610b58 },
+		{ 0x0404, 0x610bdc },
+		{ 0x0420, 0x610bc4 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+g84_disp_core_mthd_head = {
+	.mthd = 0x0400,
+	.addr = 0x000540,
+	.data = {
+		{ 0x0800, 0x610ad8 },
+		{ 0x0804, 0x610ad0 },
+		{ 0x0808, 0x610a48 },
+		{ 0x080c, 0x610a78 },
+		{ 0x0810, 0x610ac0 },
+		{ 0x0814, 0x610af8 },
+		{ 0x0818, 0x610b00 },
+		{ 0x081c, 0x610ae8 },
+		{ 0x0820, 0x610af0 },
+		{ 0x0824, 0x610b08 },
+		{ 0x0828, 0x610b10 },
+		{ 0x082c, 0x610a68 },
+		{ 0x0830, 0x610a60 },
+		{ 0x0834, 0x000000 },
+		{ 0x0838, 0x610a40 },
+		{ 0x0840, 0x610a24 },
+		{ 0x0844, 0x610a2c },
+		{ 0x0848, 0x610aa8 },
+		{ 0x084c, 0x610ab0 },
+		{ 0x085c, 0x610c5c },
+		{ 0x0860, 0x610a84 },
+		{ 0x0864, 0x610a90 },
+		{ 0x0868, 0x610b18 },
+		{ 0x086c, 0x610b20 },
+		{ 0x0870, 0x610ac8 },
+		{ 0x0874, 0x610a38 },
+		{ 0x0878, 0x610c50 },
+		{ 0x0880, 0x610a58 },
+		{ 0x0884, 0x610a9c },
+		{ 0x089c, 0x610c68 },
+		{ 0x08a0, 0x610a70 },
+		{ 0x08a4, 0x610a50 },
+		{ 0x08a8, 0x610ae0 },
+		{ 0x08c0, 0x610b28 },
+		{ 0x08c4, 0x610b30 },
+		{ 0x08c8, 0x610b40 },
+		{ 0x08d4, 0x610b38 },
+		{ 0x08d8, 0x610b48 },
+		{ 0x08dc, 0x610b50 },
+		{ 0x0900, 0x610a18 },
+		{ 0x0904, 0x610ab8 },
+		{ 0x0910, 0x610c70 },
+		{ 0x0914, 0x610c78 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{    "DAC", 3, &g84_disp_core_mthd_dac  },
+		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
+		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{   "HEAD", 2, &g84_disp_core_mthd_head },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_core_oclass = {
+	.base.oclass = G82_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g84_disp_core_chan_mthd,
+	.chid = 0,
+};
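
Each nv50_disp_mthd_list above pairs a channel method with the register
that shadows it; the top-level .mthd and .addr fields are the per-unit
strides (one block per DAC/SOR/head), and the empty entry terminates the
table. A rough sketch of walking such a table to dump state -- the helper
name here is illustrative, the real dumper lives in channv50.c:

	static void
	dump_mthd_list(struct nvkm_subdev *subdev, int units,
		       const struct nv50_disp_mthd_list *list)
	{
		struct nvkm_device *device = subdev->device;
		int i, j;
		for (i = 0; i < units; i++) {
			for (j = 0; list->data[j].mthd; j++) {
				u32 mthd = list->data[j].mthd + list->mthd * i;
				u32 addr = list->data[j].addr + list->addr * i;
				/* entries with a zero register have no shadow */
				nvkm_debug(subdev, "mthd %04x: %08x\n", mthd,
					   addr ? nvkm_rd32(device, addr) : 0);
			}
		}
	}
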
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
new file mode 100644
index 000000000000..019379a3a01c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_mthd_list
+g94_disp_core_mthd_sor = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0600, 0x610794 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g94_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{    "DAC", 3, &g84_disp_core_mthd_dac  },
+		{    "SOR", 4, &g94_disp_core_mthd_sor  },
+		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{   "HEAD", 2, &g84_disp_core_mthd_head },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g94_disp_core_oclass = {
+	.base.oclass = GT206_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g94_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
new file mode 100644
index 000000000000..6b1dc703dac7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x660080 },
+		{ 0x0084, 0x660084 },
+		{ 0x0088, 0x660088 },
+		{ 0x008c, 0x000000 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_dac = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0180, 0x660180 },
+		{ 0x0184, 0x660184 },
+		{ 0x0188, 0x660188 },
+		{ 0x0190, 0x660190 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_sor = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0200, 0x660200 },
+		{ 0x0204, 0x660204 },
+		{ 0x0208, 0x660208 },
+		{ 0x0210, 0x660210 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_pior = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0300, 0x660300 },
+		{ 0x0304, 0x660304 },
+		{ 0x0308, 0x660308 },
+		{ 0x0310, 0x660310 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_head = {
+	.mthd = 0x0300,
+	.addr = 0x000300,
+	.data = {
+		{ 0x0400, 0x660400 },
+		{ 0x0404, 0x660404 },
+		{ 0x0408, 0x660408 },
+		{ 0x040c, 0x66040c },
+		{ 0x0410, 0x660410 },
+		{ 0x0414, 0x660414 },
+		{ 0x0418, 0x660418 },
+		{ 0x041c, 0x66041c },
+		{ 0x0420, 0x660420 },
+		{ 0x0424, 0x660424 },
+		{ 0x0428, 0x660428 },
+		{ 0x042c, 0x66042c },
+		{ 0x0430, 0x660430 },
+		{ 0x0434, 0x660434 },
+		{ 0x0438, 0x660438 },
+		{ 0x0440, 0x660440 },
+		{ 0x0444, 0x660444 },
+		{ 0x0448, 0x660448 },
+		{ 0x044c, 0x66044c },
+		{ 0x0450, 0x660450 },
+		{ 0x0454, 0x660454 },
+		{ 0x0458, 0x660458 },
+		{ 0x045c, 0x66045c },
+		{ 0x0460, 0x660460 },
+		{ 0x0468, 0x660468 },
+		{ 0x046c, 0x66046c },
+		{ 0x0470, 0x660470 },
+		{ 0x0474, 0x660474 },
+		{ 0x0480, 0x660480 },
+		{ 0x0484, 0x660484 },
+		{ 0x048c, 0x66048c },
+		{ 0x0490, 0x660490 },
+		{ 0x0494, 0x660494 },
+		{ 0x0498, 0x660498 },
+		{ 0x04b0, 0x6604b0 },
+		{ 0x04b8, 0x6604b8 },
+		{ 0x04bc, 0x6604bc },
+		{ 0x04c0, 0x6604c0 },
+		{ 0x04c4, 0x6604c4 },
+		{ 0x04c8, 0x6604c8 },
+		{ 0x04d0, 0x6604d0 },
+		{ 0x04d4, 0x6604d4 },
+		{ 0x04e0, 0x6604e0 },
+		{ 0x04e4, 0x6604e4 },
+		{ 0x04e8, 0x6604e8 },
+		{ 0x04ec, 0x6604ec },
+		{ 0x04f0, 0x6604f0 },
+		{ 0x04f4, 0x6604f4 },
+		{ 0x04f8, 0x6604f8 },
+		{ 0x04fc, 0x6604fc },
+		{ 0x0500, 0x660500 },
+		{ 0x0504, 0x660504 },
+		{ 0x0508, 0x660508 },
+		{ 0x050c, 0x66050c },
+		{ 0x0510, 0x660510 },
+		{ 0x0514, 0x660514 },
+		{ 0x0518, 0x660518 },
+		{ 0x051c, 0x66051c },
+		{ 0x052c, 0x66052c },
+		{ 0x0530, 0x660530 },
+		{ 0x054c, 0x66054c },
+		{ 0x0550, 0x660550 },
+		{ 0x0554, 0x660554 },
+		{ 0x0558, 0x660558 },
+		{ 0x055c, 0x66055c },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gf119_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_core_mthd_base },
+		{    "DAC", 3, &gf119_disp_core_mthd_dac  },
+		{    "SOR", 8, &gf119_disp_core_mthd_sor  },
+		{   "PIOR", 4, &gf119_disp_core_mthd_pior },
+		{   "HEAD", 4, &gf119_disp_core_mthd_head },
+		{}
+	}
+};
+
+static void
+gf119_disp_core_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610490, 0x00000010, 0x00000000);
+	nvkm_mask(device, 0x610490, 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core fini: %08x\n",
+			   nvkm_rd32(device, 0x610490));
+	}
+
+	/* disable error reporting and completion notification */
+	nvkm_mask(device, 0x610090, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000000);
+}
+
+static int
+gf119_disp_core_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610494, chan->push);
+	nvkm_wr32(device, 0x610498, 0x00010000);
+	nvkm_wr32(device, 0x61049c, 0x00000001);
+	nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000, 0x00000000);
+	nvkm_wr32(device, 0x610490, 0x01000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core init: %08x\n",
+			   nvkm_rd32(device, 0x610490));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+gf119_disp_core_func = {
+	.init = gf119_disp_core_init,
+	.fini = gf119_disp_core_fini,
+	.bind = gf119_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_core_oclass = {
+	.base.oclass = GF110_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gf119_disp_core_chan_mthd,
+	.chid = 0,
+};
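
The init/fini handshakes above lean on the nvkm_msec() polling macro from
subdev/timer.h: the body is re-evaluated until it executes break or the
timeout (in milliseconds) expires, and the expression goes negative on
timeout. The idiom in isolation, using the same register as the code above:

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
			break;		/* channel no longer busy */
	) < 0)
		return -EBUSY;		/* never went idle within 2s */
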
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
new file mode 100644
index 000000000000..088ab222e823
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gk104_disp_core_mthd_head = {
+	.mthd = 0x0300,
+	.addr = 0x000300,
+	.data = {
+		{ 0x0400, 0x660400 },
+		{ 0x0404, 0x660404 },
+		{ 0x0408, 0x660408 },
+		{ 0x040c, 0x66040c },
+		{ 0x0410, 0x660410 },
+		{ 0x0414, 0x660414 },
+		{ 0x0418, 0x660418 },
+		{ 0x041c, 0x66041c },
+		{ 0x0420, 0x660420 },
+		{ 0x0424, 0x660424 },
+		{ 0x0428, 0x660428 },
+		{ 0x042c, 0x66042c },
+		{ 0x0430, 0x660430 },
+		{ 0x0434, 0x660434 },
+		{ 0x0438, 0x660438 },
+		{ 0x0440, 0x660440 },
+		{ 0x0444, 0x660444 },
+		{ 0x0448, 0x660448 },
+		{ 0x044c, 0x66044c },
+		{ 0x0450, 0x660450 },
+		{ 0x0454, 0x660454 },
+		{ 0x0458, 0x660458 },
+		{ 0x045c, 0x66045c },
+		{ 0x0460, 0x660460 },
+		{ 0x0468, 0x660468 },
+		{ 0x046c, 0x66046c },
+		{ 0x0470, 0x660470 },
+		{ 0x0474, 0x660474 },
+		{ 0x047c, 0x66047c },
+		{ 0x0480, 0x660480 },
+		{ 0x0484, 0x660484 },
+		{ 0x0488, 0x660488 },
+		{ 0x048c, 0x66048c },
+		{ 0x0490, 0x660490 },
+		{ 0x0494, 0x660494 },
+		{ 0x0498, 0x660498 },
+		{ 0x04a0, 0x6604a0 },
+		{ 0x04b0, 0x6604b0 },
+		{ 0x04b8, 0x6604b8 },
+		{ 0x04bc, 0x6604bc },
+		{ 0x04c0, 0x6604c0 },
+		{ 0x04c4, 0x6604c4 },
+		{ 0x04c8, 0x6604c8 },
+		{ 0x04d0, 0x6604d0 },
+		{ 0x04d4, 0x6604d4 },
+		{ 0x04e0, 0x6604e0 },
+		{ 0x04e4, 0x6604e4 },
+		{ 0x04e8, 0x6604e8 },
+		{ 0x04ec, 0x6604ec },
+		{ 0x04f0, 0x6604f0 },
+		{ 0x04f4, 0x6604f4 },
+		{ 0x04f8, 0x6604f8 },
+		{ 0x04fc, 0x6604fc },
+		{ 0x0500, 0x660500 },
+		{ 0x0504, 0x660504 },
+		{ 0x0508, 0x660508 },
+		{ 0x050c, 0x66050c },
+		{ 0x0510, 0x660510 },
+		{ 0x0514, 0x660514 },
+		{ 0x0518, 0x660518 },
+		{ 0x051c, 0x66051c },
+		{ 0x0520, 0x660520 },
+		{ 0x0524, 0x660524 },
+		{ 0x052c, 0x66052c },
+		{ 0x0530, 0x660530 },
+		{ 0x054c, 0x66054c },
+		{ 0x0550, 0x660550 },
+		{ 0x0554, 0x660554 },
+		{ 0x0558, 0x660558 },
+		{ 0x055c, 0x66055c },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+gk104_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_core_mthd_base },
+		{    "DAC", 3, &gf119_disp_core_mthd_dac  },
+		{    "SOR", 8, &gf119_disp_core_mthd_sor  },
+		{   "PIOR", 4, &gf119_disp_core_mthd_pior },
+		{   "HEAD", 4, &gk104_disp_core_mthd_head },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_core_oclass = {
+	.base.oclass = GK104_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
new file mode 100644
index 000000000000..df0f45c20108
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk110_disp_core_oclass = {
+	.base.oclass = GK110_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
new file mode 100644
index 000000000000..9e27f8fd98b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gm107_disp_core_oclass = {
+	.base.oclass = GM107_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
new file mode 100644
index 000000000000..222f4a822f4d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gm204_disp_core_oclass = {
+	.base.oclass = GM204_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
new file mode 100644
index 000000000000..b234547708fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_core_oclass = {
+	.base.oclass = GT200_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g84_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
new file mode 100644
index 000000000000..8f5ba2018975
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_core_oclass = {
+	.base.oclass = GT214_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g94_disp_core_chan_mthd,
+	.chid = 0,
+};
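
The run of small files above is deliberate boilerplate: each chipset gets
its own nv50_disp_dmac_oclass naming the class exposed to userspace, while
the hardware functions and method tables are shared with the earliest
compatible implementation. A hypothetical new chipset reusing the gf119
backend would only need something like this (the "gxyz" name is invented
for illustration):

	const struct nv50_disp_dmac_oclass
	gxyz_disp_core_oclass = {
		.base.oclass = GM204_DISP_CORE_CHANNEL_DMA,	/* class id */
		.base.minver = 0,
		.base.maxver = 0,
		.ctor = nv50_disp_core_new,	/* arg parsing + alloc */
		.func = &gf119_disp_core_func,	/* init/fini/bind ops */
		.mthd = &gk104_disp_core_chan_mthd, /* debug tables */
		.chid = 0,			/* core is channel 0 */
	};
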
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
new file mode 100644
index 000000000000..db4a9b3e0e09
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_core_new(const struct nv50_disp_dmac_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_core_channel_dma_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	u64 push;
+	int ret;
+
+	nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp core channel dma vers %d "
+				   "pushbuf %016llx\n",
+			   args->v0.version, args->v0.pushbuf);
+		push = args->v0.pushbuf;
+	} else
+		return ret;
+
+	return nv50_disp_dmac_new_(func, mthd, root, chid, 0,
+				   push, oclass, pobject);
+}
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x610bb8 },
+		{ 0x0088, 0x610b9c },
+		{ 0x008c, 0x000000 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_dac = {
+	.mthd = 0x0080,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0400, 0x610b58 },
+		{ 0x0404, 0x610bdc },
+		{ 0x0420, 0x610828 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_sor = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0600, 0x610b70 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_pior = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0700, 0x610b80 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_head = {
+	.mthd = 0x0400,
+	.addr = 0x000540,
+	.data = {
+		{ 0x0800, 0x610ad8 },
+		{ 0x0804, 0x610ad0 },
+		{ 0x0808, 0x610a48 },
+		{ 0x080c, 0x610a78 },
+		{ 0x0810, 0x610ac0 },
+		{ 0x0814, 0x610af8 },
+		{ 0x0818, 0x610b00 },
+		{ 0x081c, 0x610ae8 },
+		{ 0x0820, 0x610af0 },
+		{ 0x0824, 0x610b08 },
+		{ 0x0828, 0x610b10 },
+		{ 0x082c, 0x610a68 },
+		{ 0x0830, 0x610a60 },
+		{ 0x0834, 0x000000 },
+		{ 0x0838, 0x610a40 },
+		{ 0x0840, 0x610a24 },
+		{ 0x0844, 0x610a2c },
+		{ 0x0848, 0x610aa8 },
+		{ 0x084c, 0x610ab0 },
+		{ 0x0860, 0x610a84 },
+		{ 0x0864, 0x610a90 },
+		{ 0x0868, 0x610b18 },
+		{ 0x086c, 0x610b20 },
+		{ 0x0870, 0x610ac8 },
+		{ 0x0874, 0x610a38 },
+		{ 0x0880, 0x610a58 },
+		{ 0x0884, 0x610a9c },
+		{ 0x08a0, 0x610a70 },
+		{ 0x08a4, 0x610a50 },
+		{ 0x08a8, 0x610ae0 },
+		{ 0x08c0, 0x610b28 },
+		{ 0x08c4, 0x610b30 },
+		{ 0x08c8, 0x610b40 },
+		{ 0x08d4, 0x610b38 },
+		{ 0x08d8, 0x610b48 },
+		{ 0x08dc, 0x610b50 },
+		{ 0x0900, 0x610a18 },
+		{ 0x0904, 0x610ab8 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{    "DAC", 3, &nv50_disp_core_mthd_dac  },
+		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
+		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{   "HEAD", 2, &nv50_disp_core_mthd_head },
+		{}
+	}
+};
+
+static void
+nv50_disp_core_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
+	nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core fini: %08x\n",
+			   nvkm_rd32(device, 0x610200));
+	}
+
+	/* disable error reporting and completion notifications */
+	nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);
+}
+
+static int
+nv50_disp_core_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);
+
+	/* attempt to unstick channel from some unknown state */
+	if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
+		nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
+	if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
+		nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610204, chan->push);
+	nvkm_wr32(device, 0x610208, 0x00010000);
+	nvkm_wr32(device, 0x61020c, 0x00000000);
+	nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000, 0x00000000);
+	nvkm_wr32(device, 0x610200, 0x01000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core init: %08x\n",
+			   nvkm_rd32(device, 0x610200));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+nv50_disp_core_func = {
+	.init = nv50_disp_core_init,
+	.fini = nv50_disp_core_fini,
+	.bind = nv50_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_core_oclass = {
+	.base.oclass = NV50_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &nv50_disp_core_chan_mthd,
+	.chid = 0,
+};
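
A note on the unpack idiom used by nv50_disp_core_new and the other
constructors in this patch: nvif_unpack() assigns "ret" as a side effect
while validating args->v0.version against the accepted range, so the bare
"return ret" in the else branch returns the macro's -ENOSYS/-E2BIG rather
than an uninitialised value. The shape shared by every constructor:

	union { struct nv50_disp_core_channel_dma_v0 v0; } *args = data;
	int ret;			/* written by nvif_unpack() */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		/* args->v0 validated; safe to read its fields */
	} else
		return ret;		/* error chosen by the macro */
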
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
new file mode 100644
index 000000000000..dd99fc7060b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+g84_disp_curs_oclass = {
+	.base.oclass = G82_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 7,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
index f042e7d8321d..2a1574e06ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -21,17 +21,17 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "channv50.h"
+#include "rootnv50.h"
 
-struct nvkm_oclass *
-g94_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv50_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gf119_disp_curs_oclass = {
+	.base.oclass = GF110_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 13,
+};
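
Note that this hunk and the one that follows are rename-with-rewrite diffs:
git's similarity detection has paired the new cursor sources with deleted
subdev/mc files, so the removed nvkm_mc_oclass code shown here belongs to
the old master-control classes and is unrelated to the cursor channels
being added.
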
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
index 8d2a8f457778..28e8f06c9472 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
@@ -21,18 +21,17 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "channv50.h"
+#include "rootnv50.h"
 
-struct nvkm_oclass *
-gf106_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0xc3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = gf100_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-	.unk260 = gf100_mc_unk260,
-}.base;
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gk104_disp_curs_oclass = {
+	.base.oclass = GK104_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 13,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
new file mode 100644
index 000000000000..d8a4b9ca139c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gt215_disp_curs_oclass = {
+	.base.oclass = GT214_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 7,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
new file mode 100644
index 000000000000..225858e62cf6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_cursor_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+
+	nvif_ioctl(parent, "create disp cursor size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
+			   args->v0.version, args->v0.head);
+		if (args->v0.head > disp->base.head.nr)
+			return -EINVAL;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_chan_new_(func, mthd, root, chid + head,
+				   head, oclass, pobject);
+}
+
+const struct nv50_disp_pioc_oclass
+nv50_disp_curs_oclass = {
+	.base.oclass = NV50_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 7,
+};
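
Cursor channels are PIO rather than DMA controlled (nv50_disp_pioc_oclass,
no pushbuffer), and nv50_disp_curs_new above simply derives the channel id
from the per-class base plus the requested head:

	/* nv50: .chid = 7, so head 0 -> channel 7, head 1 -> channel 8;
	 * the gf119 variant starts at 13 instead. */
	int channel = 7 + args->v0.head;
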
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
index 0f7d1ec4d37e..9bfa9e7dc161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
@@ -33,6 +33,7 @@
 int
 nv50_dac_power(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 doff = outp->or * 0x800;
 	union {
 		struct nv50_disp_dac_pwr_v0 v0;
@@ -40,12 +41,12 @@ nv50_dac_power(NV50_DISP_MTHD_V1)
 	u32 stat;
 	int ret;
 
-	nv_ioctl(object, "disp dac pwr size %d\n", size);
+	nvif_ioctl(object, "disp dac pwr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp dac pwr vers %d state %d data %d "
-				 "vsync %d hsync %d\n",
-			 args->v0.version, args->v0.state, args->v0.data,
-			 args->v0.vsync, args->v0.hsync);
+		nvif_ioctl(object, "disp dac pwr vers %d state %d data %d "
+				   "vsync %d hsync %d\n",
+			   args->v0.version, args->v0.state, args->v0.data,
+			   args->v0.vsync, args->v0.hsync);
 		stat  = 0x00000040 * !args->v0.state;
 		stat |= 0x00000010 * !args->v0.data;
 		stat |= 0x00000004 * !args->v0.vsync;
@@ -53,15 +54,23 @@ nv50_dac_power(NV50_DISP_MTHD_V1)
 	} else
 		return ret;
 
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
+	nvkm_mask(device, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
 	return 0;
 }
 
 int
 nv50_dac_sense(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	union {
 		struct nv50_disp_dac_load_v0 v0;
 	} *args = data;
@@ -69,31 +78,49 @@ nv50_dac_sense(NV50_DISP_MTHD_V1)
 	u32 loadval;
 	int ret;
 
-	nv_ioctl(object, "disp dac load size %d\n", size);
+	nvif_ioctl(object, "disp dac load size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp dac load vers %d data %08x\n",
-			 args->v0.version, args->v0.data);
+		nvif_ioctl(object, "disp dac load vers %d data %08x\n",
+			   args->v0.version, args->v0.data);
 		if (args->v0.data & 0xfff00000)
 			return -EINVAL;
 		loadval = args->v0.data;
 	} else
 		return ret;
 
-	nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80150000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
 
-	nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+	nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval);
 	mdelay(9);
 	udelay(500);
-	loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
+	loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000);
 
-	nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80550000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
 
-	nv_debug(priv, "DAC%d sense: 0x%08x\n", outp->or, loadval);
+	nvkm_debug(subdev, "DAC%d sense: %08x\n", outp->or, loadval);
 	if (!(loadval & 0x80000000))
 		return -ETIMEDOUT;
 
 	args->v0.load = (loadval & 0x38000000) >> 27;
 	return 0;
 }
+
+static const struct nvkm_output_func
+nv50_dac_output_func = {
+};
+
+int
+nv50_dac_output_new(struct nvkm_disp *disp, int index,
+		    struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_new_(&nv50_dac_output_func, disp,
+				index, dcbE, poutp);
+}
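
nv50_dac_sense drives a test load value into the DAC, waits roughly 9.5 ms
for the sense circuit to settle, then reads the result back. Judging from
the masks in the code above, bit 31 flags a completed measurement and bits
29:27 carry the per-channel load status returned in args->v0.load:

	u32 loadval = 0xb8000000;		/* hypothetical readback */
	u32 load = (loadval & 0x38000000) >> 27;	/* 0x7 here */
	bool present = load != 0;		/* a monitor pulled R, G or B */
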
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
new file mode 100644
index 000000000000..876b14549a58
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+int
+gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
+		     struct nvkm_object *object, u32 handle)
+{
+	return nvkm_ramht_insert(chan->base.root->ramht, object,
+				 chan->base.chid, -9, handle,
+				 chan->base.chid << 27 | 0x00000001);
+}
+
+static void
+gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+	}
+
+	/* disable error reporting and completion notification */
+	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+}
+
+static int
+gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
+	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d init: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+gf119_disp_dmac_func = {
+	.init = gf119_disp_dmac_init,
+	.fini = gf119_disp_dmac_fini,
+	.bind = gf119_disp_dmac_bind,
+};
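
gf119_disp_dmac_bind enters a DMA object into the root object's RAMHT so
that channel methods can reference it by handle; the nv50 variant further
below differs only in the context word it stores alongside the handle:

	/* RAMHT context word, per the two bind() implementations:
	 *   gf119: (chid << 27) | 0x00000001
	 *   nv50:  (chid << 28) | chid
	 */
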
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
new file mode 100644
index 000000000000..9c6645a357b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <core/oproxy.h>
+#include <core/ramht.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <engine/dma.h>
+
+struct nv50_disp_dmac_object {
+	struct nvkm_oproxy oproxy;
+	struct nv50_disp_root *root;
+	int hash;
+};
+
+static void
+nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
+{
+	struct nv50_disp_dmac_object *object =
+		container_of(base, typeof(*object), oproxy);
+	nvkm_ramht_remove(object->root->ramht, object->hash);
+}
+
+static const struct nvkm_oproxy_func
+nv50_disp_dmac_child_func_ = {
+	.dtor[0] = nv50_disp_dmac_child_del_,
+};
+
+static int
+nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
+			  const struct nvkm_oclass *oclass,
+			  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	struct nv50_disp_root *root = chan->base.root;
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	const struct nvkm_device_oclass *sclass = oclass->priv;
+	struct nv50_disp_dmac_object *object;
+	int ret;
+
+	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
+	object->root = root;
+	*pobject = &object->oproxy.base;
+
+	ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
+	if (ret)
+		return ret;
+
+	object->hash = chan->func->bind(chan, object->oproxy.object,
+					      oclass->handle);
+	if (object->hash < 0)
+		return object->hash;
+
+	return 0;
+}
+
+static int
+nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
+			  struct nvkm_oclass *sclass)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const struct nvkm_device_oclass *oclass = NULL;
+
+	sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
+	if (sclass->engine && sclass->engine->func->base.sclass) {
+		sclass->engine->func->base.sclass(sclass, index, &oclass);
+		if (oclass) {
+			sclass->priv = oclass;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void
+nv50_disp_dmac_fini_(struct nv50_disp_chan *base)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	chan->func->fini(chan);
+}
+
+static int
+nv50_disp_dmac_init_(struct nv50_disp_chan *base)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	return chan->func->init(chan);
+}
+
+static void *
+nv50_disp_dmac_dtor_(struct nv50_disp_chan *base)
+{
+	return nv50_disp_dmac(base);
+}
+
+static const struct nv50_disp_chan_func
+nv50_disp_dmac_func_ = {
+	.dtor = nv50_disp_dmac_dtor_,
+	.init = nv50_disp_dmac_init_,
+	.fini = nv50_disp_dmac_fini_,
+	.child_get = nv50_disp_dmac_child_get_,
+	.child_new = nv50_disp_dmac_child_new_,
+};
+
+int
+nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp_root *root, int chid, int head, u64 push,
+		    const struct nvkm_oclass *oclass,
+		    struct nvkm_object **pobject)
+{
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_dmaobj *dmaobj;
+	struct nv50_disp_dmac *chan;
+	int ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->func = func;
+
+	ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
+				  chid, head, oclass, &chan->base);
+	if (ret)
+		return ret;
+
+	dmaobj = nvkm_dma_search(device->dma, client, push);
+	if (!dmaobj)
+		return -ENOENT;
+
+	if (dmaobj->limit - dmaobj->start != 0xfff)
+		return -EINVAL;
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VRAM:
+		chan->push = 0x00000001 | dmaobj->start >> 8;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		chan->push = 0x00000003 | dmaobj->start >> 8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int
+nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
+		    struct nvkm_object *object, u32 handle)
+{
+	return nvkm_ramht_insert(chan->base.root->ramht, object,
+				 chan->base.chid, -10, handle,
+				 chan->base.chid << 28 |
+				 chan->base.chid);
+}
+
+static void
+nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+	}
+
+	/* disable error reporting and completion notifications */
+	nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+}
+
+static int
+nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
+	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+nv50_disp_dmac_func = {
+	.init = nv50_disp_dmac_init,
+	.fini = nv50_disp_dmac_fini,
+	.bind = nv50_disp_dmac_bind,
+};
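
nv50_disp_dmac_new_ above requires the pushbuffer DMA object to span
exactly 0x1000 bytes and packs its memory target and page-aligned start
address into the single word later written to the channel's init register
(0x610204 on nv50, 0x610494 on gf119). Worked example with an arbitrary
offset:

	u64 start = 0x200000;			/* dmaobj->start, 4KiB object */
	u32 push  = 0x00000001 | (u32)(start >> 8);
	/* 0x00002001: low bits 01 = VRAM; 11 would be non-snooped PCI */
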
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
new file mode 100644
index 000000000000..c748ca23ab70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -0,0 +1,91 @@
+#ifndef __NV50_DISP_DMAC_H__
+#define __NV50_DISP_DMAC_H__
+#define nv50_disp_dmac(p) container_of((p), struct nv50_disp_dmac, base)
+#include "channv50.h"
+
+struct nv50_disp_dmac {
+	const struct nv50_disp_dmac_func *func;
+	struct nv50_disp_chan base;
+	u32 push;
+};
+
+struct nv50_disp_dmac_func {
+	int  (*init)(struct nv50_disp_dmac *);
+	void (*fini)(struct nv50_disp_dmac *);
+	int  (*bind)(struct nv50_disp_dmac *, struct nvkm_object *, u32 handle);
+};
+
+int nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp_root *, int chid, int head, u64 push,
+			const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nv50_disp_dmac_func nv50_disp_dmac_func;
+int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
+extern const struct nv50_disp_dmac_func nv50_disp_core_func;
+
+extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
+int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
+extern const struct nv50_disp_dmac_func gf119_disp_core_func;
+
+struct nv50_disp_dmac_oclass {
+	int (*ctor)(const struct nv50_disp_dmac_func *,
+		    const struct nv50_disp_chan_mthd *,
+		    struct nv50_disp_root *, int chid,
+		    const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const struct nv50_disp_dmac_func *func;
+	const struct nv50_disp_chan_mthd *mthd;
+	int chid;
+};
+
+int nv50_disp_core_new(const struct nv50_disp_dmac_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **);
+int nv50_disp_base_new(const struct nv50_disp_dmac_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **);
+int nv50_disp_ovly_new(const struct nv50_disp_dmac_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **);
+
+extern const struct nv50_disp_dmac_oclass nv50_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass nv50_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass nv50_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass g84_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass g84_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass g84_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass g94_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gt200_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gt200_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gt200_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gt215_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gt215_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gt215_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gf119_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gf119_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gf119_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gk104_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gk104_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gk104_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gk110_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
+
+extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gm204_disp_core_oclass;
+#endif
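[annotation] The header above is pure plumbing for this refactor: each channel instance carries a const hook table (nv50_disp_dmac_func), and each chipset exports const oclass descriptors pairing a constructor with that table. A minimal standalone sketch of the shape, assuming nothing about the real nvkm types beyond what the declarations show -- the names below merely mirror the kernel ones:

#include <stdio.h>

struct dmac;                              /* channel instance, defined below */

struct dmac_func {                        /* per-chipset hooks */
	int  (*init)(struct dmac *);
	void (*fini)(struct dmac *);
};

struct dmac {
	const struct dmac_func *func;     /* selected once, at construction */
	int chid;
};

static int nv50_init(struct dmac *c) { printf("nv50 init chid %d\n", c->chid); return 0; }
static void nv50_fini(struct dmac *c) { printf("nv50 fini chid %d\n", c->chid); }

static const struct dmac_func nv50_func = { .init = nv50_init, .fini = nv50_fini };

int main(void)
{
	struct dmac core = { .func = &nv50_func, .chid = 0 };
	core.func->init(&core);           /* dispatch via the hook table */
	core.func->fini(&core);
	return 0;
}

The payoff, visible throughout the rest of the series, is that chipset variation becomes const data instead of casts through nv_oclass().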
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 68347661adca..74e2f7c6c07e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -48,12 +48,12 @@ struct dp_state {
 static int
 dp_set_link_config(struct dp_state *dp)
 {
-	struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
 	struct nvkm_output_dp *outp = dp->outp;
-	struct nvkm_disp *disp = nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(disp);
+	struct nvkm_disp *disp = outp->base.disp;
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_init init = {
-		.subdev = nv_subdev(disp),
+		.subdev = subdev,
 		.bios = bios,
 		.offset = 0x0000,
 		.outp = &outp->base.info,
@@ -64,33 +64,33 @@ dp_set_link_config(struct dp_state *dp)
 	u8 sink[2];
 	int ret;
 
-	DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+	OUTP_DBG(&outp->base, "%d lanes at %d KB/s", dp->link_nr, dp->link_bw);
 
 	/* set desired link configuration on the source */
 	if ((lnkcmp = dp->outp->info.lnkcmp)) {
 		if (outp->version < 0x30) {
-			while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
+			while ((dp->link_bw / 10) < nvbios_rd16(bios, lnkcmp))
 				lnkcmp += 4;
-			init.offset = nv_ro16(bios, lnkcmp + 2);
+			init.offset = nvbios_rd16(bios, lnkcmp + 2);
 		} else {
-			while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp))
+			while ((dp->link_bw / 27000) < nvbios_rd08(bios, lnkcmp))
 				lnkcmp += 3;
-			init.offset = nv_ro16(bios, lnkcmp + 1);
+			init.offset = nvbios_rd16(bios, lnkcmp + 1);
 		}
 
 		nvbios_exec(&init);
 	}
 
-	ret = impl->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
-			    outp->dpcd[DPCD_RC02] &
-				       DPCD_RC02_ENHANCED_FRAME_CAP);
+	ret = outp->func->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
+				  outp->dpcd[DPCD_RC02] &
+					     DPCD_RC02_ENHANCED_FRAME_CAP);
 	if (ret) {
 		if (ret < 0)
-			ERR("lnk_ctl failed with %d\n", ret);
+			OUTP_ERR(&outp->base, "lnk_ctl failed with %d", ret);
 		return ret;
 	}
 
-	impl->lnk_pwr(outp, dp->link_nr);
+	outp->func->lnk_pwr(outp, dp->link_nr);
 
 	/* set desired link configuration on the sink */
 	sink[0] = dp->link_bw / 27000;
@@ -98,29 +98,27 @@ dp_set_link_config(struct dp_state *dp)
 	if (outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
 		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
 
-	return nv_wraux(outp->base.edid, DPCD_LC00_LINK_BW_SET, sink, 2);
+	return nvkm_wraux(outp->aux, DPCD_LC00_LINK_BW_SET, sink, 2);
 }
 
 static void
 dp_set_training_pattern(struct dp_state *dp, u8 pattern)
 {
-	struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
 	struct nvkm_output_dp *outp = dp->outp;
 	u8 sink_tp;
 
-	DBG("training pattern %d\n", pattern);
-	impl->pattern(outp, pattern);
+	OUTP_DBG(&outp->base, "training pattern %d", pattern);
+	outp->func->pattern(outp, pattern);
 
-	nv_rdaux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
+	nvkm_rdaux(outp->aux, DPCD_LC02, &sink_tp, 1);
 	sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
 	sink_tp |= pattern;
-	nv_wraux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
+	nvkm_wraux(outp->aux, DPCD_LC02, &sink_tp, 1);
 }
 
 static int
 dp_link_train_commit(struct dp_state *dp, bool pc)
 {
-	struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
 	struct nvkm_output_dp *outp = dp->outp;
 	int ret, i;
 
@@ -146,16 +144,17 @@ dp_link_train_commit(struct dp_state *dp, bool pc)
 		dp->conf[i] = (lpre << 3) | lvsw;
 		dp->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);
 
-		DBG("config lane %d %02x %02x\n", i, dp->conf[i], lpc2);
-		impl->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
+		OUTP_DBG(&outp->base, "config lane %d %02x %02x",
+			 i, dp->conf[i], lpc2);
+		outp->func->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
 	}
 
-	ret = nv_wraux(outp->base.edid, DPCD_LC03(0), dp->conf, 4);
+	ret = nvkm_wraux(outp->aux, DPCD_LC03(0), dp->conf, 4);
 	if (ret)
 		return ret;
 
 	if (pc) {
-		ret = nv_wraux(outp->base.edid, DPCD_LC0F, dp->pc2conf, 2);
+		ret = nvkm_wraux(outp->aux, DPCD_LC0F, dp->pc2conf, 2);
 		if (ret)
 			return ret;
 	}
@@ -174,17 +173,18 @@ dp_link_train_update(struct dp_state *dp, bool pc, u32 delay)
 	else
 		udelay(delay);
 
-	ret = nv_rdaux(outp->base.edid, DPCD_LS02, dp->stat, 6);
+	ret = nvkm_rdaux(outp->aux, DPCD_LS02, dp->stat, 6);
 	if (ret)
 		return ret;
 
 	if (pc) {
-		ret = nv_rdaux(outp->base.edid, DPCD_LS0C, &dp->pc2stat, 1);
+		ret = nvkm_rdaux(outp->aux, DPCD_LS0C, &dp->pc2stat, 1);
 		if (ret)
 			dp->pc2stat = 0x00;
-		DBG("status %6ph pc2 %02x\n", dp->stat, dp->pc2stat);
+		OUTP_DBG(&outp->base, "status %6ph pc2 %02x",
+			 dp->stat, dp->pc2stat);
 	} else {
-		DBG("status %6ph\n", dp->stat);
+		OUTP_DBG(&outp->base, "status %6ph", dp->stat);
 	}
 
 	return 0;
@@ -260,11 +260,11 @@ static void
 dp_link_train_init(struct dp_state *dp, bool spread)
 {
 	struct nvkm_output_dp *outp = dp->outp;
-	struct nvkm_disp *disp = nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(disp);
+	struct nvkm_disp *disp = outp->base.disp;
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
 	struct nvbios_init init = {
-		.subdev = nv_subdev(disp),
-		.bios = bios,
+		.subdev = subdev,
+		.bios = subdev->device->bios,
 		.outp = &outp->base.info,
 		.crtc = -1,
 		.execute = 1,
@@ -286,11 +286,11 @@ static void
 dp_link_train_fini(struct dp_state *dp)
 {
 	struct nvkm_output_dp *outp = dp->outp;
-	struct nvkm_disp *disp = nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(disp);
+	struct nvkm_disp *disp = outp->base.disp;
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
 	struct nvbios_init init = {
-		.subdev = nv_subdev(disp),
-		.bios = bios,
+		.subdev = subdev,
+		.bios = subdev->device->bios,
 		.outp = &outp->base.info,
 		.crtc = -1,
 		.execute = 1,
@@ -322,7 +322,7 @@ void
 nvkm_dp_train(struct work_struct *w)
 {
 	struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nv50_disp *disp = nv50_disp(outp->base.disp);
 	const struct dp_rates *cfg = nvkm_dp_rates;
 	struct dp_state _dp = {
 		.outp = outp,
@@ -330,11 +330,11 @@ nvkm_dp_train(struct work_struct *w)
 	u32 datarate = 0;
 	int ret;
 
-	if (!outp->base.info.location && priv->sor.magic)
-		priv->sor.magic(&outp->base);
+	if (!outp->base.info.location && disp->func->sor.magic)
+		disp->func->sor.magic(&outp->base);
 
 	/* bring capabilities within encoder limits */
-	if (nv_mclass(priv) < GF110_DISP)
+	if (disp->base.engine.subdev.device->chipset < 0xd0)
 		outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
 	if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
 		outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
@@ -386,12 +386,12 @@ nvkm_dp_train(struct work_struct *w)
 	/* finish link training and execute post-train script from vbios */
 	dp_set_training_pattern(dp, 0);
 	if (ret < 0)
-		ERR("link training failed\n");
+		OUTP_ERR(&outp->base, "link training failed");
 
 	dp_link_train_fini(dp);
 
 	/* signal completion and enable link interrupt handling */
-	DBG("training complete\n");
+	OUTP_DBG(&outp->base, "training complete");
 	atomic_set(&outp->lt.done, 1);
 	wake_up(&outp->lt.wait);
 	nvkm_notify_get(&outp->irq);
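[annotation] Beyond the mechanical impl-to-func conversion, the dport.c hunks above show the two DPCD bytes the source writes to the sink when (re)configuring the link. A standalone sketch of just that encoding, with the aux write stubbed out; the register offset follows the DisplayPort DPCD layout, and everything else here is illustrative:

#include <stdint.h>
#include <stdio.h>

#define DPCD_LC00_LINK_BW_SET       0x100   /* per the DP spec */
#define DPCD_LC01_ENHANCED_FRAME_EN 0x80

static int wraux(uint32_t addr, const uint8_t *buf, int n)
{
	printf("aux write %#x:", addr);   /* stand-in for the real aux channel */
	for (int i = 0; i < n; i++)
		printf(" %02x", buf[i]);
	printf("\n");
	return 0;
}

static int set_link_config(uint32_t link_bw, int link_nr, int enhanced)
{
	uint8_t sink[2];

	sink[0] = link_bw / 27000;        /* rate in 270 MB/s units: 0x06/0x0a/0x14 */
	sink[1] = link_nr;                /* lane count: 1, 2 or 4 */
	if (enhanced)
		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
	return wraux(DPCD_LC00_LINK_BW_SET, sink, 2);
}

int main(void)
{
	return set_link_config(270000, 4, 1);   /* HBR x4, enhanced framing */
}

The same 27000 divisor appears in the lnkcmp table walk above, which converts link_bw in KB/s into the VBIOS table's per-lane rate units before picking an init-script offset.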
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index a0dcf534cb20..3e3e592cd09f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -22,251 +22,34 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-g84_disp_core_mthd_dac = {
-	.mthd = 0x0080,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0400, 0x610b58 },
-		{ 0x0404, 0x610bdc },
-		{ 0x0420, 0x610bc4 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-g84_disp_core_mthd_head = {
-	.mthd = 0x0400,
-	.addr = 0x000540,
-	.data = {
-		{ 0x0800, 0x610ad8 },
-		{ 0x0804, 0x610ad0 },
-		{ 0x0808, 0x610a48 },
-		{ 0x080c, 0x610a78 },
-		{ 0x0810, 0x610ac0 },
-		{ 0x0814, 0x610af8 },
-		{ 0x0818, 0x610b00 },
-		{ 0x081c, 0x610ae8 },
-		{ 0x0820, 0x610af0 },
-		{ 0x0824, 0x610b08 },
-		{ 0x0828, 0x610b10 },
-		{ 0x082c, 0x610a68 },
-		{ 0x0830, 0x610a60 },
-		{ 0x0834, 0x000000 },
-		{ 0x0838, 0x610a40 },
-		{ 0x0840, 0x610a24 },
-		{ 0x0844, 0x610a2c },
-		{ 0x0848, 0x610aa8 },
-		{ 0x084c, 0x610ab0 },
-		{ 0x085c, 0x610c5c },
-		{ 0x0860, 0x610a84 },
-		{ 0x0864, 0x610a90 },
-		{ 0x0868, 0x610b18 },
-		{ 0x086c, 0x610b20 },
-		{ 0x0870, 0x610ac8 },
-		{ 0x0874, 0x610a38 },
-		{ 0x0878, 0x610c50 },
-		{ 0x0880, 0x610a58 },
-		{ 0x0884, 0x610a9c },
-		{ 0x089c, 0x610c68 },
-		{ 0x08a0, 0x610a70 },
-		{ 0x08a4, 0x610a50 },
-		{ 0x08a8, 0x610ae0 },
-		{ 0x08c0, 0x610b28 },
-		{ 0x08c4, 0x610b30 },
-		{ 0x08c8, 0x610b40 },
-		{ 0x08d4, 0x610b38 },
-		{ 0x08d8, 0x610b48 },
-		{ 0x08dc, 0x610b50 },
-		{ 0x0900, 0x610a18 },
-		{ 0x0904, 0x610ab8 },
-		{ 0x0910, 0x610c70 },
-		{ 0x0914, 0x610c78 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &nv50_disp_core_mthd_base },
-		{    "DAC", 3, &g84_disp_core_mthd_dac  },
-		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
-		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
-		{   "HEAD", 2, &g84_disp_core_mthd_head },
-		{}
-	}
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-g84_disp_base_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x0008c4 },
-		{ 0x0088, 0x0008d0 },
-		{ 0x008c, 0x0008dc },
-		{ 0x0090, 0x0008e4 },
-		{ 0x0094, 0x610884 },
-		{ 0x00a0, 0x6108a0 },
-		{ 0x00a4, 0x610878 },
-		{ 0x00c0, 0x61086c },
-		{ 0x00c4, 0x610800 },
-		{ 0x00c8, 0x61080c },
-		{ 0x00cc, 0x610818 },
-		{ 0x00e0, 0x610858 },
-		{ 0x00e4, 0x610860 },
-		{ 0x00e8, 0x6108ac },
-		{ 0x00ec, 0x6108b4 },
-		{ 0x00fc, 0x610824 },
-		{ 0x0100, 0x610894 },
-		{ 0x0104, 0x61082c },
-		{ 0x0110, 0x6108bc },
-		{ 0x0114, 0x61088c },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_base_mthd_chan = {
-	.name = "Base",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &g84_disp_base_mthd_base },
-		{  "Image", 2, &nv50_disp_base_mthd_image },
-		{}
-	}
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-g84_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x6109a0 },
-		{ 0x0088, 0x6109c0 },
-		{ 0x008c, 0x6109c8 },
-		{ 0x0090, 0x6109b4 },
-		{ 0x0094, 0x610970 },
-		{ 0x00a0, 0x610998 },
-		{ 0x00a4, 0x610964 },
-		{ 0x00c0, 0x610958 },
-		{ 0x00e0, 0x6109a8 },
-		{ 0x00e4, 0x6109d0 },
-		{ 0x00e8, 0x6109d8 },
-		{ 0x0100, 0x61094c },
-		{ 0x0104, 0x610984 },
-		{ 0x0108, 0x61098c },
-		{ 0x0800, 0x6109f8 },
-		{ 0x0808, 0x610a08 },
-		{ 0x080c, 0x610a10 },
-		{ 0x0810, 0x610a00 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &g84_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_disp_sclass[] = {
-	{ G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-g84_disp_main_oclass[] = {
-	{ G82_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-g84_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+g84_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &g84_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 2,
+	.sor.power = nv50_sor_power,
+	.sor.hdmi = g84_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
+};
+
+int
+g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = g84_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = g84_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 2;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hdmi = g84_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&g84_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-g84_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x82),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  nv50_disp_outp_sclass,
-	.mthd.core = &g84_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &g84_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
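[annotation] The rewritten g84.c is the template the rest of this series follows: the per-chipset ctor boilerplate collapses into one const nv50_disp_func descriptor plus a one-line _new() that hands it to a shared constructor. A toy model of that shape, under invented names:

#include <stdio.h>
#include <stdlib.h>

struct disp_func { const char *name; int dac_nr, sor_nr; };

struct disp { const struct disp_func *func; int heads; };

/* shared constructor: every chipset funnels through here */
static int disp_new_(const struct disp_func *func, int heads, struct disp **pdisp)
{
	struct disp *disp = calloc(1, sizeof(*disp));
	if (!disp)
		return -1;
	disp->func = func;
	disp->heads = heads;
	*pdisp = disp;
	return 0;
}

static const struct disp_func g84_disp = { .name = "g84", .dac_nr = 3, .sor_nr = 2 };

int g84_disp_new(struct disp **pdisp)
{
	return disp_new_(&g84_disp, 2, pdisp);   /* 2 heads on g84 */
}

int main(void)
{
	struct disp *disp;
	if (g84_disp_new(&disp))
		return 1;
	printf("%s: %d heads\n", disp->func->name, disp->heads);
	free(disp);
	return 0;
}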
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index 1ab0d0ae3cc8..7a7af3b478f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -22,118 +22,35 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "outpdp.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-g94_disp_core_mthd_sor = {
-	.mthd = 0x0040,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0600, 0x610794 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-g94_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &nv50_disp_core_mthd_base },
-		{    "DAC", 3, &g84_disp_core_mthd_dac  },
-		{    "SOR", 4, &g94_disp_core_mthd_sor  },
-		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
-		{   "HEAD", 2, &g84_disp_core_mthd_head },
-		{}
-	}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+g94_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &g94_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = g94_sor_dp_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hdmi = g84_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-g94_disp_sclass[] = {
-	{ GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-g94_disp_main_oclass[] = {
-	{ GT206_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-g94_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = g94_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = g94_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hdmi = g84_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&g94_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-g94_disp_outp_sclass[] = {
-	&nv50_pior_dp_impl.base.base,
-	&g94_sor_dp_impl.base.base,
-	NULL
-};
-
-struct nvkm_oclass *
-g94_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x88),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g94_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  g94_disp_outp_sclass,
-	.mthd.core = &g94_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &g84_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
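[annotation] g94 gets the same treatment, and its delta against g84 is now visible as pure data: two more SORs and one extra hook (outp.internal.dp = g94_sor_dp_new). A small sketch of how optional hooks like that are typically consumed -- assuming, as a labeled guess about the callers, that a NULL hook simply means the chipset lacks the feature:

#include <stdio.h>

struct outp_hooks {
	void (*tmds)(int or);
	void (*dp)(int or);      /* NULL on chipsets without internal DP */
};

static void sor_tmds(int or) { printf("TMDS on SOR%d\n", or); }
static void sor_dp(int or)   { printf("DP on SOR%d\n", or); }

static const struct outp_hooks g84_outp = { .tmds = sor_tmds };
static const struct outp_hooks g94_outp = { .tmds = sor_tmds, .dp = sor_dp };

static void probe(const struct outp_hooks *h, int or, int want_dp)
{
	if (want_dp && h->dp)
		h->dp(or);       /* only reachable where the hook exists */
	else
		h->tmds(or);
}

int main(void)
{
	probe(&g84_outp, 0, 1);  /* falls back: g84 fills in no internal DP */
	probe(&g94_outp, 0, 1);
	return 0;
}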
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
deleted file mode 100644
index 7f2f05f78cc8..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ /dev/null
@@ -1,1310 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-#include "outp.h"
-#include "outpdp.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <core/ramht.h>
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/bios/disp.h>
-#include <subdev/bios/init.h>
-#include <subdev/bios/pll.h>
-#include <subdev/devinit.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * EVO channel base class
- ******************************************************************************/
-
-static void
-gf110_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
-	nv_wr32(priv, 0x61008c, 0x00000001 << index);
-}
-
-static void
-gf110_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_wr32(priv, 0x61008c, 0x00000001 << index);
-	nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
-}
-
-const struct nvkm_event_func
-gf110_disp_chan_uevent = {
-	.ctor = nv50_disp_chan_uevent_ctor,
-	.init = gf110_disp_chan_uevent_init,
-	.fini = gf110_disp_chan_uevent_fini,
-};
-
-/*******************************************************************************
- * EVO DMA channel base class
- ******************************************************************************/
-
-static int
-gf110_disp_dmac_object_attach(struct nvkm_object *parent,
-			      struct nvkm_object *object, u32 name)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	struct nv50_disp_chan *chan = (void *)parent;
-	u32 addr = nv_gpuobj(object)->node->offset;
-	u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
-	return nvkm_ramht_insert(base->ramht, chan->chid, name, data);
-}
-
-static void
-gf110_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	nvkm_ramht_remove(base->ramht, cookie);
-}
-
-static int
-gf110_disp_dmac_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&dmac->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
-	nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
-	nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
-	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
-	nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
-		nv_error(dmac, "init: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-gf110_disp_dmac_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
-		nv_error(dmac, "fini: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notification */
-	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
-
-	return nv50_disp_chan_fini(&dmac->base, suspend);
-}
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x660080 },
-		{ 0x0084, 0x660084 },
-		{ 0x0088, 0x660088 },
-		{ 0x008c, 0x000000 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_dac = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0180, 0x660180 },
-		{ 0x0184, 0x660184 },
-		{ 0x0188, 0x660188 },
-		{ 0x0190, 0x660190 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_sor = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0200, 0x660200 },
-		{ 0x0204, 0x660204 },
-		{ 0x0208, 0x660208 },
-		{ 0x0210, 0x660210 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_pior = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0300, 0x660300 },
-		{ 0x0304, 0x660304 },
-		{ 0x0308, 0x660308 },
-		{ 0x0310, 0x660310 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_head = {
-	.mthd = 0x0300,
-	.addr = 0x000300,
-	.data = {
-		{ 0x0400, 0x660400 },
-		{ 0x0404, 0x660404 },
-		{ 0x0408, 0x660408 },
-		{ 0x040c, 0x66040c },
-		{ 0x0410, 0x660410 },
-		{ 0x0414, 0x660414 },
-		{ 0x0418, 0x660418 },
-		{ 0x041c, 0x66041c },
-		{ 0x0420, 0x660420 },
-		{ 0x0424, 0x660424 },
-		{ 0x0428, 0x660428 },
-		{ 0x042c, 0x66042c },
-		{ 0x0430, 0x660430 },
-		{ 0x0434, 0x660434 },
-		{ 0x0438, 0x660438 },
-		{ 0x0440, 0x660440 },
-		{ 0x0444, 0x660444 },
-		{ 0x0448, 0x660448 },
-		{ 0x044c, 0x66044c },
-		{ 0x0450, 0x660450 },
-		{ 0x0454, 0x660454 },
-		{ 0x0458, 0x660458 },
-		{ 0x045c, 0x66045c },
-		{ 0x0460, 0x660460 },
-		{ 0x0468, 0x660468 },
-		{ 0x046c, 0x66046c },
-		{ 0x0470, 0x660470 },
-		{ 0x0474, 0x660474 },
-		{ 0x0480, 0x660480 },
-		{ 0x0484, 0x660484 },
-		{ 0x048c, 0x66048c },
-		{ 0x0490, 0x660490 },
-		{ 0x0494, 0x660494 },
-		{ 0x0498, 0x660498 },
-		{ 0x04b0, 0x6604b0 },
-		{ 0x04b8, 0x6604b8 },
-		{ 0x04bc, 0x6604bc },
-		{ 0x04c0, 0x6604c0 },
-		{ 0x04c4, 0x6604c4 },
-		{ 0x04c8, 0x6604c8 },
-		{ 0x04d0, 0x6604d0 },
-		{ 0x04d4, 0x6604d4 },
-		{ 0x04e0, 0x6604e0 },
-		{ 0x04e4, 0x6604e4 },
-		{ 0x04e8, 0x6604e8 },
-		{ 0x04ec, 0x6604ec },
-		{ 0x04f0, 0x6604f0 },
-		{ 0x04f4, 0x6604f4 },
-		{ 0x04f8, 0x6604f8 },
-		{ 0x04fc, 0x6604fc },
-		{ 0x0500, 0x660500 },
-		{ 0x0504, 0x660504 },
-		{ 0x0508, 0x660508 },
-		{ 0x050c, 0x66050c },
-		{ 0x0510, 0x660510 },
-		{ 0x0514, 0x660514 },
-		{ 0x0518, 0x660518 },
-		{ 0x051c, 0x66051c },
-		{ 0x052c, 0x66052c },
-		{ 0x0530, 0x660530 },
-		{ 0x054c, 0x66054c },
-		{ 0x0550, 0x660550 },
-		{ 0x0554, 0x660554 },
-		{ 0x0558, 0x660558 },
-		{ 0x055c, 0x66055c },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-gf110_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &gf110_disp_core_mthd_base },
-		{    "DAC", 3, &gf110_disp_core_mthd_dac  },
-		{    "SOR", 8, &gf110_disp_core_mthd_sor  },
-		{   "PIOR", 4, &gf110_disp_core_mthd_pior },
-		{   "HEAD", 4, &gf110_disp_core_mthd_head },
-		{}
-	}
-};
-
-static int
-gf110_disp_core_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-	int ret;
-
-	ret = nv50_disp_chan_init(&mast->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610494, mast->push);
-	nv_wr32(priv, 0x610498, 0x00010000);
-	nv_wr32(priv, 0x61049c, 0x00000001);
-	nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000, 0x00000000);
-	nv_wr32(priv, 0x610490, 0x01000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
-		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-gf110_disp_core_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
-	nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
-		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notification */
-	nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
-	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
-
-	return nv50_disp_chan_fini(&mast->base, suspend);
-}
-
-struct nv50_disp_chan_impl
-gf110_disp_core_ofuncs = {
-	.base.ctor = nv50_disp_core_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = gf110_disp_core_init,
-	.base.fini = gf110_disp_core_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 0,
-	.attach = gf110_disp_dmac_object_attach,
-	.detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gf110_disp_base_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x661080 },
-		{ 0x0084, 0x661084 },
-		{ 0x0088, 0x661088 },
-		{ 0x008c, 0x66108c },
-		{ 0x0090, 0x661090 },
-		{ 0x0094, 0x661094 },
-		{ 0x00a0, 0x6610a0 },
-		{ 0x00a4, 0x6610a4 },
-		{ 0x00c0, 0x6610c0 },
-		{ 0x00c4, 0x6610c4 },
-		{ 0x00c8, 0x6610c8 },
-		{ 0x00cc, 0x6610cc },
-		{ 0x00e0, 0x6610e0 },
-		{ 0x00e4, 0x6610e4 },
-		{ 0x00e8, 0x6610e8 },
-		{ 0x00ec, 0x6610ec },
-		{ 0x00fc, 0x6610fc },
-		{ 0x0100, 0x661100 },
-		{ 0x0104, 0x661104 },
-		{ 0x0108, 0x661108 },
-		{ 0x010c, 0x66110c },
-		{ 0x0110, 0x661110 },
-		{ 0x0114, 0x661114 },
-		{ 0x0118, 0x661118 },
-		{ 0x011c, 0x66111c },
-		{ 0x0130, 0x661130 },
-		{ 0x0134, 0x661134 },
-		{ 0x0138, 0x661138 },
-		{ 0x013c, 0x66113c },
-		{ 0x0140, 0x661140 },
-		{ 0x0144, 0x661144 },
-		{ 0x0148, 0x661148 },
-		{ 0x014c, 0x66114c },
-		{ 0x0150, 0x661150 },
-		{ 0x0154, 0x661154 },
-		{ 0x0158, 0x661158 },
-		{ 0x015c, 0x66115c },
-		{ 0x0160, 0x661160 },
-		{ 0x0164, 0x661164 },
-		{ 0x0168, 0x661168 },
-		{ 0x016c, 0x66116c },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-gf110_disp_base_mthd_image = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0400, 0x661400 },
-		{ 0x0404, 0x661404 },
-		{ 0x0408, 0x661408 },
-		{ 0x040c, 0x66140c },
-		{ 0x0410, 0x661410 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-gf110_disp_base_mthd_chan = {
-	.name = "Base",
-	.addr = 0x001000,
-	.data = {
-		{ "Global", 1, &gf110_disp_base_mthd_base },
-		{  "Image", 2, &gf110_disp_base_mthd_image },
-		{}
-	}
-};
-
-struct nv50_disp_chan_impl
-gf110_disp_base_ofuncs = {
-	.base.ctor = nv50_disp_base_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = gf110_disp_dmac_init,
-	.base.fini = gf110_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 1,
-	.attach = gf110_disp_dmac_object_attach,
-	.detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gf110_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.data = {
-		{ 0x0080, 0x665080 },
-		{ 0x0084, 0x665084 },
-		{ 0x0088, 0x665088 },
-		{ 0x008c, 0x66508c },
-		{ 0x0090, 0x665090 },
-		{ 0x0094, 0x665094 },
-		{ 0x00a0, 0x6650a0 },
-		{ 0x00a4, 0x6650a4 },
-		{ 0x00b0, 0x6650b0 },
-		{ 0x00b4, 0x6650b4 },
-		{ 0x00b8, 0x6650b8 },
-		{ 0x00c0, 0x6650c0 },
-		{ 0x00e0, 0x6650e0 },
-		{ 0x00e4, 0x6650e4 },
-		{ 0x00e8, 0x6650e8 },
-		{ 0x0100, 0x665100 },
-		{ 0x0104, 0x665104 },
-		{ 0x0108, 0x665108 },
-		{ 0x010c, 0x66510c },
-		{ 0x0110, 0x665110 },
-		{ 0x0118, 0x665118 },
-		{ 0x011c, 0x66511c },
-		{ 0x0120, 0x665120 },
-		{ 0x0124, 0x665124 },
-		{ 0x0130, 0x665130 },
-		{ 0x0134, 0x665134 },
-		{ 0x0138, 0x665138 },
-		{ 0x013c, 0x66513c },
-		{ 0x0140, 0x665140 },
-		{ 0x0144, 0x665144 },
-		{ 0x0148, 0x665148 },
-		{ 0x014c, 0x66514c },
-		{ 0x0150, 0x665150 },
-		{ 0x0154, 0x665154 },
-		{ 0x0158, 0x665158 },
-		{ 0x015c, 0x66515c },
-		{ 0x0160, 0x665160 },
-		{ 0x0164, 0x665164 },
-		{ 0x0168, 0x665168 },
-		{ 0x016c, 0x66516c },
-		{ 0x0400, 0x665400 },
-		{ 0x0408, 0x665408 },
-		{ 0x040c, 0x66540c },
-		{ 0x0410, 0x665410 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-gf110_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x001000,
-	.data = {
-		{ "Global", 1, &gf110_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-struct nv50_disp_chan_impl
-gf110_disp_ovly_ofuncs = {
-	.base.ctor = nv50_disp_ovly_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = gf110_disp_dmac_init,
-	.base.fini = gf110_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 5,
-	.attach = gf110_disp_dmac_object_attach,
-	.detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO PIO channel base class
- ******************************************************************************/
-
-static int
-gf110_disp_pioc_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&pioc->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
-
-	/* activate channel */
-	nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
-		nv_error(pioc, "init: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-gf110_disp_pioc_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-
-	nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
-		nv_error(pioc, "timeout: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notification */
-	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
-
-	return nv50_disp_chan_fini(&pioc->base, suspend);
-}
-
-/*******************************************************************************
- * EVO immediate overlay channel objects
- ******************************************************************************/
-
-struct nv50_disp_chan_impl
-gf110_disp_oimm_ofuncs = {
-	.base.ctor = nv50_disp_oimm_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = gf110_disp_pioc_init,
-	.base.fini = gf110_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 9,
-};
-
-/*******************************************************************************
- * EVO cursor channel objects
- ******************************************************************************/
-
-struct nv50_disp_chan_impl
-gf110_disp_curs_ofuncs = {
-	.base.ctor = nv50_disp_curs_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = gf110_disp_pioc_init,
-	.base.fini = gf110_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 13,
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-int
-gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
-{
-	const u32 total  = nv_rd32(priv, 0x640414 + (head * 0x300));
-	const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
-	const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
-	union {
-		struct nv04_disp_scanoutpos_v0 v0;
-	} *args = data;
-	int ret;
-
-	nv_ioctl(object, "disp scanoutpos size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
-		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
-		args->v0.hblanke = (blanke & 0x0000ffff);
-		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
-		args->v0.hblanks = (blanks & 0x0000ffff);
-		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
-		args->v0.htotal  = ( total & 0x0000ffff);
-		args->v0.time[0] = ktime_to_ns(ktime_get());
-		args->v0.vline = /* vline read locks hline */
-			nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
-		args->v0.time[1] = ktime_to_ns(ktime_get());
-		args->v0.hline =
-			nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
-	} else
-		return ret;
-
-	return 0;
-}
-
-static int
-gf110_disp_main_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-	int ret, i;
-	u32 tmp;
-
-	ret = nvkm_parent_init(&base->base);
-	if (ret)
-		return ret;
-
-	/* The below segments of code copying values from one register to
-	 * another appear to inform EVO of the display capabilities or
-	 * something similar.
-	 */
-
-	/* ... CRTC caps */
-	for (i = 0; i < priv->head.nr; i++) {
-		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
-		nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
-		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
-		nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
-		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
-		nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
-	}
-
-	/* ... DAC caps */
-	for (i = 0; i < priv->dac.nr; i++) {
-		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
-		nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
-	}
-
-	/* ... SOR caps */
-	for (i = 0; i < priv->sor.nr; i++) {
-		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
-		nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
-	}
-
-	/* steal display away from vbios, or something like that */
-	if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
-		nv_wr32(priv, 0x6100ac, 0x00000100);
-		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
-		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
-			nv_error(priv, "timeout acquiring display\n");
-			return -EBUSY;
-		}
-	}
-
-	/* point at display engine memory area (hash table, objects) */
-	nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
-
-	/* enable supervisor interrupts, disable everything else */
-	nv_wr32(priv, 0x610090, 0x00000000);
-	nv_wr32(priv, 0x6100a0, 0x00000000);
-	nv_wr32(priv, 0x6100b0, 0x00000307);
-
-	/* disable underflow reporting, preventing an intermittent issue
-	 * on some gk104 boards where the production vbios left this
-	 * setting enabled by default.
-	 *
-	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
-	 */
-	for (i = 0; i < priv->head.nr; i++)
-		nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
-
-	return 0;
-}
-
-static int
-gf110_disp_main_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-
-	/* disable all interrupts */
-	nv_wr32(priv, 0x6100b0, 0x00000000);
-
-	return nvkm_parent_fini(&base->base, suspend);
-}
-
-struct nvkm_ofuncs
-gf110_disp_main_ofuncs = {
-	.ctor = nv50_disp_main_ctor,
-	.dtor = nv50_disp_main_dtor,
-	.init = gf110_disp_main_init,
-	.fini = gf110_disp_main_fini,
-	.mthd = nv50_disp_main_mthd,
-	.ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-gf110_disp_main_oclass[] = {
-	{ GF110_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-gf110_disp_sclass[] = {
-	{ GF110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GF110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GF110_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GF110_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GF110_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static void
-gf110_disp_vblank_init(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
-}
-
-static void
-gf110_disp_vblank_fini(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
-}
-
-const struct nvkm_event_func
-gf110_disp_vblank_func = {
-	.ctor = nvkm_disp_vblank_ctor,
-	.init = gf110_disp_vblank_init,
-	.fini = gf110_disp_vblank_fini,
-};
-
-static struct nvkm_output *
-exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
-	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
-	    struct nvbios_outp *info)
-{
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	struct nvkm_output *outp;
-	u16 mask, type;
-
-	if (or < 4) {
-		type = DCB_OUTPUT_ANALOG;
-		mask = 0;
-	} else {
-		or -= 4;
-		switch (ctrl & 0x00000f00) {
-		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
-		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
-		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
-		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
-		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
-		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
-		default:
-			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
-			return NULL;
-		}
-	}
-
-	mask  = 0x00c0 & (mask << 6);
-	mask |= 0x0001 << or;
-	mask |= 0x0100 << head;
-
-	list_for_each_entry(outp, &priv->base.outp, head) {
-		if ((outp->info.hasht & 0xff) == type &&
-		    (outp->info.hashm & mask) == mask) {
-			*data = nvbios_outp_match(bios, outp->info.hasht,
-							outp->info.hashm,
-						  ver, hdr, cnt, len, info);
-			if (!*data)
-				return NULL;
-			return outp;
-		}
-	}
-
-	return NULL;
-}
-
-static struct nvkm_output *
-exec_script(struct nv50_disp_priv *priv, int head, int id)
-{
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	struct nvkm_output *outp;
-	struct nvbios_outp info;
-	u8  ver, hdr, cnt, len;
-	u32 data, ctrl = 0;
-	int or;
-
-	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
-		ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
-		if (ctrl & (1 << head))
-			break;
-	}
-
-	if (or == 8)
-		return NULL;
-
-	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
-	if (outp) {
-		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
-			.bios = bios,
-			.offset = info.script[id],
-			.outp = &outp->info,
-			.crtc = head,
-			.execute = 1,
-		};
-
-		nvbios_exec(&init);
-	}
-
-	return outp;
-}
-
-static struct nvkm_output *
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
-{
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	struct nvkm_output *outp;
-	struct nvbios_outp info1;
-	struct nvbios_ocfg info2;
-	u8  ver, hdr, cnt, len;
-	u32 data, ctrl = 0;
-	int or;
-
-	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
-		ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
-		if (ctrl & (1 << head))
-			break;
-	}
-
-	if (or == 8)
-		return NULL;
-
-	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
-	if (!outp)
-		return NULL;
-
-	switch (outp->info.type) {
-	case DCB_OUTPUT_TMDS:
-		*conf = (ctrl & 0x00000f00) >> 8;
-		if (pclk >= 165000)
-			*conf |= 0x0100;
-		break;
-	case DCB_OUTPUT_LVDS:
-		*conf = priv->sor.lvdsconf;
-		break;
-	case DCB_OUTPUT_DP:
-		*conf = (ctrl & 0x00000f00) >> 8;
-		break;
-	case DCB_OUTPUT_ANALOG:
-	default:
-		*conf = 0x00ff;
-		break;
-	}
-
-	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
-	if (data && id < 0xff) {
-		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
-		if (data) {
-			struct nvbios_init init = {
-				.subdev = nv_subdev(priv),
-				.bios = bios,
-				.offset = data,
-				.outp = &outp->info,
-				.crtc = head,
-				.execute = 1,
-			};
-
-			nvbios_exec(&init);
-		}
-	}
-
-	return outp;
-}
-
-static void
-gf110_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
-{
-	exec_script(priv, head, 1);
-}
-
-static void
-gf110_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
-{
-	struct nvkm_output *outp = exec_script(priv, head, 2);
-
-	/* see note in nv50_disp_intr_unk20_0() */
-	if (outp && outp->info.type == DCB_OUTPUT_DP) {
-		struct nvkm_output_dp *outpdp = (void *)outp;
-		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
-			.bios = nvkm_bios(priv),
-			.outp = &outp->info,
-			.crtc = head,
-			.offset = outpdp->info.script[4],
-			.execute = 1,
-		};
-
-		nvbios_exec(&init);
-		atomic_set(&outpdp->lt.done, 0);
-	}
-}
-
-static void
-gf110_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
-{
-	struct nvkm_devinit *devinit = nvkm_devinit(priv);
-	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	if (pclk)
-		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
-	nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
-}
-
-static void
-gf110_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
-			  struct dcb_output *outp)
-{
-	const int or = ffs(outp->or) - 1;
-	const u32 ctrl = nv_rd32(priv, 0x660200 + (or   * 0x020));
-	const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
-	const s32 vactive = nv_rd32(priv, 0x660414 + (head * 0x300)) & 0xffff;
-	const s32 vblanke = nv_rd32(priv, 0x66041c + (head * 0x300)) & 0xffff;
-	const s32 vblanks = nv_rd32(priv, 0x660420 + (head * 0x300)) & 0xffff;
-	const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
-	const u32 hoff = (head * 0x800);
-	const u32 soff = (  or * 0x800);
-	const u32 loff = (link * 0x080) + soff;
-	const u32 symbol = 100000;
-	const u32 TU = 64;
-	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
-	u32 clksor = nv_rd32(priv, 0x612300 + soff);
-	u32 datarate, link_nr, link_bw, bits;
-	u64 ratio, value;
-
-	link_nr  = hweight32(dpctrl & 0x000f0000);
-	link_bw  = (clksor & 0x007c0000) >> 18;
-	link_bw *= 27000;
-
-	/* symbols/hblank - algorithm taken from comments in tegra driver */
-	value = vblanke + vactive - vblanks - 7;
-	value = value * link_bw;
-	do_div(value, pclk);
-	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
-	nv_mask(priv, 0x616620 + hoff, 0x0000ffff, value);
-
-	/* symbols/vblank - algorithm taken from comments in tegra driver */
-	value = vblanks - vblanke - 25;
-	value = value * link_bw;
-	do_div(value, pclk);
-	value = value - ((36 / link_nr) + 3) - 1;
-	nv_mask(priv, 0x616624 + hoff, 0x00ffffff, value);
-
-	/* watermark */
-	if      ((conf & 0x3c0) == 0x180) bits = 30;
-	else if ((conf & 0x3c0) == 0x140) bits = 24;
-	else                              bits = 18;
-	datarate = (pclk * bits) / 8;
-
-	ratio  = datarate;
-	ratio *= symbol;
-	do_div(ratio, link_nr * link_bw);
-
-	value  = (symbol - ratio) * TU;
-	value *= ratio;
-	do_div(value, symbol);
-	do_div(value, symbol);
-
-	value += 5;
-	value |= 0x08000000;
-
-	nv_wr32(priv, 0x616610 + hoff, value);
-}
-
-static void
-gf110_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
-{
-	struct nvkm_output *outp;
-	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	u32 conf, addr, data;
-
-	outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
-	if (!outp)
-		return;
-
-	/* see note in nv50_disp_intr_unk20_2() */
-	if (outp->info.type == DCB_OUTPUT_DP) {
-		u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
-		switch ((sync & 0x000003c0) >> 6) {
-		case 6: pclk = pclk * 30; break;
-		case 5: pclk = pclk * 24; break;
-		case 2:
-		default:
-			pclk = pclk * 18;
-			break;
-		}
-
-		if (nvkm_output_dp_train(outp, pclk, true))
-			ERR("link not trained before attach\n");
-	} else {
-		if (priv->sor.magic)
-			priv->sor.magic(outp);
-	}
-
-	exec_clkcmp(priv, head, 0, pclk, &conf);
-
-	if (outp->info.type == DCB_OUTPUT_ANALOG) {
-		addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
-		data = 0x00000000;
-	} else {
-		addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
-		data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
-		switch (outp->info.type) {
-		case DCB_OUTPUT_TMDS:
-			nv_mask(priv, addr, 0x007c0000, 0x00280000);
-			break;
-		case DCB_OUTPUT_DP:
-			gf110_disp_intr_unk2_2_tu(priv, head, &outp->info);
-			break;
-		default:
-			break;
-		}
-	}
-
-	nv_mask(priv, addr, 0x00000707, data);
-}
-
-static void
-gf110_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
-{
-	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	u32 conf;
-
-	exec_clkcmp(priv, head, 1, pclk, &conf);
-}
-
-void
-gf110_disp_intr_supervisor(struct work_struct *work)
-{
-	struct nv50_disp_priv *priv =
-		container_of(work, struct nv50_disp_priv, supervisor);
-	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 mask[4];
-	int head;
-
-	nv_debug(priv, "supervisor %d\n", ffs(priv->super));
-	for (head = 0; head < priv->head.nr; head++) {
-		mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
-		nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
-	}
-
-	if (priv->super & 0x00000001) {
-		nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 1.0 - head %d\n", head);
-			gf110_disp_intr_unk1_0(priv, head);
-		}
-	} else
-	if (priv->super & 0x00000002) {
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 2.0 - head %d\n", head);
-			gf110_disp_intr_unk2_0(priv, head);
-		}
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00010000))
-				continue;
-			nv_debug(priv, "supervisor 2.1 - head %d\n", head);
-			gf110_disp_intr_unk2_1(priv, head);
-		}
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 2.2 - head %d\n", head);
-			gf110_disp_intr_unk2_2(priv, head);
-		}
-	} else
-	if (priv->super & 0x00000004) {
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 3.0 - head %d\n", head);
-			gf110_disp_intr_unk4_0(priv, head);
-		}
-	}
-
-	for (head = 0; head < priv->head.nr; head++)
-		nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
-	nv_wr32(priv, 0x6101d0, 0x80000000);
-}
-
-static void
-gf110_disp_intr_error(struct nv50_disp_priv *priv, int chid)
-{
-	const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
-	u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
-	u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
-
-	nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
-		       "0x%08x 0x%08x\n",
-		 chid, (mthd & 0x0000ffc), data, mthd, unkn);
-
-	if (chid == 0) {
-		switch (mthd & 0xffc) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
-					    impl->mthd.core);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 4) {
-		switch (mthd & 0xffc) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
-					    impl->mthd.base);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 8) {
-		switch (mthd & 0xffc) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
-					    impl->mthd.ovly);
-			break;
-		default:
-			break;
-		}
-	}
-
-	nv_wr32(priv, 0x61009c, (1 << chid));
-	nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
-}
-
-void
-gf110_disp_intr(struct nvkm_subdev *subdev)
-{
-	struct nv50_disp_priv *priv = (void *)subdev;
-	u32 intr = nv_rd32(priv, 0x610088);
-	int i;
-
-	if (intr & 0x00000001) {
-		u32 stat = nv_rd32(priv, 0x61008c);
-		while (stat) {
-			int chid = __ffs(stat); stat &= ~(1 << chid);
-			nv50_disp_chan_uevent_send(priv, chid);
-			nv_wr32(priv, 0x61008c, 1 << chid);
-		}
-		intr &= ~0x00000001;
-	}
-
-	if (intr & 0x00000002) {
-		u32 stat = nv_rd32(priv, 0x61009c);
-		int chid = ffs(stat) - 1;
-		if (chid >= 0)
-			gf110_disp_intr_error(priv, chid);
-		intr &= ~0x00000002;
-	}
-
-	if (intr & 0x00100000) {
-		u32 stat = nv_rd32(priv, 0x6100ac);
-		if (stat & 0x00000007) {
-			priv->super = (stat & 0x00000007);
-			schedule_work(&priv->supervisor);
-			nv_wr32(priv, 0x6100ac, priv->super);
-			stat &= ~0x00000007;
-		}
-
-		if (stat) {
-			nv_info(priv, "unknown intr24 0x%08x\n", stat);
-			nv_wr32(priv, 0x6100ac, stat);
-		}
-
-		intr &= ~0x00100000;
-	}
-
-	for (i = 0; i < priv->head.nr; i++) {
-		u32 mask = 0x01000000 << i;
-		if (mask & intr) {
-			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
-			if (stat & 0x00000001)
-				nvkm_disp_vblank(&priv->base, i);
-			nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
-			nv_rd32(priv, 0x6100c0 + (i * 0x800));
-		}
-	}
-}
-
-static int
-gf110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gf110_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gf110_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gf110_hdmi_ctrl;
-	return 0;
-}
-
-struct nvkm_oclass *
-gf110_disp_outp_sclass[] = {
-	&gf110_sor_dp_impl.base.base,
-	NULL
-};
-
-struct nvkm_oclass *
-gf110_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x90),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf110_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gf110_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gf110_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
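[annotation] The old gf110.c is deleted outright and reborn below as gf119.c, named for the actual chipset; along the way, every nv_rd32/nv_wr32/nv_mask on the engine object becomes nvkm_rd32/nvkm_wr32/nvkm_mask on an explicit device pointer. A userspace model of that accessor trio, illustrative only -- the backing array stands in for MMIO:

#include <stdint.h>
#include <stdio.h>

struct device { uint32_t regs[0x800]; };   /* stand-in for the MMIO window */

static uint32_t rd32(struct device *d, uint32_t addr) { return d->regs[addr / 4]; }
static void wr32(struct device *d, uint32_t addr, uint32_t v) { d->regs[addr / 4] = v; }

/* read-modify-write returning the old value, the shape of nvkm_mask() */
static uint32_t mask(struct device *d, uint32_t addr, uint32_t m, uint32_t v)
{
	uint32_t t = rd32(d, addr);
	wr32(d, addr, (t & ~m) | v);
	return t;
}

int main(void)
{
	struct device dev = { { 0 } };
	wr32(&dev, 0x100, 0xffff0000);
	mask(&dev, 0x100, 0x00000001, 0x00000001);   /* set the enable bit */
	printf("%08x\n", rd32(&dev, 0x100));         /* prints ffff0001 */
	return 0;
}

Passing the device explicitly is what lets the new vblank_init/vblank_fini hooks below drop the container_of() gymnastics the deleted versions needed.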
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
new file mode 100644
index 000000000000..186fd3ac78f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/devinit.h>
+
+void
+gf119_disp_vblank_init(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+void
+gf119_disp_vblank_fini(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
+static struct nvkm_output *
+exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
+	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	    struct nvbios_outp *info)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
+	struct nvkm_output *outp;
+	u16 mask, type;
+
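+	/* ORs 0-3 are DACs; SORs start at OR 4, with the active protocol
+	 * decoded from the SOR_MC control value below. */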
+	if (or < 4) {
+		type = DCB_OUTPUT_ANALOG;
+		mask = 0;
+	} else {
+		or -= 4;
+		switch (ctrl & 0x00000f00) {
+		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+		default:
+			nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
+			return NULL;
+		}
+	}
+
+	mask  = 0x00c0 & (mask << 6);
+	mask |= 0x0001 << or;
+	mask |= 0x0100 << head;
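+	/* hashm layout: bits 3:0 = OR, bits 7:6 = link, bits 11:8 = head */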
+
+	list_for_each_entry(outp, &disp->base.outp, head) {
+		if ((outp->info.hasht & 0xff) == type &&
+		    (outp->info.hashm & mask) == mask) {
+			*data = nvbios_outp_match(bios, outp->info.hasht,
+						  outp->info.hashm,
+						  ver, hdr, cnt, len, info);
+			if (!*data)
+				return NULL;
+			return outp;
+		}
+	}
+
+	return NULL;
+}
+
+static struct nvkm_output *
+exec_script(struct nv50_disp *disp, int head, int id)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_output *outp;
+	struct nvbios_outp info;
+	u8  ver, hdr, cnt, len;
+	u32 data, ctrl = 0;
+	int or;
+
+	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+		ctrl = nvkm_rd32(device, 0x640180 + (or * 0x20));
+		if (ctrl & (1 << head))
+			break;
+	}
+
+	if (or == 8)
+		return NULL;
+
+	outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+	if (outp) {
+		struct nvbios_init init = {
+			.subdev = subdev,
+			.bios = bios,
+			.offset = info.script[id],
+			.outp = &outp->info,
+			.crtc = head,
+			.execute = 1,
+		};
+
+		nvbios_exec(&init);
+	}
+
+	return outp;
+}
+
+static struct nvkm_output *
+exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_output *outp;
+	struct nvbios_outp info1;
+	struct nvbios_ocfg info2;
+	u8  ver, hdr, cnt, len;
+	u32 data, ctrl = 0;
+	int or;
+
+	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+		ctrl = nvkm_rd32(device, 0x660180 + (or * 0x20));
+		if (ctrl & (1 << head))
+			break;
+	}
+
+	if (or == 8)
+		return NULL;
+
+	outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+	if (!outp)
+		return NULL;
+
+	switch (outp->info.type) {
+	case DCB_OUTPUT_TMDS:
+		*conf = (ctrl & 0x00000f00) >> 8;
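+		/* single-link TMDS tops out at 165MHz; flag dual-link above */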
+		if (pclk >= 165000)
+			*conf |= 0x0100;
+		break;
+	case DCB_OUTPUT_LVDS:
+		*conf = disp->sor.lvdsconf;
+		break;
+	case DCB_OUTPUT_DP:
+		*conf = (ctrl & 0x00000f00) >> 8;
+		break;
+	case DCB_OUTPUT_ANALOG:
+	default:
+		*conf = 0x00ff;
+		break;
+	}
+
+	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
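+	/* id == 0xff requests a config lookup only; no clock script is run */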
+	if (data && id < 0xff) {
+		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+		if (data) {
+			struct nvbios_init init = {
+				.subdev = subdev,
+				.bios = bios,
+				.offset = data,
+				.outp = &outp->info,
+				.crtc = head,
+				.execute = 1,
+			};
+
+			nvbios_exec(&init);
+		}
+	}
+
+	return outp;
+}
+
+static void
+gf119_disp_intr_unk1_0(struct nv50_disp *disp, int head)
+{
+	exec_script(disp, head, 1);
+}
+
+static void
+gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_output *outp = exec_script(disp, head, 2);
+
+	/* see note in nv50_disp_intr_unk20_0() */
+	if (outp && outp->info.type == DCB_OUTPUT_DP) {
+		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+		struct nvbios_init init = {
+			.subdev = subdev,
+			.bios = subdev->device->bios,
+			.outp = &outp->info,
+			.crtc = head,
+			.offset = outpdp->info.script[4],
+			.execute = 1,
+		};
+
+		nvbios_exec(&init);
+		atomic_set(&outpdp->lt.done, 0);
+	}
+}
+
+static void
+gf119_disp_intr_unk2_1(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_devinit *devinit = device->devinit;
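+	/* reprogram this head's VPLL to the requested pixel clock (kHz) */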
+	u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	if (pclk)
+		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
+	nvkm_wr32(device, 0x612200 + (head * 0x800), 0x00000000);
+}
+
+static void
+gf119_disp_intr_unk2_2_tu(struct nv50_disp *disp, int head,
+			  struct dcb_output *outp)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const int or = ffs(outp->or) - 1;
+	const u32 ctrl = nvkm_rd32(device, 0x660200 + (or   * 0x020));
+	const u32 conf = nvkm_rd32(device, 0x660404 + (head * 0x300));
+	const s32 vactive = nvkm_rd32(device, 0x660414 + (head * 0x300)) & 0xffff;
+	const s32 vblanke = nvkm_rd32(device, 0x66041c + (head * 0x300)) & 0xffff;
+	const s32 vblanks = nvkm_rd32(device, 0x660420 + (head * 0x300)) & 0xffff;
+	const u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+	const u32 hoff = (head * 0x800);
+	const u32 soff = (  or * 0x800);
+	const u32 loff = (link * 0x080) + soff;
+	const u32 symbol = 100000;
+	const u32 TU = 64;
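+	/* TU is the DP transfer unit size in symbols; "symbol" is the
+	 * fixed-point scale factor for the ratio arithmetic below */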
+	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
+	u32 clksor = nvkm_rd32(device, 0x612300 + soff);
+	u32 datarate, link_nr, link_bw, bits;
+	u64 ratio, value;
+
+	link_nr  = hweight32(dpctrl & 0x000f0000);
+	link_bw  = (clksor & 0x007c0000) >> 18;
+	link_bw *= 27000;
+
+	/* symbols/hblank - algorithm taken from comments in tegra driver */
+	value = vblanke + vactive - vblanks - 7;
+	value = value * link_bw;
+	do_div(value, pclk);
+	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
+	nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, value);
+
+	/* symbols/vblank - algorithm taken from comments in tegra driver */
+	value = vblanks - vblanke - 25;
+	value = value * link_bw;
+	do_div(value, pclk);
+	value = value - ((36 / link_nr) + 3) - 1;
+	nvkm_mask(device, 0x616624 + hoff, 0x00ffffff, value);
+
+	/* watermark */
+	if      ((conf & 0x3c0) == 0x180) bits = 30;
+	else if ((conf & 0x3c0) == 0x140) bits = 24;
+	else                              bits = 18;
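+	/* average stream rate: pclk (kHz) * bits-per-pixel / 8 => kB/s */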
+	datarate = (pclk * bits) / 8;
+
+	ratio  = datarate;
+	ratio *= symbol;
+	do_div(ratio, link_nr * link_bw);
+
+	value  = (symbol - ratio) * TU;
+	value *= ratio;
+	do_div(value, symbol);
+	do_div(value, symbol);
+
+	value += 5;
+	value |= 0x08000000;
+
+	nvkm_wr32(device, 0x616610 + hoff, value);
+}
+
+static void
+gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_output *outp;
+	u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	u32 conf, addr, data;
+
+	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
+	if (!outp)
+		return;
+
+	/* see note in nv50_disp_intr_unk20_2() */
+	if (outp->info.type == DCB_OUTPUT_DP) {
+		u32 sync = nvkm_rd32(device, 0x660404 + (head * 0x300));
+		switch ((sync & 0x000003c0) >> 6) {
+		case 6: pclk = pclk * 30; break;
+		case 5: pclk = pclk * 24; break;
+		case 2:
+		default:
+			pclk = pclk * 18;
+			break;
+		}
+
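+		/* pclk now carries the stream data rate (pixel clock scaled
+		 * by bits per pixel), which link training validates against
+		 * the configured link bandwidth */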
+		if (nvkm_output_dp_train(outp, pclk, true))
+			OUTP_ERR(outp, "link not trained before attach");
+	} else {
+		if (disp->func->sor.magic)
+			disp->func->sor.magic(outp);
+	}
+
+	exec_clkcmp(disp, head, 0, pclk, &conf);
+
+	if (outp->info.type == DCB_OUTPUT_ANALOG) {
+		addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
+		data = 0x00000000;
+	} else {
+		addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
+		data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+		switch (outp->info.type) {
+		case DCB_OUTPUT_TMDS:
+			nvkm_mask(device, addr, 0x007c0000, 0x00280000);
+			break;
+		case DCB_OUTPUT_DP:
+			gf119_disp_intr_unk2_2_tu(disp, head, &outp->info);
+			break;
+		default:
+			break;
+		}
+	}
+
+	nvkm_mask(device, addr, 0x00000707, data);
+}
+
+static void
+gf119_disp_intr_unk4_0(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	u32 conf;
+
+	exec_clkcmp(disp, head, 1, pclk, &conf);
+}
+
+void
+gf119_disp_intr_supervisor(struct work_struct *work)
+{
+	struct nv50_disp *disp =
+		container_of(work, struct nv50_disp, supervisor);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask[4];
+	int head;
+
+	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
+	for (head = 0; head < disp->base.head.nr; head++) {
+		mask[head] = nvkm_rd32(device, 0x6101d4 + (head * 0x800));
+		nvkm_debug(subdev, "head %d: %08x\n", head, mask[head]);
+	}
+
+	if (disp->super & 0x00000001) {
+		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 1.0 - head %d\n", head);
+			gf119_disp_intr_unk1_0(disp, head);
+		}
+	} else
+	if (disp->super & 0x00000002) {
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 2.0 - head %d\n", head);
+			gf119_disp_intr_unk2_0(disp, head);
+		}
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00010000))
+				continue;
+			nvkm_debug(subdev, "supervisor 2.1 - head %d\n", head);
+			gf119_disp_intr_unk2_1(disp, head);
+		}
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 2.2 - head %d\n", head);
+			gf119_disp_intr_unk2_2(disp, head);
+		}
+	} else
+	if (disp->super & 0x00000004) {
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 3.0 - head %d\n", head);
+			gf119_disp_intr_unk4_0(disp, head);
+		}
+	}
+
+	for (head = 0; head < disp->base.head.nr; head++)
+		nvkm_wr32(device, 0x6101d4 + (head * 0x800), 0x00000000);
+	nvkm_wr32(device, 0x6101d0, 0x80000000);
+}
+
+static void
+gf119_disp_intr_error(struct nv50_disp *disp, int chid)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
+	u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
+	u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));
+
+	nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
+		   chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
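+	/* a fault on method 0x0080 dumps the channel's full method state */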
+	if (chid < ARRAY_SIZE(disp->chan)) {
+		switch (mthd & 0xffc) {
+		case 0x0080:
+			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
+			break;
+		default:
+			break;
+		}
+	}
+
+	nvkm_wr32(device, 0x61009c, (1 << chid));
+	nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
+}
+
+void
+gf119_disp_intr(struct nv50_disp *disp)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x610088);
+	int i;
+
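+	/* bit 0: channel notifications, bit 1: channel errors,
+	 * bit 20: supervisor request, bits 24+: per-head (vblank) */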
+	if (intr & 0x00000001) {
+		u32 stat = nvkm_rd32(device, 0x61008c);
+		while (stat) {
+			int chid = __ffs(stat); stat &= ~(1 << chid);
+			nv50_disp_chan_uevent_send(disp, chid);
+			nvkm_wr32(device, 0x61008c, 1 << chid);
+		}
+		intr &= ~0x00000001;
+	}
+
+	if (intr & 0x00000002) {
+		u32 stat = nvkm_rd32(device, 0x61009c);
+		int chid = ffs(stat) - 1;
+		if (chid >= 0)
+			gf119_disp_intr_error(disp, chid);
+		intr &= ~0x00000002;
+	}
+
+	if (intr & 0x00100000) {
+		u32 stat = nvkm_rd32(device, 0x6100ac);
+		if (stat & 0x00000007) {
+			disp->super = (stat & 0x00000007);
+			schedule_work(&disp->supervisor);
+			nvkm_wr32(device, 0x6100ac, disp->super);
+			stat &= ~0x00000007;
+		}
+
+		if (stat) {
+			nvkm_warn(subdev, "intr24 %08x\n", stat);
+			nvkm_wr32(device, 0x6100ac, stat);
+		}
+
+		intr &= ~0x00100000;
+	}
+
+	for (i = 0; i < disp->base.head.nr; i++) {
+		u32 mask = 0x01000000 << i;
+		if (mask & intr) {
+			u32 stat = nvkm_rd32(device, 0x6100bc + (i * 0x800));
+			if (stat & 0x00000001)
+				nvkm_disp_vblank(&disp->base, i);
+			nvkm_mask(device, 0x6100bc + (i * 0x800), 0, 0);
+			nvkm_rd32(device, 0x6100c0 + (i * 0x800));
+		}
+	}
+}
+
+int
+gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
+		int index, struct nvkm_disp **pdisp)
+{
+	u32 heads = nvkm_rd32(device, 0x022448);
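+	/* 0x022448 reports the number of heads present on gf119 and up */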
+	return nv50_disp_new_(func, device, index, heads, pdisp);
+}
+
+static const struct nv50_disp_func
+gf119_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gf119_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gf119_hdmi_ctrl,
+};
+
+int
+gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+	return gf119_disp_new_(&gf119_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 6f4019ab4e65..a86384b8e388 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -22,247 +22,32 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gk104_disp_core_mthd_head = {
-	.mthd = 0x0300,
-	.addr = 0x000300,
-	.data = {
-		{ 0x0400, 0x660400 },
-		{ 0x0404, 0x660404 },
-		{ 0x0408, 0x660408 },
-		{ 0x040c, 0x66040c },
-		{ 0x0410, 0x660410 },
-		{ 0x0414, 0x660414 },
-		{ 0x0418, 0x660418 },
-		{ 0x041c, 0x66041c },
-		{ 0x0420, 0x660420 },
-		{ 0x0424, 0x660424 },
-		{ 0x0428, 0x660428 },
-		{ 0x042c, 0x66042c },
-		{ 0x0430, 0x660430 },
-		{ 0x0434, 0x660434 },
-		{ 0x0438, 0x660438 },
-		{ 0x0440, 0x660440 },
-		{ 0x0444, 0x660444 },
-		{ 0x0448, 0x660448 },
-		{ 0x044c, 0x66044c },
-		{ 0x0450, 0x660450 },
-		{ 0x0454, 0x660454 },
-		{ 0x0458, 0x660458 },
-		{ 0x045c, 0x66045c },
-		{ 0x0460, 0x660460 },
-		{ 0x0468, 0x660468 },
-		{ 0x046c, 0x66046c },
-		{ 0x0470, 0x660470 },
-		{ 0x0474, 0x660474 },
-		{ 0x047c, 0x66047c },
-		{ 0x0480, 0x660480 },
-		{ 0x0484, 0x660484 },
-		{ 0x0488, 0x660488 },
-		{ 0x048c, 0x66048c },
-		{ 0x0490, 0x660490 },
-		{ 0x0494, 0x660494 },
-		{ 0x0498, 0x660498 },
-		{ 0x04a0, 0x6604a0 },
-		{ 0x04b0, 0x6604b0 },
-		{ 0x04b8, 0x6604b8 },
-		{ 0x04bc, 0x6604bc },
-		{ 0x04c0, 0x6604c0 },
-		{ 0x04c4, 0x6604c4 },
-		{ 0x04c8, 0x6604c8 },
-		{ 0x04d0, 0x6604d0 },
-		{ 0x04d4, 0x6604d4 },
-		{ 0x04e0, 0x6604e0 },
-		{ 0x04e4, 0x6604e4 },
-		{ 0x04e8, 0x6604e8 },
-		{ 0x04ec, 0x6604ec },
-		{ 0x04f0, 0x6604f0 },
-		{ 0x04f4, 0x6604f4 },
-		{ 0x04f8, 0x6604f8 },
-		{ 0x04fc, 0x6604fc },
-		{ 0x0500, 0x660500 },
-		{ 0x0504, 0x660504 },
-		{ 0x0508, 0x660508 },
-		{ 0x050c, 0x66050c },
-		{ 0x0510, 0x660510 },
-		{ 0x0514, 0x660514 },
-		{ 0x0518, 0x660518 },
-		{ 0x051c, 0x66051c },
-		{ 0x0520, 0x660520 },
-		{ 0x0524, 0x660524 },
-		{ 0x052c, 0x66052c },
-		{ 0x0530, 0x660530 },
-		{ 0x054c, 0x66054c },
-		{ 0x0550, 0x660550 },
-		{ 0x0554, 0x660554 },
-		{ 0x0558, 0x660558 },
-		{ 0x055c, 0x66055c },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-gk104_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &gf110_disp_core_mthd_base },
-		{    "DAC", 3, &gf110_disp_core_mthd_dac  },
-		{    "SOR", 8, &gf110_disp_core_mthd_sor  },
-		{   "PIOR", 4, &gf110_disp_core_mthd_pior },
-		{   "HEAD", 4, &gk104_disp_core_mthd_head },
-		{}
-	}
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gk104_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.data = {
-		{ 0x0080, 0x665080 },
-		{ 0x0084, 0x665084 },
-		{ 0x0088, 0x665088 },
-		{ 0x008c, 0x66508c },
-		{ 0x0090, 0x665090 },
-		{ 0x0094, 0x665094 },
-		{ 0x00a0, 0x6650a0 },
-		{ 0x00a4, 0x6650a4 },
-		{ 0x00b0, 0x6650b0 },
-		{ 0x00b4, 0x6650b4 },
-		{ 0x00b8, 0x6650b8 },
-		{ 0x00c0, 0x6650c0 },
-		{ 0x00c4, 0x6650c4 },
-		{ 0x00e0, 0x6650e0 },
-		{ 0x00e4, 0x6650e4 },
-		{ 0x00e8, 0x6650e8 },
-		{ 0x0100, 0x665100 },
-		{ 0x0104, 0x665104 },
-		{ 0x0108, 0x665108 },
-		{ 0x010c, 0x66510c },
-		{ 0x0110, 0x665110 },
-		{ 0x0118, 0x665118 },
-		{ 0x011c, 0x66511c },
-		{ 0x0120, 0x665120 },
-		{ 0x0124, 0x665124 },
-		{ 0x0130, 0x665130 },
-		{ 0x0134, 0x665134 },
-		{ 0x0138, 0x665138 },
-		{ 0x013c, 0x66513c },
-		{ 0x0140, 0x665140 },
-		{ 0x0144, 0x665144 },
-		{ 0x0148, 0x665148 },
-		{ 0x014c, 0x66514c },
-		{ 0x0150, 0x665150 },
-		{ 0x0154, 0x665154 },
-		{ 0x0158, 0x665158 },
-		{ 0x015c, 0x66515c },
-		{ 0x0160, 0x665160 },
-		{ 0x0164, 0x665164 },
-		{ 0x0168, 0x665168 },
-		{ 0x016c, 0x66516c },
-		{ 0x0400, 0x665400 },
-		{ 0x0404, 0x665404 },
-		{ 0x0408, 0x665408 },
-		{ 0x040c, 0x66540c },
-		{ 0x0410, 0x665410 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-gk104_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x001000,
-	.data = {
-		{ "Global", 1, &gk104_disp_ovly_mthd_base },
-		{}
-	}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gk104_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gk104_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
 };
 
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_disp_sclass[] = {
-	{ GK104_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK104_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-gk104_disp_main_oclass[] = {
-	{ GK104_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gk104_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gk104_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gk104_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gk104_hdmi_ctrl;
-	return 0;
+	return gf119_disp_new_(&gk104_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gk104_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x91),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index daa4b460a6ba..0d574c7e594a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -22,82 +22,32 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk110_disp_sclass[] = {
-	{ GK110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-gk110_disp_main_oclass[] = {
-	{ GK110_DISP, &gf110_disp_main_ofuncs },
-	{}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gk110_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gk110_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
 };
 
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gk110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gk110_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gk110_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gk104_hdmi_ctrl;
-	return 0;
+	return gf119_disp_new_(&gk110_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gk110_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x92),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk110_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 881cc94385a1..b6944142d616 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -22,82 +22,32 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm107_disp_sclass[] = {
-	{ GM107_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-gm107_disp_main_oclass[] = {
-	{ GM107_DISP, &gf110_disp_main_ofuncs },
-	{}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gm107_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gm107_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
 };
 
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gm107_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gm107_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gm107_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gk104_hdmi_ctrl;
-	return 0;
+	return gf119_disp_new_(&gm107_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gm107_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x07),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
index 67004f8302b3..30f1987b5b40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
@@ -22,90 +22,33 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "outpdp.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm204_disp_sclass[] = {
-	{ GM204_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gm204_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gm204_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gm204_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
+	.sor.magic = gm204_sor_magic,
 };
 
-static struct nvkm_oclass
-gm204_disp_main_oclass[] = {
-	{ GM204_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gm204_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gm204_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gm204_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gm204_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gf110_hdmi_ctrl;
-	priv->sor.magic = gm204_sor_magic;
-	return 0;
+	return gf119_disp_new_(&gm204_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gm204_disp_outp_sclass[] = {
-	&gm204_sor_dp_impl.base.base,
-	NULL
-};
-
-struct nvkm_oclass *
-gm204_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x07),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gm204_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index a45307213f4b..6bc3bf096001 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -22,127 +22,34 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gt200_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x6109a0 },
-		{ 0x0088, 0x6109c0 },
-		{ 0x008c, 0x6109c8 },
-		{ 0x0090, 0x6109b4 },
-		{ 0x0094, 0x610970 },
-		{ 0x00a0, 0x610998 },
-		{ 0x00a4, 0x610964 },
-		{ 0x00b0, 0x610c98 },
-		{ 0x00b4, 0x610ca4 },
-		{ 0x00b8, 0x610cac },
-		{ 0x00c0, 0x610958 },
-		{ 0x00e0, 0x6109a8 },
-		{ 0x00e4, 0x6109d0 },
-		{ 0x00e8, 0x6109d8 },
-		{ 0x0100, 0x61094c },
-		{ 0x0104, 0x610984 },
-		{ 0x0108, 0x61098c },
-		{ 0x0800, 0x6109f8 },
-		{ 0x0808, 0x610a08 },
-		{ 0x080c, 0x610a10 },
-		{ 0x0810, 0x610a00 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-gt200_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &gt200_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt200_disp_sclass[] = {
-	{ GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gt200_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &gt200_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 2,
+	.sor.power = nv50_sor_power,
+	.sor.hdmi = g84_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-static struct nvkm_oclass
-gt200_disp_main_oclass[] = {
-	{ GT200_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gt200_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gt200_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = gt200_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 2;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hdmi = g84_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&gt200_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-gt200_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x83),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt200_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  nv50_disp_outp_sclass,
-	.mthd.core = &g84_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &gt200_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 55f0d3ac591e..94026288ab4d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -22,83 +22,36 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_disp_sclass[] = {
-	{ GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-gt215_disp_main_oclass[] = {
-	{ GT214_DISP, &nv50_disp_main_ofuncs },
-	{}
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gt215_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &gt215_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = g94_sor_dp_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gt215_hda_eld,
+	.sor.hdmi = gt215_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gt215_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gt215_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = gt215_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gt215_hda_eld;
-	priv->sor.hdmi = gt215_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&gt215_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-gt215_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x85),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  g94_disp_outp_sclass,
-	.mthd.core = &g94_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &g84_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
index b9813d246ba5..af99efbd63f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
@@ -33,8 +33,9 @@
 #include <nvif/unpack.h>
 
 int
-gf110_hda_eld(NV50_DISP_MTHD_V1)
+gf119_hda_eld(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	union {
 		struct nv50_disp_sor_hda_eld_v0 v0;
 	} *args = data;
@@ -42,9 +43,10 @@ gf110_hda_eld(NV50_DISP_MTHD_V1)
 	const u32 hoff = head * 0x800;
 	int ret, i;
 
-	nv_ioctl(object, "disp sor hda eld size %d\n", size);
+	nvif_ioctl(object, "disp sor hda eld size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
+		nvif_ioctl(object, "disp sor hda eld vers %d\n",
+			   args->v0.version);
 		if (size > 0x60)
 			return -E2BIG;
 	} else
@@ -52,21 +54,29 @@ gf110_hda_eld(NV50_DISP_MTHD_V1)
 
 	if (size && args->v0.data[0]) {
 		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x616618 + hoff, 0x8000000c, 0x80000001);
-			nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000);
+			nvkm_mask(device, 0x616618 + hoff, 0x8000000c, 0x80000001);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x616618 + hoff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
 		}
-		nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+		nvkm_mask(device, 0x616548 + hoff, 0x00000070, 0x00000000);
 		for (i = 0; i < size; i++)
-			nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
+			nvkm_wr32(device, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
 		for (; i < 0x60; i++)
-			nv_wr32(priv, 0x10ec00 + soff, (i << 8));
-		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+			nvkm_wr32(device, 0x10ec00 + soff, (i << 8));
+		nvkm_mask(device, 0x10ec10 + soff, 0x80000003, 0x80000003);
 	} else {
 		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x616618 + hoff, 0x80000001, 0x80000000);
-			nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000);
+			nvkm_mask(device, 0x616618 + hoff, 0x80000001, 0x80000000);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x616618 + hoff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
 		}
-		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size);
+		nvkm_mask(device, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 891d1e7bf7d2..c1590b746f13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -33,15 +33,17 @@
 int
 gt215_hda_eld(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	union {
 		struct nv50_disp_sor_hda_eld_v0 v0;
 	} *args = data;
 	const u32 soff = outp->or * 0x800;
 	int ret, i;
 
-	nv_ioctl(object, "disp sor hda eld size %d\n", size);
+	nvif_ioctl(object, "disp sor hda eld size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
+		nvif_ioctl(object, "disp sor hda eld vers %d\n",
+			   args->v0.version);
 		if (size > 0x60)
 			return -E2BIG;
 	} else
@@ -49,20 +51,28 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
 
 	if (size && args->v0.data[0]) {
 		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x61c1e0 + soff, 0x8000000d, 0x80000001);
-			nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000);
+			nvkm_mask(device, 0x61c1e0 + soff, 0x8000000d, 0x80000001);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x61c1e0 + soff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
 		}
 		for (i = 0; i < size; i++)
-			nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+			nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
 		for (; i < 0x60; i++)
-			nv_wr32(priv, 0x61c440 + soff, (i << 8));
-		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+			nvkm_wr32(device, 0x61c440 + soff, (i << 8));
+		nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
 	} else {
 		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x61c1e0 + soff, 0x80000001, 0x80000000);
-			nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000);
+			nvkm_mask(device, 0x61c1e0 + soff, 0x80000001, 0x80000000);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x61c1e0 + soff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
 		}
-		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size);
+		nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
index 621cb0b7ff19..ee9e800a8f06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
@@ -31,6 +31,7 @@
 int
 g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 hoff = (head * 0x800);
 	union {
 		struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -38,12 +39,12 @@ g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
 	u32 ctrl;
 	int ret;
 
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
 		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
 			return -EINVAL;
 		ctrl  = 0x40000000 * !!args->v0.state;
@@ -54,38 +55,38 @@ g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
 		return ret;
 
 	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
 		return 0;
 	}
 
 	/* AVI InfoFrame */
-	nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
-	nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
-	nv_wr32(priv, 0x616530 + hoff, 0x00000000);
-	nv_wr32(priv, 0x616534 + hoff, 0x00000000);
-	nv_wr32(priv, 0x616538 + hoff, 0x00000000);
-	nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x616528 + hoff, 0x000d0282);
+	nvkm_wr32(device, 0x61652c + hoff, 0x0000006f);
+	nvkm_wr32(device, 0x616530 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x616534 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x616538 + hoff, 0x00000000);
+	nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000001);
 
 	/* Audio InfoFrame */
-	nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
-	nv_wr32(priv, 0x61650c + hoff, 0x00000071);
-	nv_wr32(priv, 0x616510 + hoff, 0x00000000);
-	nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x616508 + hoff, 0x000a0184);
+	nvkm_wr32(device, 0x61650c + hoff, 0x00000071);
+	nvkm_wr32(device, 0x616510 + hoff, 0x00000000);
+	nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000001);
 
-	nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
-	nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
-	nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+	nvkm_mask(device, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+	nvkm_mask(device, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+	nvkm_mask(device, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
 
 	/* ??? */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
-	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
 
 	/* HDMI_CTRL */
-	nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
+	nvkm_mask(device, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
index c28449061bbd..b5af025d3b04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
@@ -29,8 +29,9 @@
 #include <nvif/unpack.h>
 
 int
-gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
+gf119_hdmi_ctrl(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 hoff = (head * 0x800);
 	union {
 		struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -38,12 +39,12 @@ gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
 	u32 ctrl;
 	int ret;
 
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
 		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
 			return -EINVAL;
 		ctrl  = 0x40000000 * !!args->v0.state;
@@ -53,27 +54,27 @@ gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
 		return ret;
 
 	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
 		return 0;
 	}
 
 	/* AVI InfoFrame */
-	nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
-	nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
-	nv_wr32(priv, 0x616724 + hoff, 0x00000000);
-	nv_wr32(priv, 0x616728 + hoff, 0x00000000);
-	nv_wr32(priv, 0x61672c + hoff, 0x00000000);
-	nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x61671c + hoff, 0x000d0282);
+	nvkm_wr32(device, 0x616720 + hoff, 0x0000006f);
+	nvkm_wr32(device, 0x616724 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x616728 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x61672c + hoff, 0x00000000);
+	nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000001);
 
 	/* ??? InfoFrame? */
-	nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
-	nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x6167ac + hoff, 0x00000010);
+	nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000001);
 
 	/* HDMI_CTRL */
-	nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
+	nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
index ca34ff81ad7f..110dc19e4f67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
@@ -31,6 +31,7 @@
 int
 gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 hoff = (head * 0x800);
 	const u32 hdmi = (head * 0x400);
 	union {
@@ -39,12 +40,12 @@ gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
 	u32 ctrl;
 	int ret;
 
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
 		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
 			return -EINVAL;
 		ctrl  = 0x40000000 * !!args->v0.state;
@@ -54,30 +55,30 @@ gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
 		return ret;
 
 	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
 		return 0;
 	}
 
 	/* AVI InfoFrame */
-	nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x690008 + hdmi, 0x000d0282);
-	nv_wr32(priv, 0x69000c + hdmi, 0x0000006f);
-	nv_wr32(priv, 0x690010 + hdmi, 0x00000000);
-	nv_wr32(priv, 0x690014 + hdmi, 0x00000000);
-	nv_wr32(priv, 0x690018 + hdmi, 0x00000000);
-	nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x690008 + hdmi, 0x000d0282);
+	nvkm_wr32(device, 0x69000c + hdmi, 0x0000006f);
+	nvkm_wr32(device, 0x690010 + hdmi, 0x00000000);
+	nvkm_wr32(device, 0x690014 + hdmi, 0x00000000);
+	nvkm_wr32(device, 0x690018 + hdmi, 0x00000000);
+	nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000001);
 
 	/* ??? InfoFrame? */
-	nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x6900cc + hdmi, 0x00000010);
-	nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x6900cc + hdmi, 0x00000010);
+	nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
 
 	/* ??? */
-	nv_wr32(priv, 0x690080 + hdmi, 0x82000000);
+	nvkm_wr32(device, 0x690080 + hdmi, 0x82000000);
 
 	/* HDMI_CTRL */
-	nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
+	nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
 	return 0;
 }
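
The gf119- and gk104-class handlers above program InfoFrames with the same three-step sequence: clear the enable bit, rewrite the header and subpack words (0x000d0282, 0x0000006f, then zeroed payload words for the AVI frame), and only then set the enable bit again, so the sink never sees a half-updated packet. A sketch of that sequence, reusing the hypothetical fake_* helpers from the previous sketch; the register layout here is illustrative, not the hardware's:

/* disable -> rewrite -> re-enable, as in the AVI InfoFrame hunks above */
static void fake_infoframe_update(uint32_t ctrl_reg, uint32_t data_reg,
				  const uint32_t *words, int nr)
{
	int i;

	fake_mask(ctrl_reg, 0x00000001, 0x00000000);	/* stop the packet */
	for (i = 0; i < nr; i++)
		fake_wr32(data_reg + i * 4, words[i]);	/* header + payload */
	fake_mask(ctrl_reg, 0x00000001, 0x00000001);	/* resume sending */
}
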
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
index b641c167dcfa..61237dbfa35a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
@@ -32,6 +32,7 @@
 int
 gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 soff = outp->or * 0x800;
 	union {
 		struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -39,12 +40,12 @@ gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
 	u32 ctrl;
 	int ret;
 
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
 		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
 			return -EINVAL;
 		ctrl  = 0x40000000 * !!args->v0.state;
@@ -55,38 +56,38 @@ gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
 		return ret;
 
 	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
 		return 0;
 	}
 
 	/* AVI InfoFrame */
-	nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
-	nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
-	nv_wr32(priv, 0x61c530 + soff, 0x00000000);
-	nv_wr32(priv, 0x61c534 + soff, 0x00000000);
-	nv_wr32(priv, 0x61c538 + soff, 0x00000000);
-	nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x61c528 + soff, 0x000d0282);
+	nvkm_wr32(device, 0x61c52c + soff, 0x0000006f);
+	nvkm_wr32(device, 0x61c530 + soff, 0x00000000);
+	nvkm_wr32(device, 0x61c534 + soff, 0x00000000);
+	nvkm_wr32(device, 0x61c538 + soff, 0x00000000);
+	nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000001);
 
 	/* Audio InfoFrame */
-	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
-	nv_wr32(priv, 0x61c50c + soff, 0x00000071);
-	nv_wr32(priv, 0x61c510 + soff, 0x00000000);
-	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x61c508 + soff, 0x000a0184);
+	nvkm_wr32(device, 0x61c50c + soff, 0x00000071);
+	nvkm_wr32(device, 0x61c510 + soff, 0x00000000);
+	nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000001);
 
-	nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
-	nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
-	nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+	nvkm_mask(device, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+	nvkm_mask(device, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+	nvkm_mask(device, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
 
 	/* ??? */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
-	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
 
 	/* HDMI_CTRL */
-	nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
+	nvkm_mask(device, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
 	return 0;
 }
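
All three *_hdmi_ctrl() methods validate and pack the same user arguments before touching registers: max_ac_packet must fit in 5 bits, rekey in 7, and the enable bit is set branch-free with 0x40000000 * !!state. A standalone sketch of just that argument handling; only the enable bit's position comes from the code above, and any further field packing is elided:

#include <stdint.h>
#include <errno.h>

static int build_hdmi_ctrl(int state, uint32_t max_ac_packet,
			   uint32_t rekey, uint32_t *ctrl)
{
	/* same bounds the unpack paths above enforce */
	if (max_ac_packet > 0x1f || rekey > 0x7f)
		return -EINVAL;

	*ctrl = 0x40000000 * !!state;	/* enable bit iff state != 0 */
	return 0;
}
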
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
index ff09b2659c17..67254ce6f83f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
@@ -23,183 +23,63 @@
  */
 #include "priv.h"
 
-#include <core/client.h>
-#include <core/device.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct nv04_disp_priv {
-	struct nvkm_disp base;
-};
-
-static int
-nv04_disp_scanoutpos(struct nvkm_object *object, struct nv04_disp_priv *priv,
-		     void *data, u32 size, int head)
+static const struct nvkm_disp_oclass *
+nv04_disp_root(struct nvkm_disp *disp)
 {
-	const u32 hoff = head * 0x2000;
-	union {
-		struct nv04_disp_scanoutpos_v0 v0;
-	} *args = data;
-	u32 line;
-	int ret;
-
-	nv_ioctl(object, "disp scanoutpos size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
-		args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff;
-		args->v0.vtotal  = nv_rd32(priv, 0x680804 + hoff) & 0xffff;
-		args->v0.vblanke = args->v0.vtotal - 1;
-
-		args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff;
-		args->v0.htotal  = nv_rd32(priv, 0x680824 + hoff) & 0xffff;
-		args->v0.hblanke = args->v0.htotal - 1;
-
-		/*
-		 * If output is vga instead of digital then vtotal/htotal is
-		 * invalid so we have to give up and trigger the timestamping
-		 * fallback in the drm core.
-		 */
-		if (!args->v0.vtotal || !args->v0.htotal)
-			return -ENOTSUPP;
-
-		args->v0.time[0] = ktime_to_ns(ktime_get());
-		line = nv_rd32(priv, 0x600868 + hoff);
-		args->v0.time[1] = ktime_to_ns(ktime_get());
-		args->v0.hline = (line & 0xffff0000) >> 16;
-		args->v0.vline = (line & 0x0000ffff);
-	} else
-		return ret;
-
-	return 0;
+	return &nv04_disp_root_oclass;
 }
 
-static int
-nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
-	union {
-		struct nv04_disp_mthd_v0 v0;
-	} *args = data;
-	struct nv04_disp_priv *priv = (void *)object->engine;
-	int head, ret;
-
-	nv_ioctl(object, "disp mthd size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
-			 args->v0.version, args->v0.method, args->v0.head);
-		mthd = args->v0.method;
-		head = args->v0.head;
-	} else
-		return ret;
-
-	if (head < 0 || head >= 2)
-		return -ENXIO;
-
-	switch (mthd) {
-	case NV04_DISP_SCANOUTPOS:
-		return nv04_disp_scanoutpos(object, priv, data, size, head);
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-
-static struct nvkm_ofuncs
-nv04_disp_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
-	.mthd = nv04_disp_mthd,
-	.ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-nv04_disp_sclass[] = {
-	{ NV04_DISP, &nv04_disp_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
 static void
-nv04_disp_vblank_init(struct nvkm_event *event, int type, int head)
+nv04_disp_vblank_init(struct nvkm_disp *disp, int head)
 {
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001);
+	struct nvkm_device *device = disp->engine.subdev.device;
+	nvkm_wr32(device, 0x600140 + (head * 0x2000), 0x00000001);
 }
 
 static void
-nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+nv04_disp_vblank_fini(struct nvkm_disp *disp, int head)
 {
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000);
+	struct nvkm_device *device = disp->engine.subdev.device;
+	nvkm_wr32(device, 0x600140 + (head * 0x2000), 0x00000000);
 }
 
-static const struct nvkm_event_func
-nv04_disp_vblank_func = {
-	.ctor = nvkm_disp_vblank_ctor,
-	.init = nv04_disp_vblank_init,
-	.fini = nv04_disp_vblank_fini,
-};
-
 static void
-nv04_disp_intr(struct nvkm_subdev *subdev)
+nv04_disp_intr(struct nvkm_disp *disp)
 {
-	struct nv04_disp_priv *priv = (void *)subdev;
-	u32 crtc0 = nv_rd32(priv, 0x600100);
-	u32 crtc1 = nv_rd32(priv, 0x602100);
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 crtc0 = nvkm_rd32(device, 0x600100);
+	u32 crtc1 = nvkm_rd32(device, 0x602100);
 	u32 pvideo;
 
 	if (crtc0 & 0x00000001) {
-		nvkm_disp_vblank(&priv->base, 0);
-		nv_wr32(priv, 0x600100, 0x00000001);
+		nvkm_disp_vblank(disp, 0);
+		nvkm_wr32(device, 0x600100, 0x00000001);
 	}
 
 	if (crtc1 & 0x00000001) {
-		nvkm_disp_vblank(&priv->base, 1);
-		nv_wr32(priv, 0x602100, 0x00000001);
+		nvkm_disp_vblank(disp, 1);
+		nvkm_wr32(device, 0x602100, 0x00000001);
 	}
 
-	if (nv_device(priv)->chipset >= 0x10 &&
-	    nv_device(priv)->chipset <= 0x40) {
-		pvideo = nv_rd32(priv, 0x8100);
+	if (device->chipset >= 0x10 && device->chipset <= 0x40) {
+		pvideo = nvkm_rd32(device, 0x8100);
 		if (pvideo & ~0x11)
-			nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
-		nv_wr32(priv, 0x8100, pvideo);
+			nvkm_info(subdev, "PVIDEO intr: %08x\n", pvideo);
+		nvkm_wr32(device, 0x8100, pvideo);
 	}
 }
 
-static int
-nv04_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv04_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "DISPLAY",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_disp_func
+nv04_disp = {
+	.intr = nv04_disp_intr,
+	.root = nv04_disp_root,
+	.head.vblank_init = nv04_disp_vblank_init,
+	.head.vblank_fini = nv04_disp_vblank_fini,
+};
 
-	nv_engine(priv)->sclass = nv04_disp_sclass;
-	nv_subdev(priv)->intr = nv04_disp_intr;
-	return 0;
+int
+nv04_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+	return nvkm_disp_new_(&nv04_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-nv04_disp_oclass = &(struct nvkm_disp_impl) {
-	.base.handle = NV_ENGINE(DISP, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.vblank = &nv04_disp_vblank_func,
-}.base;
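
The nv04.c rewrite shows the shape of the whole refactor: the nvkm_object ctor/dtor/oclass boilerplate is gone, replaced by a const function table (nvkm_disp_func) plus a one-line chipset constructor, nv04_disp_new(), that forwards the table to a common allocator. A reduced sketch of that pattern follows; the fake_* names are hypothetical stand-ins for the nvkm types:

#include <stdlib.h>

struct fake_disp;

/* per-chipset behaviour lives in a const table of function pointers */
struct fake_disp_func {
	void (*intr)(struct fake_disp *);
	void (*vblank_init)(struct fake_disp *, int head);
	void (*vblank_fini)(struct fake_disp *, int head);
};

struct fake_disp {
	const struct fake_disp_func *func;
	int heads;
};

/* common allocator, analogous in role to nvkm_disp_new_() */
static int fake_disp_new_(const struct fake_disp_func *func, int heads,
			  struct fake_disp **pdisp)
{
	struct fake_disp *disp = calloc(1, sizeof(*disp));

	if (!disp)
		return -1;
	disp->func = func;
	disp->heads = heads;
	*pdisp = disp;
	return 0;
}

static void demo_intr(struct fake_disp *disp) { (void)disp; }

static const struct fake_disp_func demo = {
	.intr = demo_intr,
};

/* the chipset constructor reduces to forwarding its table */
int demo_disp_new(struct fake_disp **pdisp)
{
	return fake_disp_new_(&demo, 2, pdisp);	/* two heads, as on nv04 */
}
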
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 8ba808df24ad..32e73a975b58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -22,1291 +22,158 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "outp.h"
-#include "outpdp.h"
+#include "rootnv50.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
 #include <core/enum.h>
-#include <core/handle.h>
-#include <core/ramht.h>
-#include <engine/dmaobj.h>
+#include <core/gpuobj.h>
 #include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
 #include <subdev/bios/disp.h>
 #include <subdev/bios/init.h>
 #include <subdev/bios/pll.h>
 #include <subdev/devinit.h>
-#include <subdev/fb.h>
-#include <subdev/timer.h>
 
-#include <nvif/class.h>
-#include <nvif/event.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * EVO channel base class
- ******************************************************************************/
-
-static int
-nv50_disp_chan_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, int head,
-		       int length, void **pobject)
-{
-	const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
-	struct nv50_disp_base *base = (void *)parent;
-	struct nv50_disp_chan *chan;
-	int chid = impl->chid + head;
-	int ret;
-
-	if (base->chan & (1 << chid))
-		return -EBUSY;
-	base->chan |= (1 << chid);
-
-	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
-				  (1ULL << NVDEV_ENGINE_DMAOBJ),
-				  length, pobject);
-	chan = *pobject;
-	if (ret)
-		return ret;
-	chan->chid = chid;
-
-	nv_parent(chan)->object_attach = impl->attach;
-	nv_parent(chan)->object_detach = impl->detach;
-	return 0;
-}
-
-static void
-nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
-{
-	struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
-	base->chan &= ~(1 << chan->chid);
-	nvkm_namedb_destroy(&chan->base);
-}
-
-static void
-nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
-	nv_wr32(priv, 0x610020, 0x00000001 << index);
-}
-
-static void
-nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_wr32(priv, 0x610020, 0x00000001 << index);
-	nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
-}
-
-void
-nv50_disp_chan_uevent_send(struct nv50_disp_priv *priv, int chid)
-{
-	struct nvif_notify_uevent_rep {
-	} rep;
-
-	nvkm_event_send(&priv->uevent, 1, chid, &rep, sizeof(rep));
-}
-
-int
-nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
-			   struct nvkm_notify *notify)
-{
-	struct nv50_disp_dmac *dmac = (void *)object;
-	union {
-		struct nvif_notify_uevent_req none;
-	} *args = data;
-	int ret;
-
-	if (nvif_unvers(args->none)) {
-		notify->size  = sizeof(struct nvif_notify_uevent_rep);
-		notify->types = 1;
-		notify->index = dmac->base.chid;
-		return 0;
-	}
-
-	return ret;
-}
-
-const struct nvkm_event_func
-nv50_disp_chan_uevent = {
-	.ctor = nv50_disp_chan_uevent_ctor,
-	.init = nv50_disp_chan_uevent_init,
-	.fini = nv50_disp_chan_uevent_fini,
-};
-
-int
-nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
-		    struct nvkm_event **pevent)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	switch (type) {
-	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
-		*pevent = &priv->uevent;
-		return 0;
-	default:
-		break;
-	}
-	return -EINVAL;
-}
-
-int
-nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+static const struct nvkm_disp_oclass *
+nv50_disp_root_(struct nvkm_disp *base)
 {
-	struct nv50_disp_chan *chan = (void *)object;
-	*addr = nv_device_resource_start(nv_device(object), 0) +
-		0x640000 + (chan->chid * 0x1000);
-	*size = 0x001000;
-	return 0;
+	return nv50_disp(base)->func->root;
 }
 
-u32
-nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_chan *chan = (void *)object;
-	return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
-}
-
-void
-nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_chan *chan = (void *)object;
-	nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
-}
-
-/*******************************************************************************
- * EVO DMA channel base class
- ******************************************************************************/
-
 static int
-nv50_disp_dmac_object_attach(struct nvkm_object *parent,
-			     struct nvkm_object *object, u32 name)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	struct nv50_disp_chan *chan = (void *)parent;
-	u32 addr = nv_gpuobj(object)->node->offset;
-	u32 chid = chan->chid;
-	u32 data = (chid << 28) | (addr << 10) | chid;
-	return nvkm_ramht_insert(base->ramht, chid, name, data);
-}
-
-static void
-nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
+nv50_disp_outp_internal_crt_(struct nvkm_disp *base, int index,
+			     struct dcb_output *dcb, struct nvkm_output **poutp)
 {
-	struct nv50_disp_base *base = (void *)parent->parent;
-	nvkm_ramht_remove(base->ramht, cookie);
+	struct nv50_disp *disp = nv50_disp(base);
+	return disp->func->outp.internal.crt(base, index, dcb, poutp);
 }
 
 static int
-nv50_disp_dmac_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, u32 pushbuf, int head,
-		       int length, void **pobject)
-{
-	struct nv50_disp_dmac *dmac;
-	int ret;
-
-	ret = nv50_disp_chan_create_(parent, engine, oclass, head,
-				     length, pobject);
-	dmac = *pobject;
-	if (ret)
-		return ret;
-
-	dmac->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
-	if (!dmac->pushdma)
-		return -ENOENT;
-
-	switch (nv_mclass(dmac->pushdma)) {
-	case 0x0002:
-	case 0x003d:
-		if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
-			return -EINVAL;
-
-		switch (dmac->pushdma->target) {
-		case NV_MEM_TARGET_VRAM:
-			dmac->push = 0x00000001 | dmac->pushdma->start >> 8;
-			break;
-		case NV_MEM_TARGET_PCI_NOSNOOP:
-			dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
-			break;
-		default:
-			return -EINVAL;
-		}
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-void
-nv50_disp_dmac_dtor(struct nvkm_object *object)
+nv50_disp_outp_internal_tmds_(struct nvkm_disp *base, int index,
+			      struct dcb_output *dcb,
+			      struct nvkm_output **poutp)
 {
-	struct nv50_disp_dmac *dmac = (void *)object;
-	nvkm_object_ref(NULL, (struct nvkm_object **)&dmac->pushdma);
-	nv50_disp_chan_destroy(&dmac->base);
+	struct nv50_disp *disp = nv50_disp(base);
+	return disp->func->outp.internal.tmds(base, index, dcb, poutp);
 }
 
 static int
-nv50_disp_dmac_init(struct nvkm_object *object)
+nv50_disp_outp_internal_lvds_(struct nvkm_disp *base, int index,
+			      struct dcb_output *dcb,
+			      struct nvkm_output **poutp)
 {
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&dmac->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
-	nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
-	nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
-	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
-	nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
-		nv_error(dmac, "init timeout, 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
+	struct nv50_disp *disp = nv50_disp(base);
+	return disp->func->outp.internal.lvds(base, index, dcb, poutp);
 }
 
 static int
-nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend)
+nv50_disp_outp_internal_dp_(struct nvkm_disp *base, int index,
+			    struct dcb_output *dcb, struct nvkm_output **poutp)
 {
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
-		nv_error(dmac, "fini timeout, 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notifications */
-	nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
-
-	return nv50_disp_chan_fini(&dmac->base, suspend);
-}
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-static void
-nv50_disp_mthd_list(struct nv50_disp_priv *priv, int debug, u32 base, int c,
-		    const struct nv50_disp_mthd_list *list, int inst)
-{
-	struct nvkm_object *disp = nv_object(priv);
-	int i;
-
-	for (i = 0; list->data[i].mthd; i++) {
-		if (list->data[i].addr) {
-			u32 next = nv_rd32(priv, list->data[i].addr + base + 0);
-			u32 prev = nv_rd32(priv, list->data[i].addr + base + c);
-			u32 mthd = list->data[i].mthd + (list->mthd * inst);
-			const char *name = list->data[i].name;
-			char mods[16];
-
-			if (prev != next)
-				snprintf(mods, sizeof(mods), "-> 0x%08x", next);
-			else
-				snprintf(mods, sizeof(mods), "%13c", ' ');
-
-			nv_printk_(disp, debug, "\t0x%04x: 0x%08x %s%s%s\n",
-				   mthd, prev, mods, name ? " // " : "",
-				   name ? name : "");
-		}
-	}
-}
-
-void
-nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
-		    const struct nv50_disp_mthd_chan *chan)
-{
-	struct nvkm_object *disp = nv_object(priv);
-	const struct nv50_disp_impl *impl = (void *)disp->oclass;
-	const struct nv50_disp_mthd_list *list;
-	int i, j;
-
-	if (debug > nv_subdev(priv)->debug)
-		return;
-
-	for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
-		u32 base = head * chan->addr;
-		for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
-			const char *cname = chan->name;
-			const char *sname = "";
-			char cname_[16], sname_[16];
-
-			if (chan->addr) {
-				snprintf(cname_, sizeof(cname_), "%s %d",
-					 chan->name, head);
-				cname = cname_;
-			}
-
-			if (chan->data[i].nr > 1) {
-				snprintf(sname_, sizeof(sname_), " - %s %d",
-					 chan->data[i].name, j);
-				sname = sname_;
-			}
-
-			nv_printk_(disp, debug, "%s%s:\n", cname, sname);
-			nv50_disp_mthd_list(priv, debug, base, impl->mthd.prev,
-					    list, j);
-		}
-	}
-}
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x610bb8 },
-		{ 0x0088, 0x610b9c },
-		{ 0x008c, 0x000000 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_dac = {
-	.mthd = 0x0080,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0400, 0x610b58 },
-		{ 0x0404, 0x610bdc },
-		{ 0x0420, 0x610828 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_sor = {
-	.mthd = 0x0040,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0600, 0x610b70 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_pior = {
-	.mthd = 0x0040,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0700, 0x610b80 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_head = {
-	.mthd = 0x0400,
-	.addr = 0x000540,
-	.data = {
-		{ 0x0800, 0x610ad8 },
-		{ 0x0804, 0x610ad0 },
-		{ 0x0808, 0x610a48 },
-		{ 0x080c, 0x610a78 },
-		{ 0x0810, 0x610ac0 },
-		{ 0x0814, 0x610af8 },
-		{ 0x0818, 0x610b00 },
-		{ 0x081c, 0x610ae8 },
-		{ 0x0820, 0x610af0 },
-		{ 0x0824, 0x610b08 },
-		{ 0x0828, 0x610b10 },
-		{ 0x082c, 0x610a68 },
-		{ 0x0830, 0x610a60 },
-		{ 0x0834, 0x000000 },
-		{ 0x0838, 0x610a40 },
-		{ 0x0840, 0x610a24 },
-		{ 0x0844, 0x610a2c },
-		{ 0x0848, 0x610aa8 },
-		{ 0x084c, 0x610ab0 },
-		{ 0x0860, 0x610a84 },
-		{ 0x0864, 0x610a90 },
-		{ 0x0868, 0x610b18 },
-		{ 0x086c, 0x610b20 },
-		{ 0x0870, 0x610ac8 },
-		{ 0x0874, 0x610a38 },
-		{ 0x0880, 0x610a58 },
-		{ 0x0884, 0x610a9c },
-		{ 0x08a0, 0x610a70 },
-		{ 0x08a4, 0x610a50 },
-		{ 0x08a8, 0x610ae0 },
-		{ 0x08c0, 0x610b28 },
-		{ 0x08c4, 0x610b30 },
-		{ 0x08c8, 0x610b40 },
-		{ 0x08d4, 0x610b38 },
-		{ 0x08d8, 0x610b48 },
-		{ 0x08dc, 0x610b50 },
-		{ 0x0900, 0x610a18 },
-		{ 0x0904, 0x610ab8 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &nv50_disp_core_mthd_base },
-		{    "DAC", 3, &nv50_disp_core_mthd_dac  },
-		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
-		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
-		{   "HEAD", 2, &nv50_disp_core_mthd_head },
-		{}
-	}
-};
-
-int
-nv50_disp_core_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_core_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_disp_dmac *mast;
-	int ret;
-
-	nv_ioctl(parent, "create disp core channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp core channel dma vers %d "
-				 "pushbuf %08x\n",
-			 args->v0.version, args->v0.pushbuf);
-	} else
-		return ret;
-
-	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
-				     0, sizeof(*mast), (void **)&mast);
-	*pobject = nv_object(mast);
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nv50_disp *disp = nv50_disp(base);
+	if (disp->func->outp.internal.dp)
+		return disp->func->outp.internal.dp(base, index, dcb, poutp);
+	return -ENODEV;
 }
 
 static int
-nv50_disp_core_init(struct nvkm_object *object)
+nv50_disp_outp_external_tmds_(struct nvkm_disp *base, int index,
+			      struct dcb_output *dcb,
+			      struct nvkm_output **poutp)
 {
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-	int ret;
-
-	ret = nv50_disp_chan_init(&mast->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x610028, 0x00010000, 0x00010000);
-
-	/* attempt to unstick channel from some unknown state */
-	if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
-		nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
-	if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
-		nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610204, mast->push);
-	nv_wr32(priv, 0x610208, 0x00010000);
-	nv_wr32(priv, 0x61020c, 0x00000000);
-	nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000, 0x00000000);
-	nv_wr32(priv, 0x610200, 0x01000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
-		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
-		return -EBUSY;
-	}
-
-	return 0;
+	struct nv50_disp *disp = nv50_disp(base);
+	if (disp->func->outp.external.tmds)
+		return disp->func->outp.external.tmds(base, index, dcb, poutp);
+	return -ENODEV;
 }
 
 static int
-nv50_disp_core_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
-	nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
-		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notifications */
-	nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
-
-	return nv50_disp_chan_fini(&mast->base, suspend);
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_core_ofuncs = {
-	.base.ctor = nv50_disp_core_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nv50_disp_core_init,
-	.base.fini = nv50_disp_core_fini,
-	.base.map  = nv50_disp_chan_map,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 0,
-	.attach = nv50_disp_dmac_object_attach,
-	.detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-nv50_disp_base_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x0008c4 },
-		{ 0x0088, 0x0008d0 },
-		{ 0x008c, 0x0008dc },
-		{ 0x0090, 0x0008e4 },
-		{ 0x0094, 0x610884 },
-		{ 0x00a0, 0x6108a0 },
-		{ 0x00a4, 0x610878 },
-		{ 0x00c0, 0x61086c },
-		{ 0x00e0, 0x610858 },
-		{ 0x00e4, 0x610860 },
-		{ 0x00e8, 0x6108ac },
-		{ 0x00ec, 0x6108b4 },
-		{ 0x0100, 0x610894 },
-		{ 0x0110, 0x6108bc },
-		{ 0x0114, 0x61088c },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_base_mthd_image = {
-	.mthd = 0x0400,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0800, 0x6108f0 },
-		{ 0x0804, 0x6108fc },
-		{ 0x0808, 0x61090c },
-		{ 0x080c, 0x610914 },
-		{ 0x0810, 0x610904 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_base_mthd_chan = {
-	.name = "Base",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &nv50_disp_base_mthd_base },
-		{  "Image", 2, &nv50_disp_base_mthd_image },
-		{}
-	}
-};
-
-int
-nv50_disp_base_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv50_disp_outp_external_dp_(struct nvkm_disp *base, int index,
+			    struct dcb_output *dcb, struct nvkm_output **poutp)
 {
-	union {
-		struct nv50_disp_base_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_dmac *dmac;
-	int ret;
-
-	nv_ioctl(parent, "create disp base channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp base channel dma vers %d "
-				 "pushbuf %08x head %d\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
-				     args->v0.head, sizeof(*dmac),
-				     (void **)&dmac);
-	*pobject = nv_object(dmac);
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nv50_disp *disp = nv50_disp(base);
+	if (disp->func->outp.external.dp)
+		return disp->func->outp.external.dp(base, index, dcb, poutp);
+	return -ENODEV;
 }
 
-struct nv50_disp_chan_impl
-nv50_disp_base_ofuncs = {
-	.base.ctor = nv50_disp_base_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nv50_disp_dmac_init,
-	.base.fini = nv50_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 1,
-	.attach = nv50_disp_dmac_object_attach,
-	.detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-nv50_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x0009a0 },
-		{ 0x0088, 0x0009c0 },
-		{ 0x008c, 0x0009c8 },
-		{ 0x0090, 0x6109b4 },
-		{ 0x0094, 0x610970 },
-		{ 0x00a0, 0x610998 },
-		{ 0x00a4, 0x610964 },
-		{ 0x00c0, 0x610958 },
-		{ 0x00e0, 0x6109a8 },
-		{ 0x00e4, 0x6109d0 },
-		{ 0x00e8, 0x6109d8 },
-		{ 0x0100, 0x61094c },
-		{ 0x0104, 0x610984 },
-		{ 0x0108, 0x61098c },
-		{ 0x0800, 0x6109f8 },
-		{ 0x0808, 0x610a08 },
-		{ 0x080c, 0x610a10 },
-		{ 0x0810, 0x610a00 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &nv50_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-int
-nv50_disp_ovly_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_overlay_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_dmac *dmac;
-	int ret;
-
-	nv_ioctl(parent, "create disp overlay channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp overlay channel dma vers %d "
-				 "pushbuf %08x head %d\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
-				     args->v0.head, sizeof(*dmac),
-				     (void **)&dmac);
-	*pobject = nv_object(dmac);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_ovly_ofuncs = {
-	.base.ctor = nv50_disp_ovly_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nv50_disp_dmac_init,
-	.base.fini = nv50_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 3,
-	.attach = nv50_disp_dmac_object_attach,
-	.detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO PIO channel base class
- ******************************************************************************/
-
-static int
-nv50_disp_pioc_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, int head,
-		       int length, void **pobject)
-{
-	return nv50_disp_chan_create_(parent, engine, oclass, head,
-				      length, pobject);
-}
-
-void
-nv50_disp_pioc_dtor(struct nvkm_object *object)
-{
-	struct nv50_disp_pioc *pioc = (void *)object;
-	nv50_disp_chan_destroy(&pioc->base);
-}
-
-static int
-nv50_disp_pioc_init(struct nvkm_object *object)
+static void
+nv50_disp_vblank_fini_(struct nvkm_disp *base, int head)
 {
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&pioc->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
-		nv_error(pioc, "timeout0: 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
-		nv_error(pioc, "timeout1: 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
+	struct nv50_disp *disp = nv50_disp(base);
+	disp->func->head.vblank_fini(disp, head);
 }
 
-static int
-nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend)
+static void
+nv50_disp_vblank_init_(struct nvkm_disp *base, int head)
 {
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-
-	nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
-		nv_error(pioc, "timeout: 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	return nv50_disp_chan_fini(&pioc->base, suspend);
+	struct nv50_disp *disp = nv50_disp(base);
+	disp->func->head.vblank_init(disp, head);
 }
 
-/*******************************************************************************
- * EVO immediate overlay channel objects
- ******************************************************************************/
-
-int
-nv50_disp_oimm_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+static void
+nv50_disp_intr_(struct nvkm_disp *base)
 {
-	union {
-		struct nv50_disp_overlay_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_pioc *pioc;
-	int ret;
-
-	nv_ioctl(parent, "create disp overlay size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp overlay vers %d head %d\n",
-			 args->v0.version, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
-				     sizeof(*pioc), (void **)&pioc);
-	*pobject = nv_object(pioc);
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nv50_disp *disp = nv50_disp(base);
+	disp->func->intr(disp);
 }
 
-struct nv50_disp_chan_impl
-nv50_disp_oimm_ofuncs = {
-	.base.ctor = nv50_disp_oimm_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = nv50_disp_pioc_init,
-	.base.fini = nv50_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 5,
-};
-
-/*******************************************************************************
- * EVO cursor channel objects
- ******************************************************************************/
-
-int
-nv50_disp_curs_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+static void *
+nv50_disp_dtor_(struct nvkm_disp *base)
 {
-	union {
-		struct nv50_disp_cursor_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_pioc *pioc;
-	int ret;
-
-	nv_ioctl(parent, "create disp cursor size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp cursor vers %d head %d\n",
-			 args->v0.version, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
-				     sizeof(*pioc), (void **)&pioc);
-	*pobject = nv_object(pioc);
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nv50_disp *disp = nv50_disp(base);
+	nvkm_event_fini(&disp->uevent);
+	return disp;
 }
 
-struct nv50_disp_chan_impl
-nv50_disp_curs_ofuncs = {
-	.base.ctor = nv50_disp_curs_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = nv50_disp_pioc_init,
-	.base.fini = nv50_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 7,
+static const struct nvkm_disp_func
+nv50_disp_ = {
+	.dtor = nv50_disp_dtor_,
+	.intr = nv50_disp_intr_,
+	.root = nv50_disp_root_,
+	.outp.internal.crt = nv50_disp_outp_internal_crt_,
+	.outp.internal.tmds = nv50_disp_outp_internal_tmds_,
+	.outp.internal.lvds = nv50_disp_outp_internal_lvds_,
+	.outp.internal.dp = nv50_disp_outp_internal_dp_,
+	.outp.external.tmds = nv50_disp_outp_external_tmds_,
+	.outp.external.dp = nv50_disp_outp_external_dp_,
+	.head.vblank_init = nv50_disp_vblank_init_,
+	.head.vblank_fini = nv50_disp_vblank_fini_,
 };
 
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
 int
-nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
+nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
+	       int index, int heads, struct nvkm_disp **pdisp)
 {
-	const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
-	const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
-	const u32 total  = nv_rd32(priv, 0x610afc + (head * 0x540));
-	union {
-		struct nv04_disp_scanoutpos_v0 v0;
-	} *args = data;
+	struct nv50_disp *disp;
 	int ret;
 
-	nv_ioctl(object, "disp scanoutpos size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
-		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
-		args->v0.hblanke = (blanke & 0x0000ffff);
-		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
-		args->v0.hblanks = (blanks & 0x0000ffff);
-		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
-		args->v0.htotal  = ( total & 0x0000ffff);
-		args->v0.time[0] = ktime_to_ns(ktime_get());
-		args->v0.vline = /* vline read locks hline */
-			nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
-		args->v0.time[1] = ktime_to_ns(ktime_get());
-		args->v0.hline =
-			nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
-	} else
-		return ret;
-
-	return 0;
-}
-
-int
-nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
-	const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
-	union {
-		struct nv50_disp_mthd_v0 v0;
-		struct nv50_disp_mthd_v1 v1;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nvkm_output *outp = NULL;
-	struct nvkm_output *temp;
-	u16 type, mask = 0;
-	int head, ret;
-
-	if (mthd != NV50_DISP_MTHD)
-		return -EINVAL;
-
-	nv_ioctl(object, "disp mthd size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
-			 args->v0.version, args->v0.method, args->v0.head);
-		mthd = args->v0.method;
-		head = args->v0.head;
-	} else
-	if (nvif_unpack(args->v1, 1, 1, true)) {
-		nv_ioctl(object, "disp mthd vers %d mthd %02x "
-				 "type %04x mask %04x\n",
-			 args->v1.version, args->v1.method,
-			 args->v1.hasht, args->v1.hashm);
-		mthd = args->v1.method;
-		type = args->v1.hasht;
-		mask = args->v1.hashm;
-		head = ffs((mask >> 8) & 0x0f) - 1;
-	} else
-		return ret;
-
-	if (head < 0 || head >= priv->head.nr)
-		return -ENXIO;
-
-	if (mask) {
-		list_for_each_entry(temp, &priv->base.outp, head) {
-			if ((temp->info.hasht         == type) &&
-			    (temp->info.hashm & mask) == mask) {
-				outp = temp;
-				break;
-			}
-		}
-		if (outp == NULL)
-			return -ENXIO;
-	}
-
-	switch (mthd) {
-	case NV50_DISP_SCANOUTPOS:
-		return impl->head.scanoutpos(object, priv, data, size, head);
-	default:
-		break;
-	}
-
-	switch (mthd * !!outp) {
-	case NV50_DISP_MTHD_V1_DAC_PWR:
-		return priv->dac.power(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_DAC_LOAD:
-		return priv->dac.sense(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_PWR:
-		return priv->sor.power(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
-		if (!priv->sor.hda_eld)
-			return -ENODEV;
-		return priv->sor.hda_eld(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
-		if (!priv->sor.hdmi)
-			return -ENODEV;
-		return priv->sor.hdmi(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
-		union {
-			struct nv50_disp_sor_lvds_script_v0 v0;
-		} *args = data;
-		nv_ioctl(object, "disp sor lvds script size %d\n", size);
-		if (nvif_unpack(args->v0, 0, 0, false)) {
-			nv_ioctl(object, "disp sor lvds script "
-					 "vers %d name %04x\n",
-				 args->v0.version, args->v0.script);
-			priv->sor.lvdsconf = args->v0.script;
-			return 0;
-		} else
-			return ret;
-	}
-		break;
-	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
-		struct nvkm_output_dp *outpdp = (void *)outp;
-		union {
-			struct nv50_disp_sor_dp_pwr_v0 v0;
-		} *args = data;
-		nv_ioctl(object, "disp sor dp pwr size %d\n", size);
-		if (nvif_unpack(args->v0, 0, 0, false)) {
-			nv_ioctl(object, "disp sor dp pwr vers %d state %d\n",
-				 args->v0.version, args->v0.state);
-			if (args->v0.state == 0) {
-				nvkm_notify_put(&outpdp->irq);
-				((struct nvkm_output_dp_impl *)nv_oclass(outp))
-					->lnk_pwr(outpdp, 0);
-				atomic_set(&outpdp->lt.done, 0);
-				return 0;
-			} else
-			if (args->v0.state != 0) {
-				nvkm_output_dp_train(&outpdp->base, 0, true);
-				return 0;
-			}
-		} else
-			return ret;
-	}
-		break;
-	case NV50_DISP_MTHD_V1_PIOR_PWR:
-		if (!priv->pior.power)
-			return -ENODEV;
-		return priv->pior.power(object, priv, data, size, head, outp);
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-
-int
-nv50_disp_main_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_base *base;
-	int ret;
+	if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_WORK(&disp->supervisor, func->super);
+	disp->func = func;
+	*pdisp = &disp->base;
 
-	ret = nvkm_parent_create(parent, engine, oclass, 0,
-				 priv->sclass, 0, &base);
-	*pobject = nv_object(base);
+	ret = nvkm_disp_ctor(&nv50_disp_, device, index, heads, &disp->base);
 	if (ret)
 		return ret;
 
-	return nvkm_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
-			      &base->ramht);
+	return nvkm_event_init(func->uevent, 1, 1 + (heads * 4), &disp->uevent);
 }
 
 void
-nv50_disp_main_dtor(struct nvkm_object *object)
-{
-	struct nv50_disp_base *base = (void *)object;
-	nvkm_ramht_ref(NULL, &base->ramht);
-	nvkm_parent_destroy(&base->base);
-}
-
-static int
-nv50_disp_main_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-	int ret, i;
-	u32 tmp;
-
-	ret = nvkm_parent_init(&base->base);
-	if (ret)
-		return ret;
-
-	/* The below segments of code copying values from one register to
-	 * another appear to inform EVO of the display capabilities or
-	 * something similar.  NFI what the 0x614004 caps are for..
-	 */
-	tmp = nv_rd32(priv, 0x614004);
-	nv_wr32(priv, 0x610184, tmp);
-
-	/* ... CRTC caps */
-	for (i = 0; i < priv->head.nr; i++) {
-		tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
-		nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
-		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
-		nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
-		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
-		nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
-		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
-		nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
-	}
-
-	/* ... DAC caps */
-	for (i = 0; i < priv->dac.nr; i++) {
-		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
-		nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
-	}
-
-	/* ... SOR caps */
-	for (i = 0; i < priv->sor.nr; i++) {
-		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
-		nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
-	}
-
-	/* ... PIOR caps */
-	for (i = 0; i < priv->pior.nr; i++) {
-		tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
-		nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
-	}
-
-	/* steal display away from vbios, or something like that */
-	if (nv_rd32(priv, 0x610024) & 0x00000100) {
-		nv_wr32(priv, 0x610024, 0x00000100);
-		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
-		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
-			nv_error(priv, "timeout acquiring display\n");
-			return -EBUSY;
-		}
-	}
-
-	/* point at display engine memory area (hash table, objects) */
-	nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
-
-	/* enable supervisor interrupts, disable everything else */
-	nv_wr32(priv, 0x61002c, 0x00000370);
-	nv_wr32(priv, 0x610028, 0x00000000);
-	return 0;
-}
-
-static int
-nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-
-	/* disable all interrupts */
-	nv_wr32(priv, 0x610024, 0x00000000);
-	nv_wr32(priv, 0x610020, 0x00000000);
-
-	return nvkm_parent_fini(&base->base, suspend);
-}
-
-struct nvkm_ofuncs
-nv50_disp_main_ofuncs = {
-	.ctor = nv50_disp_main_ctor,
-	.dtor = nv50_disp_main_dtor,
-	.init = nv50_disp_main_init,
-	.fini = nv50_disp_main_fini,
-	.mthd = nv50_disp_main_mthd,
-	.ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-nv50_disp_main_oclass[] = {
-	{ NV50_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-nv50_disp_sclass[] = {
-	{ NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-/*******************************************************************************
- * Display context, tracks instmem allocation and prevents more than one
- * client using the display hardware at any time.
- ******************************************************************************/
-
-static int
-nv50_disp_data_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nvkm_engctx *ectx;
-	int ret = -EBUSY;
-
-	/* no context needed for channel objects... */
-	if (nv_mclass(parent) != NV_DEVICE) {
-		atomic_inc(&parent->refcount);
-		*pobject = parent;
-		return 1;
-	}
-
-	/* allocate display hardware to client */
-	mutex_lock(&nv_subdev(priv)->mutex);
-	if (list_empty(&nv_engine(priv)->contexts)) {
-		ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000,
-					 0x10000, NVOBJ_FLAG_HEAP, &ectx);
-		*pobject = nv_object(ectx);
-	}
-	mutex_unlock(&nv_subdev(priv)->mutex);
-	return ret;
-}
-
-struct nvkm_oclass
-nv50_disp_cclass = {
-	.handle = NV_ENGCTX(DISP, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_disp_data_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static void
-nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+nv50_disp_vblank_fini(struct nv50_disp *disp, int head)
 {
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x61002c, (4 << head), 0);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x61002c, (4 << head), 0);
 }
 
-static void
-nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
+void
+nv50_disp_vblank_init(struct nv50_disp *disp, int head)
 {
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x61002c, (4 << head), (4 << head));
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
 }
 
-const struct nvkm_event_func
-nv50_disp_vblank_func = {
-	.ctor = nvkm_disp_vblank_ctor,
-	.init = nv50_disp_vblank_init,
-	.fini = nv50_disp_vblank_fini,
-};
-
 static const struct nvkm_enum
 nv50_disp_intr_error_type[] = {
 	{ 3, "ILLEGAL_MTHD" },
@@ -1323,70 +190,46 @@ nv50_disp_intr_error_code[] = {
 };
 
 static void
-nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid)
+nv50_disp_intr_error(struct nv50_disp *disp, int chid)
 {
-	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 data = nv_rd32(priv, 0x610084 + (chid * 0x08));
-	u32 addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
+	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
 	u32 code = (addr & 0x00ff0000) >> 16;
 	u32 type = (addr & 0x00007000) >> 12;
 	u32 mthd = (addr & 0x00000ffc);
 	const struct nvkm_enum *ec, *et;
-	char ecunk[6], etunk[6];
 
 	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
-	if (!et)
-		snprintf(etunk, sizeof(etunk), "UNK%02X", type);
-
 	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);
-	if (!ec)
-		snprintf(ecunk, sizeof(ecunk), "UNK%02X", code);
 
-	nv_error(priv, "%s [%s] chid %d mthd 0x%04x data 0x%08x\n",
-		 et ? et->name : etunk, ec ? ec->name : ecunk,
-		 chid, mthd, data);
+	nvkm_error(subdev,
+		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
+		   type, et ? et->name : "", code, ec ? ec->name : "",
+		   chid, mthd, data);
 
-	if (chid == 0) {
-		switch (mthd) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
-					    impl->mthd.core);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 2) {
-		switch (mthd) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
-					    impl->mthd.base);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 4) {
+	if (chid < ARRAY_SIZE(disp->chan)) {
 		switch (mthd) {
 		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 3,
-					    impl->mthd.ovly);
+			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
 			break;
 		default:
 			break;
 		}
 	}
 
-	nv_wr32(priv, 0x610020, 0x00010000 << chid);
-	nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
+	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
 }
 
 static struct nvkm_output *
-exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
+exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
 	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 	    struct nvbios_outp *info)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvkm_output *outp;
 	u16 mask, type;
 
@@ -1403,7 +246,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
 		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
 		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
 		default:
-			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+			nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
 			return NULL;
 		}
 		or  -= 4;
@@ -1412,9 +255,9 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
 		type = 0x0010;
 		mask = 0;
 		switch (ctrl & 0x00000f00) {
-		case 0x00000000: type |= priv->pior.type[or]; break;
+		case 0x00000000: type |= disp->pior.type[or]; break;
 		default:
-			nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
+			nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
 			return NULL;
 		}
 	}
@@ -1423,7 +266,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
 	mask |= 0x0001 << or;
 	mask |= 0x0100 << head;
 
-	list_for_each_entry(outp, &priv->base.outp, head) {
+	list_for_each_entry(outp, &disp->base.outp, head) {
 		if ((outp->info.hasht & 0xff) == type &&
 		    (outp->info.hashm & mask) == mask) {
 			*data = nvbios_outp_match(bios, outp->info.hasht,
@@ -1439,9 +282,11 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
 }
 
 static struct nvkm_output *
-exec_script(struct nv50_disp_priv *priv, int head, int id)
+exec_script(struct nv50_disp *disp, int head, int id)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_output *outp;
 	struct nvbios_outp info;
 	u8  ver, hdr, cnt, len;
@@ -1450,27 +295,27 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
 	int i;
 
 	/* DAC */
-	for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
-		ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+	for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
+		ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));
 
 	/* SOR */
 	if (!(ctrl & (1 << head))) {
-		if (nv_device(priv)->chipset  < 0x90 ||
-		    nv_device(priv)->chipset == 0x92 ||
-		    nv_device(priv)->chipset == 0xa0) {
+		if (device->chipset  < 0x90 ||
+		    device->chipset == 0x92 ||
+		    device->chipset == 0xa0) {
 			reg = 0x610b74;
 		} else {
 			reg = 0x610798;
 		}
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
-			ctrl = nv_rd32(priv, reg + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
+			ctrl = nvkm_rd32(device, reg + (i * 8));
 		i += 4;
 	}
 
 	/* PIOR */
 	if (!(ctrl & (1 << head))) {
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
-			ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
+			ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
 		i += 8;
 	}
 
@@ -1478,10 +323,10 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
 		return NULL;
 	i--;
 
-	outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
 	if (outp) {
 		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
+			.subdev = subdev,
 			.bios = bios,
 			.offset = info.script[id],
 			.outp = &outp->info,
@@ -1496,9 +341,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
 }
 
 static struct nvkm_output *
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
+exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_output *outp;
 	struct nvbios_outp info1;
 	struct nvbios_ocfg info2;
@@ -1508,27 +355,27 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
 	int i;
 
 	/* DAC */
-	for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
-		ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+	for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
+		ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));
 
 	/* SOR */
 	if (!(ctrl & (1 << head))) {
-		if (nv_device(priv)->chipset  < 0x90 ||
-		    nv_device(priv)->chipset == 0x92 ||
-		    nv_device(priv)->chipset == 0xa0) {
+		if (device->chipset  < 0x90 ||
+		    device->chipset == 0x92 ||
+		    device->chipset == 0xa0) {
 			reg = 0x610b70;
 		} else {
 			reg = 0x610794;
 		}
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
-			ctrl = nv_rd32(priv, reg + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
+			ctrl = nvkm_rd32(device, reg + (i * 8));
 		i += 4;
 	}
 
 	/* PIOR */
 	if (!(ctrl & (1 << head))) {
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
-			ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
+			ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
 		i += 8;
 	}
 
@@ -1536,7 +383,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
 		return NULL;
 	i--;
 
-	outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
 	if (!outp)
 		return NULL;
 
@@ -1548,7 +395,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
 				*conf |= 0x0100;
 			break;
 		case DCB_OUTPUT_LVDS:
-			*conf = priv->sor.lvdsconf;
+			*conf = disp->sor.lvdsconf;
 			break;
 		case DCB_OUTPUT_DP:
 			*conf = (ctrl & 0x00000f00) >> 8;
@@ -1568,7 +415,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
 		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
 		if (data) {
 			struct nvbios_init init = {
-				.subdev = nv_subdev(priv),
+				.subdev = subdev,
 				.bios = bios,
 				.offset = data,
 				.outp = &outp->info,
@@ -1584,15 +431,16 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
 }
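
exec_clkcmp() resolves a clock-comparison script by matching the head's pixel clock against per-output entries from the VBIOS (nvbios_oclk_match() is the real lookup). A rough standalone sketch of that kind of selection, with an invented table:

/* Sketch of pclk-based script selection; table values are invented. */
#include <stdio.h>

struct clkcmp { unsigned clk_khz; unsigned script; };

static unsigned oclk_match(const struct clkcmp *t, int n, unsigned pclk)
{
	for (int i = 0; i < n; i++)
		if (pclk <= t[i].clk_khz)	/* first bound covering pclk */
			return t[i].script;
	return 0;
}

int main(void)
{
	const struct clkcmp table[] = {
		{ 165000, 0x1000 },	/* single-link TMDS range */
		{ 330000, 0x2000 },	/* dual-link range */
	};
	printf("script %#x\n", oclk_match(table, 2, 148500));
	return 0;
}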
 
 static void
-nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
 {
-	exec_script(priv, head, 1);
+	exec_script(disp, head, 1);
 }
 
 static void
-nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
 {
-	struct nvkm_output *outp = exec_script(priv, head, 2);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_output *outp = exec_script(disp, head, 2);
 
 	/* the binary driver does this outside of the supervisor handling
 	 * (after the third supervisor from a detach).  we (currently?)
@@ -1608,10 +456,10 @@ nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
 	 * in a blank screen (SOR_PWR off/on can restore it)
 	 */
 	if (outp && outp->info.type == DCB_OUTPUT_DP) {
-		struct nvkm_output_dp *outpdp = (void *)outp;
+		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
 		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
-			.bios = nvkm_bios(priv),
+			.subdev = subdev,
+			.bios = subdev->device->bios,
 			.outp = &outp->info,
 			.crtc = head,
 			.offset = outpdp->info.script[4],
@@ -1624,29 +472,32 @@ nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
 }
 
 static void
-nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(priv);
-	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_devinit *devinit = device->devinit;
+	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
 	if (pclk)
-		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
+		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
 }
 
 static void
-nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
+nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
 			  struct dcb_output *outp, u32 pclk)
 {
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	const int link = !(outp->sorconf.link & 1);
 	const int   or = ffs(outp->or) - 1;
 	const u32 soff = (  or * 0x800);
 	const u32 loff = (link * 0x080) + soff;
-	const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+	const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
 	const u32 symbol = 100000;
-	const s32 vactive = nv_rd32(priv, 0x610af8 + (head * 0x540)) & 0xffff;
-	const s32 vblanke = nv_rd32(priv, 0x610ae8 + (head * 0x540)) & 0xffff;
-	const s32 vblanks = nv_rd32(priv, 0x610af0 + (head * 0x540)) & 0xffff;
-	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
-	u32 clksor = nv_rd32(priv, 0x614300 + soff);
+	const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
+	const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
+	const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
+	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
+	u32 clksor = nvkm_rd32(device, 0x614300 + soff);
 	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
 	int TU, VTUi, VTUf, VTUa;
 	u64 link_data_rate, link_ratio, unk;
@@ -1662,14 +513,14 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
 	value = value * link_bw;
 	do_div(value, pclk);
 	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
-	nv_mask(priv, 0x61c1e8 + soff, 0x0000ffff, value);
+	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);
 
 	/* symbols/vblank - algorithm taken from comments in tegra driver */
 	value = vblanks - vblanke - 25;
 	value = value * link_bw;
 	do_div(value, pclk);
 	value = value - ((36 / link_nr) + 3) - 1;
-	nv_mask(priv, 0x61c1ec + soff, 0x00ffffff, value);
+	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);
 
 	/* watermark / activesym */
 	if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
@@ -1734,7 +585,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
 	}
 
 	if (!bestTU) {
-		nv_error(priv, "unable to find suitable dp config\n");
+		nvkm_error(subdev, "unable to find suitable dp config\n");
 		return;
 	}
 
@@ -1745,22 +596,23 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
 	do_div(unk, symbol);
 	unk += 6;
 
-	nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
-	nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+	nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
 						   bestVTUf << 16 |
 						   bestVTUi << 8 | unk);
 }
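
The blanking-symbol writes above scale the blanking interval by the ratio of link clock to pixel clock, then subtract framing overhead. A worked standalone example with assumed 1080p-style timings (all numbers illustrative, not from real hardware):

/* Worked example of the symbols-per-blanking math; timings assumed. */
#include <stdio.h>

int main(void)
{
	long long pclk = 148500;		/* kHz, assumed */
	long long link_bw = 270000;		/* kHz per lane, assumed */
	int link_nr = 2;
	long long blanke = 2008, blanks = 2052;	/* hypothetical timings */

	long long value = blanks - blanke - 25;
	value = value * link_bw / pclk;		/* scale into link symbols */
	value -= 12 / link_nr;			/* framing overhead, no EF */
	printf("blanking symbols: %lld\n", value);
	return 0;
}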
 
 static void
-nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	struct nvkm_output *outp;
-	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
 	u32 hval, hreg = 0x614200 + (head * 0x800);
 	u32 oval, oreg;
 	u32 mask, conf;
 
-	outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
+	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
 	if (!outp)
 		return;
 
@@ -1787,10 +639,10 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
 		u32 ctrl, datarate;
 
 		if (outp->info.location == 0) {
-			ctrl = nv_rd32(priv, 0x610794 + soff);
+			ctrl = nvkm_rd32(device, 0x610794 + soff);
 			soff = 1;
 		} else {
-			ctrl = nv_rd32(priv, 0x610b80 + soff);
+			ctrl = nvkm_rd32(device, 0x610b80 + soff);
 			soff = 2;
 		}
 
@@ -1804,10 +656,10 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
 		}
 
 		if (nvkm_output_dp_train(outp, datarate / soff, true))
-			ERR("link not trained before attach\n");
+			OUTP_ERR(outp, "link not trained before attach");
 	}
 
-	exec_clkcmp(priv, head, 0, pclk, &conf);
+	exec_clkcmp(disp, head, 0, pclk, &conf);
 
 	if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
 		oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
@@ -1817,7 +669,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
 	} else
 	if (!outp->info.location) {
 		if (outp->info.type == DCB_OUTPUT_DP)
-			nv50_disp_intr_unk20_2_dp(priv, head, &outp->info, pclk);
+			nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
 		oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
 		oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
 		hval = 0x00000000;
@@ -1829,8 +681,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
 		mask = 0x00000707;
 	}
 
-	nv_mask(priv, hreg, 0x0000000f, hval);
-	nv_mask(priv, oreg, mask, oval);
+	nvkm_mask(device, hreg, 0x0000000f, hval);
+	nvkm_mask(device, oreg, mask, oval);
 }
 
 /* If programming a TMDS output on a SOR that can also be configured for
@@ -1842,10 +694,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
  * programmed for DisplayPort.
  */
 static void
-nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv,
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
 			    struct dcb_output *outp)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	const int link = !(outp->sorconf.link & 1);
 	const int   or = ffs(outp->or) - 1;
 	const u32 loff = (or * 0x800) + (link * 0x80);
@@ -1854,166 +707,136 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv,
 	u8  ver, hdr;
 
 	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
-		nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
 }
 
 static void
-nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	struct nvkm_output *outp;
-	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
 	u32 conf;
 
-	outp = exec_clkcmp(priv, head, 1, pclk, &conf);
+	outp = exec_clkcmp(disp, head, 1, pclk, &conf);
 	if (!outp)
 		return;
 
 	if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
-		nv50_disp_intr_unk40_0_tmds(priv, &outp->info);
+		nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
 }
 
 void
 nv50_disp_intr_supervisor(struct work_struct *work)
 {
-	struct nv50_disp_priv *priv =
-		container_of(work, struct nv50_disp_priv, supervisor);
-	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 super = nv_rd32(priv, 0x610030);
+	struct nv50_disp *disp =
+		container_of(work, struct nv50_disp, supervisor);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 super = nvkm_rd32(device, 0x610030);
 	int head;
 
-	nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
+	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);
 
-	if (priv->super & 0x00000010) {
-		nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
-		for (head = 0; head < priv->head.nr; head++) {
+	if (disp->super & 0x00000010) {
+		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000020 << head)))
 				continue;
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk10_0(priv, head);
+			nv50_disp_intr_unk10_0(disp, head);
 		}
 	} else
-	if (priv->super & 0x00000020) {
-		for (head = 0; head < priv->head.nr; head++) {
+	if (disp->super & 0x00000020) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk20_0(priv, head);
+			nv50_disp_intr_unk20_0(disp, head);
 		}
-		for (head = 0; head < priv->head.nr; head++) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000200 << head)))
 				continue;
-			nv50_disp_intr_unk20_1(priv, head);
+			nv50_disp_intr_unk20_1(disp, head);
 		}
-		for (head = 0; head < priv->head.nr; head++) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk20_2(priv, head);
+			nv50_disp_intr_unk20_2(disp, head);
 		}
 	} else
-	if (priv->super & 0x00000040) {
-		for (head = 0; head < priv->head.nr; head++) {
+	if (disp->super & 0x00000040) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk40_0(priv, head);
+			nv50_disp_intr_unk40_0(disp, head);
 		}
 	}
 
-	nv_wr32(priv, 0x610030, 0x80000000);
+	nvkm_wr32(device, 0x610030, 0x80000000);
 }
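
Each supervisor stage repeats the same walk: test a per-head status bit in the 0x610030 value and run the stage handler for every head that has it set. Reduced to a standalone loop (values made up):

/* Minimal sketch of the per-head bit walk in the supervisor stages. */
#include <stdio.h>

int main(void)
{
	unsigned super = 0x00000080;	/* head 0 pending, hypothetical */
	int heads = 2;

	for (int head = 0; head < heads; head++) {
		if (!(super & (0x00000080 << head)))
			continue;
		printf("run stage handler for head %d\n", head);
	}
	return 0;
}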
 
 void
-nv50_disp_intr(struct nvkm_subdev *subdev)
+nv50_disp_intr(struct nv50_disp *disp)
 {
-	struct nv50_disp_priv *priv = (void *)subdev;
-	u32 intr0 = nv_rd32(priv, 0x610020);
-	u32 intr1 = nv_rd32(priv, 0x610024);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x610020);
+	u32 intr1 = nvkm_rd32(device, 0x610024);
 
 	while (intr0 & 0x001f0000) {
 		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
-		nv50_disp_intr_error(priv, chid);
+		nv50_disp_intr_error(disp, chid);
 		intr0 &= ~(0x00010000 << chid);
 	}
 
 	while (intr0 & 0x0000001f) {
 		u32 chid = __ffs(intr0 & 0x0000001f);
-		nv50_disp_chan_uevent_send(priv, chid);
+		nv50_disp_chan_uevent_send(disp, chid);
 		intr0 &= ~(0x00000001 << chid);
 	}
 
 	if (intr1 & 0x00000004) {
-		nvkm_disp_vblank(&priv->base, 0);
-		nv_wr32(priv, 0x610024, 0x00000004);
-		intr1 &= ~0x00000004;
+		nvkm_disp_vblank(&disp->base, 0);
+		nvkm_wr32(device, 0x610024, 0x00000004);
 	}
 
 	if (intr1 & 0x00000008) {
-		nvkm_disp_vblank(&priv->base, 1);
-		nv_wr32(priv, 0x610024, 0x00000008);
-		intr1 &= ~0x00000008;
+		nvkm_disp_vblank(&disp->base, 1);
+		nvkm_wr32(device, 0x610024, 0x00000008);
 	}
 
 	if (intr1 & 0x00000070) {
-		priv->super = (intr1 & 0x00000070);
-		schedule_work(&priv->supervisor);
-		nv_wr32(priv, 0x610024, priv->super);
-		intr1 &= ~0x00000070;
-	}
-}
+		disp->super = (intr1 & 0x00000070);
+		schedule_work(&disp->supervisor);
+		nvkm_wr32(device, 0x610024, disp->super);
+	}
+}
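
The channel loops in nv50_disp_intr() peel one pending bit at a time with find-first-set. A standalone equivalent using POSIX ffs() (1-based, hence the extra -1) in place of the kernel's 0-based __ffs():

/* Sketch of the bit-peeling dispatch loop; intr value is made up. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned intr0 = 0x00050000;	/* channels 0 and 2 pending */

	while (intr0 & 0x001f0000) {
		unsigned chid = ffs(intr0 & 0x001f0000) - 1 - 16;
		printf("error on channel %u\n", chid);
		intr0 &= ~(0x00010000 << chid);
	}
	return 0;
}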
+
+static const struct nv50_disp_func
+nv50_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &nv50_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 2,
+	.sor.power = nv50_sor_power,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
+};
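
The nv50_disp_func table above replaces per-object method pointers with a single const ops structure shared by every instance on a chipset. The pattern, stripped to a standalone example with invented names:

/* Const function-table pattern; all names here are invented. */
#include <stdio.h>

struct disp_func {
	int dac_nr;
	void (*intr)(void);
};

static void my_intr(void) { printf("intr\n"); }

static const struct disp_func my_disp_func = {
	.dac_nr = 3,
	.intr = my_intr,
};

int main(void)
{
	my_disp_func.intr();
	printf("%d DACs\n", my_disp_func.dac_nr);
	return 0;
}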
 
-static int
-nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = nv50_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = nv50_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 2;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&nv50_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-nv50_disp_outp_sclass[] = {
-	&nv50_pior_dp_impl.base.base,
-	NULL
-};
-
-struct nvkm_oclass *
-nv50_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x50),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  nv50_disp_outp_sclass,
-	.mthd.core = &nv50_disp_core_mthd_chan,
-	.mthd.base = &nv50_disp_base_mthd_chan,
-	.mthd.ovly = &nv50_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index b4ed620070fa..aecebd8717e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -1,17 +1,18 @@
 #ifndef __NV50_DISP_H__
 #define __NV50_DISP_H__
+#define nv50_disp(p) container_of((p), struct nv50_disp, base)
 #include "priv.h"
 struct nvkm_output;
 struct nvkm_output_dp;
 
 #define NV50_DISP_MTHD_ struct nvkm_object *object,                            \
-	struct nv50_disp_priv *priv, void *data, u32 size
+	struct nv50_disp *disp, void *data, u32 size
 #define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
 #define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
 
-struct nv50_disp_priv {
+struct nv50_disp {
+	const struct nv50_disp_func *func;
 	struct nvkm_disp base;
-	struct nvkm_oclass *sclass;
 
 	struct work_struct supervisor;
 	u32 super;
@@ -19,208 +20,98 @@ struct nv50_disp_priv {
 	struct nvkm_event uevent;
 
 	struct {
-		int nr;
-	} head;
-	struct {
-		int nr;
-		int (*power)(NV50_DISP_MTHD_V1);
-		int (*sense)(NV50_DISP_MTHD_V1);
-	} dac;
-	struct {
-		int nr;
-		int (*power)(NV50_DISP_MTHD_V1);
-		int (*hda_eld)(NV50_DISP_MTHD_V1);
-		int (*hdmi)(NV50_DISP_MTHD_V1);
 		u32 lvdsconf;
-		void (*magic)(struct nvkm_output *);
 	} sor;
+
 	struct {
-		int nr;
-		int (*power)(NV50_DISP_MTHD_V1);
 		u8 type[3];
 	} pior;
-};
 
-struct nv50_disp_impl {
-	struct nvkm_disp_impl base;
-	struct {
-		const struct nv50_disp_mthd_chan *core;
-		const struct nv50_disp_mthd_chan *base;
-		const struct nv50_disp_mthd_chan *ovly;
-		int prev;
-	} mthd;
-	struct {
-		int (*scanoutpos)(NV50_DISP_MTHD_V0);
-	} head;
+	struct nv50_disp_chan *chan[17];
 };
 
-int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
-int nv50_disp_main_mthd(struct nvkm_object *, u32, void *, u32);
+int nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
 
-int gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
+int gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
 
 int nv50_dac_power(NV50_DISP_MTHD_V1);
 int nv50_dac_sense(NV50_DISP_MTHD_V1);
 
 int gt215_hda_eld(NV50_DISP_MTHD_V1);
-int gf110_hda_eld(NV50_DISP_MTHD_V1);
+int gf119_hda_eld(NV50_DISP_MTHD_V1);
 
 int g84_hdmi_ctrl(NV50_DISP_MTHD_V1);
 int gt215_hdmi_ctrl(NV50_DISP_MTHD_V1);
-int gf110_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int gf119_hdmi_ctrl(NV50_DISP_MTHD_V1);
 int gk104_hdmi_ctrl(NV50_DISP_MTHD_V1);
 
 int nv50_sor_power(NV50_DISP_MTHD_V1);
 int nv50_pior_power(NV50_DISP_MTHD_V1);
 
-#include <core/parent.h>
-
-struct nv50_disp_base {
-	struct nvkm_parent base;
-	struct nvkm_ramht *ramht;
-	u32 chan;
-};
-
-struct nv50_disp_chan_impl {
-	struct nvkm_ofuncs base;
-	int chid;
-	int  (*attach)(struct nvkm_object *, struct nvkm_object *, u32);
-	void (*detach)(struct nvkm_object *, int);
+int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
+		   int index, int heads, struct nvkm_disp **);
+int gf119_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
+		    int index, struct nvkm_disp **);
+
+struct nv50_disp_func_outp {
+	int (* crt)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  tv)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*tmds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*lvds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  dp)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
 };
 
-#include <core/namedb.h>
+struct nv50_disp_func {
+	void (*intr)(struct nv50_disp *);
 
-struct nv50_disp_chan {
-	struct nvkm_namedb base;
-	int chid;
-};
+	const struct nvkm_event_func *uevent;
+	void (*super)(struct work_struct *);
 
-int  nv50_disp_chan_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
-int  nv50_disp_chan_map(struct nvkm_object *, u64 *, u32 *);
-u32  nv50_disp_chan_rd32(struct nvkm_object *, u64);
-void nv50_disp_chan_wr32(struct nvkm_object *, u64, u32);
-extern const struct nvkm_event_func nv50_disp_chan_uevent;
-int  nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
-				struct nvkm_notify *);
-void nv50_disp_chan_uevent_send(struct nv50_disp_priv *, int);
-
-extern const struct nvkm_event_func gf110_disp_chan_uevent;
-
-#define nv50_disp_chan_init(a)                                                 \
-	nvkm_namedb_init(&(a)->base)
-#define nv50_disp_chan_fini(a,b)                                               \
-	nvkm_namedb_fini(&(a)->base, (b))
-
-struct nv50_disp_dmac {
-	struct nv50_disp_chan base;
-	struct nvkm_dmaobj *pushdma;
-	u32 push;
-};
+	const struct nvkm_disp_oclass *root;
 
-void nv50_disp_dmac_dtor(struct nvkm_object *);
+	struct {
+		void (*vblank_init)(struct nv50_disp *, int head);
+		void (*vblank_fini)(struct nv50_disp *, int head);
+		int (*scanoutpos)(NV50_DISP_MTHD_V0);
+	} head;
 
-struct nv50_disp_pioc {
-	struct nv50_disp_chan base;
-};
+	struct {
+		const struct nv50_disp_func_outp internal;
+		const struct nv50_disp_func_outp external;
+	} outp;
 
-void nv50_disp_pioc_dtor(struct nvkm_object *);
+	struct {
+		int nr;
+		int (*power)(NV50_DISP_MTHD_V1);
+		int (*sense)(NV50_DISP_MTHD_V1);
+	} dac;
 
-struct nv50_disp_mthd_list {
-	u32 mthd;
-	u32 addr;
 	struct {
-		u32 mthd;
-		u32 addr;
-		const char *name;
-	} data[];
-};
+		int nr;
+		int (*power)(NV50_DISP_MTHD_V1);
+		int (*hda_eld)(NV50_DISP_MTHD_V1);
+		int (*hdmi)(NV50_DISP_MTHD_V1);
+		void (*magic)(struct nvkm_output *);
+	} sor;
 
-struct nv50_disp_mthd_chan {
-	const char *name;
-	u32 addr;
 	struct {
-		const char *name;
 		int nr;
-		const struct nv50_disp_mthd_list *mthd;
-	} data[];
+		int (*power)(NV50_DISP_MTHD_V1);
+	} pior;
 };
 
-extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs;
-int nv50_disp_core_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
-extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
-int nv50_disp_base_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
-extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
-int nv50_disp_ovly_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
-extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs;
-int nv50_disp_oimm_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
-int nv50_disp_curs_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern struct nvkm_ofuncs nv50_disp_main_ofuncs;
-int  nv50_disp_main_ctor(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, void *, u32,
-			 struct nvkm_object **);
-void nv50_disp_main_dtor(struct nvkm_object *);
-extern struct nvkm_omthds nv50_disp_main_omthds[];
-extern struct nvkm_oclass nv50_disp_cclass;
-void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
-			 const struct nv50_disp_mthd_chan *);
+void nv50_disp_vblank_init(struct nv50_disp *, int);
+void nv50_disp_vblank_fini(struct nv50_disp *, int);
+void nv50_disp_intr(struct nv50_disp *);
 void nv50_disp_intr_supervisor(struct work_struct *);
-void nv50_disp_intr(struct nvkm_subdev *);
-extern const struct nvkm_event_func nv50_disp_vblank_func;
-
-extern const struct nv50_disp_mthd_chan g84_disp_core_mthd_chan;
-extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
-extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
-extern const struct nv50_disp_mthd_chan g84_disp_base_mthd_chan;
-extern const struct nv50_disp_mthd_chan g84_disp_ovly_mthd_chan;
-
-extern const struct nv50_disp_mthd_chan g94_disp_core_mthd_chan;
-
-extern struct nv50_disp_chan_impl gf110_disp_core_ofuncs;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_base;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_dac;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_sor;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_pior;
-extern struct nv50_disp_chan_impl gf110_disp_base_ofuncs;
-extern struct nv50_disp_chan_impl gf110_disp_ovly_ofuncs;
-extern const struct nv50_disp_mthd_chan gf110_disp_base_mthd_chan;
-extern struct nv50_disp_chan_impl gf110_disp_oimm_ofuncs;
-extern struct nv50_disp_chan_impl gf110_disp_curs_ofuncs;
-extern struct nvkm_ofuncs gf110_disp_main_ofuncs;
-extern struct nvkm_oclass gf110_disp_cclass;
-void gf110_disp_intr_supervisor(struct work_struct *);
-void gf110_disp_intr(struct nvkm_subdev *);
-extern const struct nvkm_event_func gf110_disp_vblank_func;
-
-extern const struct nv50_disp_mthd_chan gk104_disp_core_mthd_chan;
-extern const struct nv50_disp_mthd_chan gk104_disp_ovly_mthd_chan;
-
-extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
-extern struct nvkm_oclass *nv50_disp_outp_sclass[];
-
-extern struct nvkm_output_dp_impl g94_sor_dp_impl;
-int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
-extern struct nvkm_oclass *g94_disp_outp_sclass[];
-
-extern struct nvkm_output_dp_impl gf110_sor_dp_impl;
-int gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
-extern struct nvkm_oclass *gf110_disp_outp_sclass[];
-
-void gm204_sor_magic(struct nvkm_output *outp);
-extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
+
+void gf119_disp_vblank_init(struct nv50_disp *, int);
+void gf119_disp_vblank_fini(struct nv50_disp *, int);
+void gf119_disp_intr(struct nv50_disp *);
+void gf119_disp_intr_supervisor(struct work_struct *);
 #endif
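
The new nv50_disp() macro is a container_of() downcast: given a pointer to the embedded nvkm_disp, it recovers the enclosing nv50_disp. A standalone illustration (simplified container_of without typeof checking; struct layout invented):

/* container_of() downcast, self-contained. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int heads; };
struct derived { unsigned super; struct base base; };

int main(void)
{
	struct derived d = { .super = 0x20 };
	struct base *b = &d.base;
	struct derived *back = container_of(b, struct derived, base);
	printf("super %#x\n", back->super);
	return 0;
}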
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
index c0aac7e20d45..54a4ae8d66c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Ilia Mirkin
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,18 +19,19 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * Authors: Ilia Mirkin
+ * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "channv50.h"
+#include "rootnv50.h"
 
-struct nvkm_oclass *
-nv4c_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x4c),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv44_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv04_mc_intr,
-}.base;
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+g84_disp_oimm_oclass = {
+	.base.oclass = G82_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
new file mode 100644
index 000000000000..c658db54afc5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gf119_disp_oimm_oclass = {
+	.base.oclass = GF110_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 9,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
new file mode 100644
index 000000000000..b1fde8c125d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gk104_disp_oimm_oclass = {
+	.base.oclass = GK104_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 9,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
new file mode 100644
index 000000000000..f4e7eb3d1177
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gt215_disp_oimm_oclass = {
+	.base.oclass = GT214_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
new file mode 100644
index 000000000000..cd888a1e443c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_overlay_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+
+	nvif_ioctl(parent, "create disp overlay size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
+			   args->v0.version, args->v0.head);
+		if (args->v0.head >= disp->base.head.nr)
+			return -EINVAL;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_chan_new_(func, mthd, root, chid + head,
+				   head, oclass, pobject);
+}
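
nvif_unpack() accepts the ioctl payload when its version field falls within [minver, maxver] and the structure fits the supplied size. A simplified standalone rendering of that check (the real macro also tracks remaining size for variable-length data; structure layout here is illustrative):

/* Simplified versioned-unpack check. */
#include <stdio.h>
#include <string.h>

struct overlay_v0 { unsigned char version, head, pad[6]; };

static int unpack_v0(const void *data, size_t size, struct overlay_v0 *v0)
{
	if (size < sizeof(*v0))
		return -1;
	memcpy(v0, data, sizeof(*v0));
	return v0->version == 0 ? 0 : -1;	/* minver == maxver == 0 */
}

int main(void)
{
	struct overlay_v0 args = { .version = 0, .head = 1 }, v0;
	if (unpack_v0(&args, sizeof(args), &v0) == 0)
		printf("head %d\n", v0.head);
	return 0;
}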
+
+const struct nv50_disp_pioc_oclass
+nv50_disp_oimm_oclass = {
+	.base.oclass = NV50_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 9224bcbf0159..bbe5ec0dedb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -22,121 +22,66 @@
  * Authors: Ben Skeggs
  */
 #include "outp.h"
-#include "priv.h"
 
 #include <subdev/bios.h>
-#include <subdev/bios/conn.h>
 #include <subdev/bios/dcb.h>
 #include <subdev/i2c.h>
 
-int
-_nvkm_output_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_output_fini(struct nvkm_output *outp)
 {
-	struct nvkm_output *outp = (void *)object;
-	nv_ofuncs(outp->conn)->fini(nv_object(outp->conn), suspend);
-	return nvkm_object_fini(&outp->base, suspend);
+	if (outp->func->fini)
+		outp->func->fini(outp);
 }
 
-int
-_nvkm_output_init(struct nvkm_object *object)
+void
+nvkm_output_init(struct nvkm_output *outp)
 {
-	struct nvkm_output *outp = (void *)object;
-	int ret = nvkm_object_init(&outp->base);
-	if (ret == 0)
-		nv_ofuncs(outp->conn)->init(nv_object(outp->conn));
-	return 0;
+	if (outp->func->init)
+		outp->func->init(outp);
 }
 
 void
-_nvkm_output_dtor(struct nvkm_object *object)
+nvkm_output_del(struct nvkm_output **poutp)
 {
-	struct nvkm_output *outp = (void *)object;
-	list_del(&outp->head);
-	nvkm_object_ref(NULL, (void *)&outp->conn);
-	nvkm_object_destroy(&outp->base);
+	struct nvkm_output *outp = *poutp;
+	if (outp && !WARN_ON(!outp->func)) {
+		if (outp->func->dtor)
+			*poutp = outp->func->dtor(outp);
+		kfree(*poutp);
+		*poutp = NULL;
+	}
 }
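
nvkm_output_del() lets the subclass dtor release its own state and hand back the pointer that should actually be freed, so one kfree() serves every subclass. The shape of that contract, standalone (names invented, userspace malloc standing in for kzalloc/kfree):

/* dtor-returns-allocation pattern. */
#include <stdio.h>
#include <stdlib.h>

struct output;
struct output_func { void *(*dtor)(struct output *); };
struct output { const struct output_func *func; };

static void *my_dtor(struct output *outp)
{
	printf("release private state\n");
	return outp;		/* hand back the allocation to free */
}

static const struct output_func my_func = { .dtor = my_dtor };

int main(void)
{
	struct output *outp = malloc(sizeof(*outp));
	if (!outp)
		return 1;
	outp->func = &my_func;
	free(outp->func->dtor(outp));
	return 0;
}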
 
-int
-nvkm_output_create_(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass,
-		    struct dcb_output *dcbE, int index,
-		    int length, void **pobject)
+void
+nvkm_output_ctor(const struct nvkm_output_func *func, struct nvkm_disp *disp,
+		 int index, struct dcb_output *dcbE, struct nvkm_output *outp)
 {
-	struct nvkm_disp *disp = nvkm_disp(parent);
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvbios_connE connE;
-	struct nvkm_output *outp;
-	u8  ver, hdr;
-	u32 data;
-	int ret;
+	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
 
-	ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
-	outp = *pobject;
-	if (ret)
-		return ret;
-
-	outp->info = *dcbE;
+	outp->func = func;
+	outp->disp = disp;
 	outp->index = index;
+	outp->info = *dcbE;
+	outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
 	outp->or = ffs(outp->info.or) - 1;
 
-	DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n",
-	    dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ?
-	    dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
-	    dcbE->bus, dcbE->heads);
-
-	if (outp->info.type != DCB_OUTPUT_DP)
-		outp->port = i2c->find(i2c, NV_I2C_PORT(outp->info.i2c_index));
-	else
-		outp->port = i2c->find(i2c, NV_I2C_AUX(outp->info.i2c_index));
-	outp->edid = outp->port;
-
-	data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
-	if (!data) {
-		DBG("vbios connector data not found\n");
-		memset(&connE, 0x00, sizeof(connE));
-		connE.type = DCB_CONNECTOR_NONE;
-	}
-
-	ret = nvkm_object_ctor(parent, NULL, nvkm_connector_oclass,
-			       &connE, outp->info.connector,
-			       (struct nvkm_object **)&outp->conn);
-	if (ret < 0) {
-		ERR("error %d creating connector, disabling\n", ret);
-		return ret;
-	}
-
-	list_add_tail(&outp->head, &disp->outp);
-	return 0;
+	OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
+		       "edid %x bus %d head %x",
+		 outp->info.type, outp->info.location, outp->info.or,
+		 outp->info.type >= 2 ? outp->info.sorconf.link : 0,
+		 outp->info.connector, outp->info.i2c_index,
+		 outp->info.bus, outp->info.heads);
 }
 
 int
-_nvkm_output_ctor(struct nvkm_object *parent,
-		  struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *dcbE, u32 index,
-		  struct nvkm_object **pobject)
+nvkm_output_new_(const struct nvkm_output_func *func,
+		 struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		 struct nvkm_output **poutp)
 {
-	struct nvkm_output *outp;
-	int ret;
-
-	ret = nvkm_output_create(parent, engine, oclass, dcbE, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
-		return ret;
+	if (!(*poutp = kzalloc(sizeof(**poutp), GFP_KERNEL)))
+		return -ENOMEM;
 
+	nvkm_output_ctor(func, disp, index, dcbE, *poutp);
 	return 0;
 }
-
-struct nvkm_oclass *
-nvkm_output_oclass = &(struct nvkm_output_impl) {
-	.base = {
-		.handle = 0,
-		.ofuncs = &(struct nvkm_ofuncs) {
-			.ctor = _nvkm_output_ctor,
-			.dtor = _nvkm_output_dtor,
-			.init = _nvkm_output_init,
-			.fini = _nvkm_output_fini,
-		},
-	},
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index d9253d26c31b..2590fec67ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -1,61 +1,55 @@
 #ifndef __NVKM_DISP_OUTP_H__
 #define __NVKM_DISP_OUTP_H__
-#include <core/object.h>
+#include <engine/disp.h>
 
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
 
 struct nvkm_output {
-	struct nvkm_object base;
-	struct list_head head;
-
-	struct dcb_output info;
+	const struct nvkm_output_func *func;
+	struct nvkm_disp *disp;
 	int index;
-	int or;
+	struct dcb_output info;
 
-	struct nvkm_i2c_port *port;
-	struct nvkm_i2c_port *edid;
+	/* whatever (if anything) is pointed at by the dcb device entry */
+	struct nvkm_i2c_bus *i2c;
+	int or;
 
+	struct list_head head;
 	struct nvkm_connector *conn;
 };
 
-#define nvkm_output_create(p,e,c,b,i,d)                                        \
-	nvkm_output_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_output_destroy(d) ({                                              \
-	struct nvkm_output *_outp = (d);                                       \
-	_nvkm_output_dtor(nv_object(_outp));                                   \
-})
-#define nvkm_output_init(d) ({                                                 \
-	struct nvkm_output *_outp = (d);                                       \
-	_nvkm_output_init(nv_object(_outp));                                   \
-})
-#define nvkm_output_fini(d,s) ({                                               \
-	struct nvkm_output *_outp = (d);                                       \
-	_nvkm_output_fini(nv_object(_outp), (s));                              \
-})
-
-int nvkm_output_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, struct dcb_output *,
-			int, int, void **);
-
-int  _nvkm_output_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-void _nvkm_output_dtor(struct nvkm_object *);
-int  _nvkm_output_init(struct nvkm_object *);
-int  _nvkm_output_fini(struct nvkm_object *, bool);
-
-struct nvkm_output_impl {
-	struct nvkm_oclass base;
+struct nvkm_output_func {
+	void *(*dtor)(struct nvkm_output *);
+	void (*init)(struct nvkm_output *);
+	void (*fini)(struct nvkm_output *);
 };
 
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_output *_outp = (void *)outp;                              \
-	nv_##l(_outp, "%02x:%04x:%04x: "f, _outp->index,                       \
-	       _outp->info.hasht, _outp->info.hashm, ##a);                     \
+void nvkm_output_ctor(const struct nvkm_output_func *, struct nvkm_disp *,
+		      int index, struct dcb_output *, struct nvkm_output *);
+int nvkm_output_new_(const struct nvkm_output_func *, struct nvkm_disp *,
+		     int index, struct dcb_output *, struct nvkm_output **);
+void nvkm_output_del(struct nvkm_output **);
+void nvkm_output_init(struct nvkm_output *);
+void nvkm_output_fini(struct nvkm_output *);
+
+int nv50_dac_output_new(struct nvkm_disp *, int, struct dcb_output *,
+			struct nvkm_output **);
+int nv50_sor_output_new(struct nvkm_disp *, int, struct dcb_output *,
+			struct nvkm_output **);
+int nv50_pior_output_new(struct nvkm_disp *, int, struct dcb_output *,
+			 struct nvkm_output **);
+
+u32 g94_sor_dp_lane_map(struct nvkm_device *, u8 lane);
+
+void gm204_sor_magic(struct nvkm_output *outp);
+
+#define OUTP_MSG(o,l,f,a...) do {                                              \
+	struct nvkm_output *_outp = (o);                                       \
+	nvkm_##l(&_outp->disp->engine.subdev, "outp %02x:%04x:%04x: "f"\n",    \
+		 _outp->index, _outp->info.hasht, _outp->info.hashm, ##a);     \
 } while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define OUTP_ERR(o,f,a...) OUTP_MSG((o), error, f, ##a)
+#define OUTP_DBG(o,f,a...) OUTP_MSG((o), debug, f, ##a)
+#define OUTP_TRACE(o,f,a...) OUTP_MSG((o), trace, f, ##a)
 #endif
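
OUTP_MSG() is a GNU-style variadic macro that prepends a per-output prefix before the caller's format and arguments. A compilable standalone version (field values are made up; printf stands in for the nvkm log helpers):

/* Standalone OUTP_MSG()-style macro. */
#include <stdio.h>

struct output { int index; unsigned short hasht, hashm; };

#define OUTP_MSG(o, f, a...)                                           \
	printf("outp %02x:%04x:%04x: " f "\n",                         \
	       (o)->index, (o)->hasht, (o)->hashm, ##a)

int main(void)
{
	struct output outp = { .index = 1, .hasht = 0x0048, .hashm = 0x0306 };
	OUTP_MSG(&outp, "HPD: %d", 1);
	return 0;
}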
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
index 0bde0fa5b59d..3b7a9e7a1ea8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
@@ -33,16 +33,17 @@
 int
 nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
 {
-	struct nvkm_output_dp *outp = (void *)base;
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
 	bool retrain = true;
 	u8 link[2], stat[3];
 	u32 linkrate;
 	int ret, i;
 
 	/* check that the link is trained at a high enough rate */
-	ret = nv_rdaux(outp->base.edid, DPCD_LC00_LINK_BW_SET, link, 2);
+	ret = nvkm_rdaux(outp->aux, DPCD_LC00_LINK_BW_SET, link, 2);
 	if (ret) {
-		DBG("failed to read link config, assuming no sink\n");
+		OUTP_DBG(&outp->base,
+			 "failed to read link config, assuming no sink");
 		goto done;
 	}
 
@@ -50,14 +51,15 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
 	linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
 	datarate = (datarate + 9) / 10; /* -> decakilobits */
 	if (linkrate < datarate) {
-		DBG("link not trained at sufficient rate\n");
+		OUTP_DBG(&outp->base, "link not trained at sufficient rate");
 		goto done;
 	}
 
 	/* check that link is still trained */
-	ret = nv_rdaux(outp->base.edid, DPCD_LS02, stat, 3);
+	ret = nvkm_rdaux(outp->aux, DPCD_LS02, stat, 3);
 	if (ret) {
-		DBG("failed to read link status, assuming no sink\n");
+		OUTP_DBG(&outp->base,
+			 "failed to read link status, assuming no sink");
 		goto done;
 	}
 
@@ -67,13 +69,14 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
 			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
 			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
 			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
-				DBG("lane %d not equalised\n", lane);
+				OUTP_DBG(&outp->base,
+					 "lane %d not equalised", lane);
 				goto done;
 			}
 		}
 		retrain = false;
 	} else {
-		DBG("no inter-lane alignment\n");
+		OUTP_DBG(&outp->base, "no inter-lane alignment");
 	}
 
 done:
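
The rate check in nvkm_output_dp_train() works in units of ten kilobits per second: the DPCD bandwidth code scales by 27000 per lane, 8B/10B coding strips 20%, and the stream rate is rounded up into the same units. A worked example assuming a 2-lane 2.7 Gbps link and a 1080p60 24bpp stream:

/* Worked link-vs-stream rate check; stream numbers are assumed. */
#include <stdio.h>

int main(void)
{
	unsigned bw_code = 0x0a, lanes = 2;		/* 2.7 Gbps x2 */
	unsigned linkrate = bw_code * 27000 * lanes;	/* decakilobits/s */
	linkrate = (linkrate * 8) / 10;			/* 8B/10B overhead */

	unsigned datarate = 148500 * 24;		/* kbit/s at 24bpp */
	datarate = (datarate + 9) / 10;			/* -> decakilobits */

	printf("link %u vs data %u -> %s\n", linkrate, datarate,
	       linkrate < datarate ? "retrain" : "trained ok");
	return 0;
}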
@@ -102,150 +105,138 @@ done:
 }
 
 static void
-nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool present)
+nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool enable)
 {
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (present) {
+	struct nvkm_i2c_aux *aux = outp->aux;
+
+	if (enable) {
 		if (!outp->present) {
-			nvkm_i2c(port)->acquire_pad(port, 0);
-			DBG("aux power -> always\n");
+			OUTP_DBG(&outp->base, "aux power -> always");
+			nvkm_i2c_aux_monitor(aux, true);
 			outp->present = true;
 		}
-		nvkm_output_dp_train(&outp->base, 0, true);
-	} else {
-		if (outp->present) {
-			nvkm_i2c(port)->release_pad(port);
-			DBG("aux power -> demand\n");
-			outp->present = false;
+
+		if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dpcd,
+				sizeof(outp->dpcd))) {
+			nvkm_output_dp_train(&outp->base, 0, true);
+			return;
 		}
-		atomic_set(&outp->lt.done, 0);
 	}
-}
 
-static void
-nvkm_output_dp_detect(struct nvkm_output_dp *outp)
-{
-	struct nvkm_i2c_port *port = outp->base.edid;
-	int ret = nvkm_i2c(port)->acquire_pad(port, 0);
-	if (ret == 0) {
-		ret = nv_rdaux(outp->base.edid, DPCD_RC00_DPCD_REV,
-			       outp->dpcd, sizeof(outp->dpcd));
-		nvkm_output_dp_enable(outp, ret == 0);
-		nvkm_i2c(port)->release_pad(port);
+	if (outp->present) {
+		OUTP_DBG(&outp->base, "aux power -> demand");
+		nvkm_i2c_aux_monitor(aux, false);
+		outp->present = false;
 	}
+
+	atomic_set(&outp->lt.done, 0);
 }
 
 static int
 nvkm_output_dp_hpd(struct nvkm_notify *notify)
 {
-	struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
-	struct nvkm_output_dp *outp;
-	struct nvkm_disp *disp = nvkm_disp(conn);
 	const struct nvkm_i2c_ntfy_rep *line = notify->data;
+	struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), hpd);
+	struct nvkm_connector *conn = outp->base.conn;
+	struct nvkm_disp *disp = outp->base.disp;
 	struct nvif_notify_conn_rep_v0 rep = {};
 
-	list_for_each_entry(outp, &disp->outp, base.head) {
-		if (outp->base.conn == conn &&
-		    outp->info.type == DCB_OUTPUT_DP) {
-			DBG("HPD: %d\n", line->mask);
-			nvkm_output_dp_detect(outp);
+	OUTP_DBG(&outp->base, "HPD: %d", line->mask);
+	nvkm_output_dp_enable(outp, true);
 
-			if (line->mask & NVKM_I2C_UNPLUG)
-				rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
-			if (line->mask & NVKM_I2C_PLUG)
-				rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
+	if (line->mask & NVKM_I2C_UNPLUG)
+		rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
+	if (line->mask & NVKM_I2C_PLUG)
+		rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
 
-			nvkm_event_send(&disp->hpd, rep.mask, conn->index,
-					&rep, sizeof(rep));
-			return NVKM_NOTIFY_KEEP;
-		}
-	}
-
-	WARN_ON(1);
-	return NVKM_NOTIFY_DROP;
+	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
+	return NVKM_NOTIFY_KEEP;
 }
 
 static int
 nvkm_output_dp_irq(struct nvkm_notify *notify)
 {
-	struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
-	struct nvkm_disp *disp = nvkm_disp(outp);
 	const struct nvkm_i2c_ntfy_rep *line = notify->data;
+	struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
+	struct nvkm_connector *conn = outp->base.conn;
+	struct nvkm_disp *disp = outp->base.disp;
 	struct nvif_notify_conn_rep_v0 rep = {
 		.mask = NVIF_NOTIFY_CONN_V0_IRQ,
 	};
-	int index = outp->base.info.connector;
 
-	DBG("IRQ: %d\n", line->mask);
+	OUTP_DBG(&outp->base, "IRQ: %d", line->mask);
 	nvkm_output_dp_train(&outp->base, 0, true);
 
-	nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));
+	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
 	return NVKM_NOTIFY_DROP;
 }
 
-int
-_nvkm_output_dp_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_output_dp_fini(struct nvkm_output *base)
 {
-	struct nvkm_output_dp *outp = (void *)object;
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
+	nvkm_notify_put(&outp->hpd);
 	nvkm_notify_put(&outp->irq);
+	flush_work(&outp->lt.work);
 	nvkm_output_dp_enable(outp, false);
-	return nvkm_output_fini(&outp->base, suspend);
 }
 
-int
-_nvkm_output_dp_init(struct nvkm_object *object)
+static void
+nvkm_output_dp_init(struct nvkm_output *base)
 {
-	struct nvkm_output_dp *outp = (void *)object;
-	nvkm_output_dp_detect(outp);
-	return nvkm_output_init(&outp->base);
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
+	nvkm_notify_put(&outp->base.conn->hpd);
+	nvkm_output_dp_enable(outp, true);
+	nvkm_notify_get(&outp->hpd);
 }
 
-void
-_nvkm_output_dp_dtor(struct nvkm_object *object)
+static void *
+nvkm_output_dp_dtor(struct nvkm_output *base)
 {
-	struct nvkm_output_dp *outp = (void *)object;
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
+	nvkm_notify_fini(&outp->hpd);
 	nvkm_notify_fini(&outp->irq);
-	nvkm_output_destroy(&outp->base);
+	return outp;
 }
 
+static const struct nvkm_output_func
+nvkm_output_dp_func = {
+	.dtor = nvkm_output_dp_dtor,
+	.init = nvkm_output_dp_init,
+	.fini = nvkm_output_dp_fini,
+};
+
 int
-nvkm_output_dp_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass,
-		       struct dcb_output *info, int index,
-		       int length, void **pobject)
+nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
+		    struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		    struct nvkm_i2c_aux *aux, struct nvkm_output_dp *outp)
 {
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_output_dp *outp;
+	struct nvkm_device *device = disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_i2c *i2c = device->i2c;
 	u8  hdr, cnt, len;
 	u32 data;
 	int ret;
 
-	ret = nvkm_output_create_(parent, engine, oclass, info, index,
-				  length, pobject);
-	outp = *pobject;
-	if (ret)
-		return ret;
-
-	nvkm_notify_fini(&outp->base.conn->hpd);
-
-	/* access to the aux channel is not optional... */
-	if (!outp->base.edid) {
-		ERR("aux channel not found\n");
+	nvkm_output_ctor(&nvkm_output_dp_func, disp, index, dcbE, &outp->base);
+	outp->func = func;
+	outp->aux = aux;
+	if (!outp->aux) {
+		OUTP_ERR(&outp->base, "no aux");
 		return -ENODEV;
 	}
 
-	/* nor is the bios data for this output... */
+	/* bios data is not optional */
 	data = nvbios_dpout_match(bios, outp->base.info.hasht,
 				  outp->base.info.hashm, &outp->version,
 				  &hdr, &cnt, &len, &outp->info);
 	if (!data) {
-		ERR("no bios dp data\n");
+		OUTP_ERR(&outp->base, "no bios dp data");
 		return -ENODEV;
 	}
 
-	DBG("bios dp %02x %02x %02x %02x\n", outp->version, hdr, cnt, len);
+	OUTP_DBG(&outp->base, "bios dp %02x %02x %02x %02x",
+		 outp->version, hdr, cnt, len);
 
 	/* link training */
 	INIT_WORK(&outp->lt.work, nvkm_dp_train);
@@ -256,13 +247,13 @@ nvkm_output_dp_create_(struct nvkm_object *parent,
 	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true,
 			       &(struct nvkm_i2c_ntfy_req) {
 				.mask = NVKM_I2C_IRQ,
-				.port = outp->base.edid->index,
+				.port = outp->aux->id,
 			       },
 			       sizeof(struct nvkm_i2c_ntfy_req),
 			       sizeof(struct nvkm_i2c_ntfy_rep),
 			       &outp->irq);
 	if (ret) {
-		ERR("error monitoring aux irq event: %d\n", ret);
+		OUTP_ERR(&outp->base, "error monitoring aux irq: %d", ret);
 		return ret;
 	}
 
@@ -270,13 +261,13 @@ nvkm_output_dp_create_(struct nvkm_object *parent,
 	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true,
 			       &(struct nvkm_i2c_ntfy_req) {
 				.mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
-				.port = outp->base.edid->index,
+				.port = outp->aux->id,
 			       },
 			       sizeof(struct nvkm_i2c_ntfy_req),
 			       sizeof(struct nvkm_i2c_ntfy_rep),
-			       &outp->base.conn->hpd);
+			       &outp->hpd);
 	if (ret) {
-		ERR("error monitoring aux hpd events: %d\n", ret);
+		OUTP_ERR(&outp->base, "error monitoring aux hpd: %d", ret);
 		return ret;
 	}
 
@@ -284,18 +275,17 @@ nvkm_output_dp_create_(struct nvkm_object *parent,
 }
 
 int
-_nvkm_output_dp_ctor(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *info, u32 index,
-		     struct nvkm_object **pobject)
+nvkm_output_dp_new_(const struct nvkm_output_dp_func *func,
+		    struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		    struct nvkm_output **poutp)
 {
+	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
+	struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, dcbE->i2c_index);
 	struct nvkm_output_dp *outp;
-	int ret;
 
-	ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
-		return ret;
+	if (!(outp = kzalloc(sizeof(*outp), GFP_KERNEL)))
+		return -ENOMEM;
+	*poutp = &outp->base;
 
-	return 0;
+	return nvkm_output_dp_ctor(func, disp, index, dcbE, aux, outp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 70c77aec4850..731136d660b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -1,5 +1,14 @@
 #ifndef __NVKM_DISP_OUTP_DP_H__
 #define __NVKM_DISP_OUTP_DP_H__
+#define nvkm_output_dp(p) container_of((p), struct nvkm_output_dp, base)
+#ifndef MSG
+#define MSG(l,f,a...)                                                          \
+	nvkm_##l(&outp->base.disp->engine.subdev, "%02x:%04x:%04x: "f,         \
+		 outp->base.index, outp->base.info.hasht,                      \
+		 outp->base.info.hashm, ##a)
+#define DBG(f,a...) MSG(debug, f, ##a)
+#define ERR(f,a...) MSG(error, f, ##a)
+#endif
 #include "outp.h"
 
 #include <core/notify.h>
@@ -7,12 +16,16 @@
 #include <subdev/bios/dp.h>
 
 struct nvkm_output_dp {
+	const struct nvkm_output_dp_func *func;
 	struct nvkm_output base;
 
 	struct nvbios_dpout info;
 	u8 version;
 
+	struct nvkm_i2c_aux *aux;
+
 	struct nvkm_notify irq;
+	struct nvkm_notify hpd;
 	bool present;
 	u8 dpcd[16];
 
@@ -23,34 +36,7 @@ struct nvkm_output_dp {
 	} lt;
 };
 
-#define nvkm_output_dp_create(p,e,c,b,i,d)                                     \
-	nvkm_output_dp_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_output_dp_destroy(d) ({                                           \
-	struct nvkm_output_dp *_outp = (d);                                    \
-	_nvkm_output_dp_dtor(nv_object(_outp));                                \
-})
-#define nvkm_output_dp_init(d) ({                                              \
-	struct nvkm_output_dp *_outp = (d);                                    \
-	_nvkm_output_dp_init(nv_object(_outp));                                \
-})
-#define nvkm_output_dp_fini(d,s) ({                                            \
-	struct nvkm_output_dp *_outp = (d);                                    \
-	_nvkm_output_dp_fini(nv_object(_outp), (s));                           \
-})
-
-int nvkm_output_dp_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, struct dcb_output *,
-			   int, int, void **);
-
-int  _nvkm_output_dp_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-void _nvkm_output_dp_dtor(struct nvkm_object *);
-int  _nvkm_output_dp_init(struct nvkm_object *);
-int  _nvkm_output_dp_fini(struct nvkm_object *, bool);
-
-struct nvkm_output_dp_impl {
-	struct nvkm_output_impl base;
+struct nvkm_output_dp_func {
 	int (*pattern)(struct nvkm_output_dp *, int);
 	int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
 	int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
@@ -58,4 +44,25 @@ struct nvkm_output_dp_impl {
 };
 
 int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
+
+int nvkm_output_dp_ctor(const struct nvkm_output_dp_func *, struct nvkm_disp *,
+			int index, struct dcb_output *, struct nvkm_i2c_aux *,
+			struct nvkm_output_dp *);
+int nvkm_output_dp_new_(const struct nvkm_output_dp_func *, struct nvkm_disp *,
+			int index, struct dcb_output *,
+			struct nvkm_output **);
+
+int nv50_pior_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		     struct nvkm_output **);
+
+int g94_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		   struct nvkm_output **);
+int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
+
+int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		     struct nvkm_output **);
+int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+
+int gm204_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		     struct nvkm_output **);
 #endif
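
[Editor's note] The MSG()/DBG()/ERR() macros added at the top of this header deliberately capture a variable named "outp" from the calling scope, so they only compile inside functions that have a struct nvkm_output_dp *outp (or a type with the same base layout) in scope. A hypothetical caller, for illustration only:

	static void
	example(struct nvkm_output_dp *outp)
	{
		/* expands to nvkm_error() on the disp subdev, tagged with
		 * outp->base.index, info.hasht and info.hashm */
		ERR("link training failed");
	}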
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
new file mode 100644
index 000000000000..db6234eebc61
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+g84_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x6109a0 },
+		{ 0x0088, 0x6109c0 },
+		{ 0x008c, 0x6109c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &g84_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_ovly_oclass = {
+	.base.oclass = G82_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_ovly_chan_mthd,
+	.chid = 3,
+};
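
[Editor's note] The nv50_disp_mthd_list tables in this and the following new files pair an EVO channel method offset (left column) with the hardware register that shadows it (right column); the empty {} entry terminates the list. A sketch of how such a table can be walked for a state dump, using a hypothetical helper and ignoring the per-head/channel base offsets the real driver would add:

	static void
	mthd_list_dump(struct nvkm_device *device,
		       const struct nv50_disp_mthd_list *list)
	{
		int i;

		for (i = 0; list->data[i].mthd; i++) {	/* {} terminator */
			printk(KERN_DEBUG "\t%04x: %08x\n",
			       list->data[i].mthd,
			       nvkm_rd32(device, list->data[i].addr));
		}
	}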
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
new file mode 100644
index 000000000000..5985879abd23
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gf119_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.data = {
+		{ 0x0080, 0x665080 },
+		{ 0x0084, 0x665084 },
+		{ 0x0088, 0x665088 },
+		{ 0x008c, 0x66508c },
+		{ 0x0090, 0x665090 },
+		{ 0x0094, 0x665094 },
+		{ 0x00a0, 0x6650a0 },
+		{ 0x00a4, 0x6650a4 },
+		{ 0x00b0, 0x6650b0 },
+		{ 0x00b4, 0x6650b4 },
+		{ 0x00b8, 0x6650b8 },
+		{ 0x00c0, 0x6650c0 },
+		{ 0x00e0, 0x6650e0 },
+		{ 0x00e4, 0x6650e4 },
+		{ 0x00e8, 0x6650e8 },
+		{ 0x0100, 0x665100 },
+		{ 0x0104, 0x665104 },
+		{ 0x0108, 0x665108 },
+		{ 0x010c, 0x66510c },
+		{ 0x0110, 0x665110 },
+		{ 0x0118, 0x665118 },
+		{ 0x011c, 0x66511c },
+		{ 0x0120, 0x665120 },
+		{ 0x0124, 0x665124 },
+		{ 0x0130, 0x665130 },
+		{ 0x0134, 0x665134 },
+		{ 0x0138, 0x665138 },
+		{ 0x013c, 0x66513c },
+		{ 0x0140, 0x665140 },
+		{ 0x0144, 0x665144 },
+		{ 0x0148, 0x665148 },
+		{ 0x014c, 0x66514c },
+		{ 0x0150, 0x665150 },
+		{ 0x0154, 0x665154 },
+		{ 0x0158, 0x665158 },
+		{ 0x015c, 0x66515c },
+		{ 0x0160, 0x665160 },
+		{ 0x0164, 0x665164 },
+		{ 0x0168, 0x665168 },
+		{ 0x016c, 0x66516c },
+		{ 0x0400, 0x665400 },
+		{ 0x0408, 0x665408 },
+		{ 0x040c, 0x66540c },
+		{ 0x0410, 0x665410 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gf119_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x001000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_ovly_oclass = {
+	.base.oclass = GF110_DISP_OVERLAY_CONTROL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_ovly_chan_mthd,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
new file mode 100644
index 000000000000..2e2dc0641ef2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gk104_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.data = {
+		{ 0x0080, 0x665080 },
+		{ 0x0084, 0x665084 },
+		{ 0x0088, 0x665088 },
+		{ 0x008c, 0x66508c },
+		{ 0x0090, 0x665090 },
+		{ 0x0094, 0x665094 },
+		{ 0x00a0, 0x6650a0 },
+		{ 0x00a4, 0x6650a4 },
+		{ 0x00b0, 0x6650b0 },
+		{ 0x00b4, 0x6650b4 },
+		{ 0x00b8, 0x6650b8 },
+		{ 0x00c0, 0x6650c0 },
+		{ 0x00c4, 0x6650c4 },
+		{ 0x00e0, 0x6650e0 },
+		{ 0x00e4, 0x6650e4 },
+		{ 0x00e8, 0x6650e8 },
+		{ 0x0100, 0x665100 },
+		{ 0x0104, 0x665104 },
+		{ 0x0108, 0x665108 },
+		{ 0x010c, 0x66510c },
+		{ 0x0110, 0x665110 },
+		{ 0x0118, 0x665118 },
+		{ 0x011c, 0x66511c },
+		{ 0x0120, 0x665120 },
+		{ 0x0124, 0x665124 },
+		{ 0x0130, 0x665130 },
+		{ 0x0134, 0x665134 },
+		{ 0x0138, 0x665138 },
+		{ 0x013c, 0x66513c },
+		{ 0x0140, 0x665140 },
+		{ 0x0144, 0x665144 },
+		{ 0x0148, 0x665148 },
+		{ 0x014c, 0x66514c },
+		{ 0x0150, 0x665150 },
+		{ 0x0154, 0x665154 },
+		{ 0x0158, 0x665158 },
+		{ 0x015c, 0x66515c },
+		{ 0x0160, 0x665160 },
+		{ 0x0164, 0x665164 },
+		{ 0x0168, 0x665168 },
+		{ 0x016c, 0x66516c },
+		{ 0x0400, 0x665400 },
+		{ 0x0404, 0x665404 },
+		{ 0x0408, 0x665408 },
+		{ 0x040c, 0x66540c },
+		{ 0x0410, 0x665410 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gk104_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x001000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gk104_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_ovly_oclass = {
+	.base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gk104_disp_ovly_chan_mthd,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
new file mode 100644
index 000000000000..f858053db83d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gt200_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x6109a0 },
+		{ 0x0088, 0x6109c0 },
+		{ 0x008c, 0x6109c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00b0, 0x610c98 },
+		{ 0x00b4, 0x610ca4 },
+		{ 0x00b8, 0x610cac },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gt200_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &gt200_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_ovly_oclass = {
+	.base.oclass = GT200_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &gt200_disp_ovly_chan_mthd,
+	.chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
new file mode 100644
index 000000000000..c947e1e16a37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_ovly_oclass = {
+	.base.oclass = GT214_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_ovly_chan_mthd,
+	.chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
new file mode 100644
index 000000000000..6fa296c047b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_ovly_new(const struct nv50_disp_dmac_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_overlay_channel_dma_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+	u64 push;
+
+	nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp overlay channel dma vers %d "
+				   "pushbuf %016llx head %d\n",
+			   args->v0.version, args->v0.pushbuf, args->v0.head);
+		if (args->v0.head > disp->base.head.nr)
+			return -EINVAL;
+		push = args->v0.pushbuf;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_dmac_new_(func, mthd, root, chid + head,
+				   head, push, oclass, pobject);
+}
+
+static const struct nv50_disp_mthd_list
+nv50_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0009a0 },
+		{ 0x0088, 0x0009c0 },
+		{ 0x008c, 0x0009c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_ovly_oclass = {
+	.base.oclass = NV50_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &nv50_disp_ovly_chan_mthd,
+	.chid = 3,
+};
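
[Editor's note] nv50_disp_ovly_new() above uses the tree's versioned-ioctl idiom: arguments arrive as a union of versioned structs, and nvif_unpack() both validates the v0 layout against "size" and assigns "ret", which is why the bare "return ret" in the else branch is well-defined even though no assignment is visible. A stripped-down sketch with a hypothetical method (assumes <nvif/unpack.h> is included; bar_exec() is made up):

	struct bar_v0 {
		__u8  version;
		__u8  head;
		__u8  pad02[6];
		__u64 pushbuf;
	};

	static int
	bar_mthd(void *data, u32 size)
	{
		union {
			struct bar_v0 v0;	/* future v1, v2, ... go here */
		} *args = data;
		int ret;

		if (nvif_unpack(args->v0, 0, 0, false)) {
			/* v0 recognised and size-checked: consume fields */
			return bar_exec(args->v0.head, args->v0.pushbuf);
		} else
			return ret;		/* assigned inside nvif_unpack() */
	}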
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
new file mode 100644
index 000000000000..a625a9876e34
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static void
+gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
+	nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+	}
+
+	/* disable error reporting and completion notification */
+	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+}
+
+static int
+gf119_disp_pioc_init(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+	/* activate channel */
+	nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
+		if ((tmp & 0x00030000) == 0x00010000)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d init: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_chan_func
+gf119_disp_pioc_func = {
+	.init = gf119_disp_pioc_init,
+	.fini = gf119_disp_pioc_fini,
+};
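
[Editor's note] The nvkm_msec(device, 2000, ...) construct used in the init/fini paths above is a polling macro: its statement body is re-evaluated until it executes "break", and the whole expression goes negative if roughly 2000ms elapse first. An open-coded equivalent of the fini wait, under that assumption (busy-wait shown for clarity; the real macro may relax or sleep between reads):

	#include <linux/ktime.h>	/* ktime_get_ns(), NSEC_PER_MSEC */

	static bool
	wait_chan_idle(struct nvkm_device *device, int chid)
	{
		u64 deadline = ktime_get_ns() + 2000ULL * NSEC_PER_MSEC;

		while (ktime_get_ns() < deadline) {
			if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10))
			      & 0x00030000))
				return true;	/* channel went idle */
			cpu_relax();
		}
		return false;			/* timed out */
	}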
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
new file mode 100644
index 000000000000..9d2618dacf20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static void
+nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
+	nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+	}
+}
+
+static int
+nv50_disp_pioc_init(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
+	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
+		if ((tmp & 0x00030000) == 0x00010000)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_chan_func
+nv50_disp_pioc_func = {
+	.init = nv50_disp_pioc_init,
+	.fini = nv50_disp_pioc_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
index 2a1d8871bf82..ab524bde7795 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
@@ -21,8 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
 #include "outpdp.h"
+#include "nv50.h"
 
 #include <core/client.h>
 #include <subdev/i2c.h>
@@ -31,140 +31,101 @@
 #include <nvif/class.h>
 #include <nvif/unpack.h>
 
-/******************************************************************************
- * TMDS
- *****************************************************************************/
-
-static int
-nv50_pior_tmds_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *info, u32 index,
-		    struct nvkm_object **pobject)
+int
+nv50_pior_power(NV50_DISP_MTHD_V1)
 {
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_output *outp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const u32 soff = outp->or * 0x800;
+	union {
+		struct nv50_disp_pior_pwr_v0 v0;
+	} *args = data;
+	u32 ctrl, type;
 	int ret;
 
-	ret = nvkm_output_create(parent, engine, oclass, info, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
+	nvif_ioctl(object, "disp pior pwr size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
+			   args->v0.version, args->v0.state, args->v0.type);
+		if (args->v0.type > 0x0f)
+			return -EINVAL;
+		ctrl = !!args->v0.state;
+		type = args->v0.type;
+	} else
 		return ret;
 
-	outp->edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(outp->info.extdev));
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61e004 + soff) & 0x80000000))
+			break;
+	);
+	nvkm_mask(device, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61e004 + soff) & 0x80000000))
+			break;
+	);
+	disp->pior.type[outp->or] = type;
 	return 0;
 }
 
-struct nvkm_output_impl
-nv50_pior_tmds_impl = {
-	.base.handle = DCB_OUTPUT_TMDS | 0x0100,
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_pior_tmds_ctor,
-		.dtor = _nvkm_output_dtor,
-		.init = _nvkm_output_init,
-		.fini = _nvkm_output_fini,
-	},
-};
-
 /******************************************************************************
- * DisplayPort
+ * TMDS
  *****************************************************************************/
+static const struct nvkm_output_func
+nv50_pior_output_func = {
+};
 
-static int
-nv50_pior_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+int
+nv50_pior_output_new(struct nvkm_disp *disp, int index,
+		     struct dcb_output *dcbE, struct nvkm_output **poutp)
 {
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (port && port->func->pattern)
-		return port->func->pattern(port, pattern);
-	return port ? 0 : -ENODEV;
+	return nvkm_output_new_(&nv50_pior_output_func, disp,
+				index, dcbE, poutp);
 }
 
+/******************************************************************************
+ * DisplayPort
+ *****************************************************************************/
 static int
-nv50_pior_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+nv50_pior_output_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
 	return 0;
 }
 
 static int
-nv50_pior_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
-{
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (port && port->func->lnk_ctl)
-		return port->func->lnk_ctl(port, nr, bw, ef);
-	return port ? 0 : -ENODEV;
-}
-
-static int
-nv50_pior_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
+nv50_pior_output_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (port && port->func->drv_ctl)
-		return port->func->drv_ctl(port, ln, vs, pe);
-	return port ? 0 : -ENODEV;
+	return 0;
 }
 
 static int
-nv50_pior_dp_ctor(struct nvkm_object *parent,
-		  struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *info, u32 index,
-		  struct nvkm_object **pobject)
+nv50_pior_output_dp_lnk_ctl(struct nvkm_output_dp *outp,
+			    int nr, int bw, bool ef)
 {
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_output_dp *outp;
-	int ret;
-
-	ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
-	*pobject = nv_object(outp);
+	int ret = nvkm_i2c_aux_lnk_ctl(outp->aux, nr, bw, ef);
 	if (ret)
 		return ret;
-
-	outp->base.edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(
-					 outp->base.info.extdev));
-	return 0;
+	return 1;
 }
 
-struct nvkm_output_dp_impl
-nv50_pior_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP | 0x0010,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_pior_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
-	.pattern = nv50_pior_dp_pattern,
-	.lnk_pwr = nv50_pior_dp_lnk_pwr,
-	.lnk_ctl = nv50_pior_dp_lnk_ctl,
-	.drv_ctl = nv50_pior_dp_drv_ctl,
+static const struct nvkm_output_dp_func
+nv50_pior_output_dp_func = {
+	.pattern = nv50_pior_output_dp_pattern,
+	.lnk_pwr = nv50_pior_output_dp_lnk_pwr,
+	.lnk_ctl = nv50_pior_output_dp_lnk_ctl,
 };
 
-/******************************************************************************
- * General PIOR handling
- *****************************************************************************/
-
 int
-nv50_pior_power(NV50_DISP_MTHD_V1)
+nv50_pior_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		 struct nvkm_output **poutp)
 {
-	const u32 soff = outp->or * 0x800;
-	union {
-		struct nv50_disp_pior_pwr_v0 v0;
-	} *args = data;
-	u32 ctrl, type;
-	int ret;
+	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
+	struct nvkm_i2c_aux *aux =
+		nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));
+	struct nvkm_output_dp *outp;
 
-	nv_ioctl(object, "disp pior pwr size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
-			 args->v0.version, args->v0.state, args->v0.type);
-		if (args->v0.type > 0x0f)
-			return -EINVAL;
-		ctrl = !!args->v0.state;
-		type = args->v0.type;
-	} else
-		return ret;
+	if (!(outp = kzalloc(sizeof(*outp), GFP_KERNEL)))
+		return -ENOMEM;
+	*poutp = &outp->base;
 
-	nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
-	nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
-	priv->pior.type[outp->or] = type;
-	return 0;
+	return nvkm_output_dp_ctor(&nv50_pior_output_dp_func, disp,
+				   index, dcbE, aux, outp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index 961ce8bb2135..c2452957fc57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -1,42 +1,52 @@
 #ifndef __NVKM_DISP_PRIV_H__
 #define __NVKM_DISP_PRIV_H__
 #include <engine/disp.h>
+#include "outp.h"
+#include "outpdp.h"
 
-struct nvkm_disp_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass **outp;
-	struct nvkm_oclass **conn;
-	const struct nvkm_event_func *vblank;
+int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *,
+		   int index, int heads, struct nvkm_disp *);
+int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *,
+		   int index, int heads, struct nvkm_disp **);
+void nvkm_disp_vblank(struct nvkm_disp *, int head);
+
+struct nvkm_disp_func_outp {
+	int (* crt)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  tv)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*tmds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*lvds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  dp)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+};
+
+struct nvkm_disp_func {
+	void *(*dtor)(struct nvkm_disp *);
+	void (*intr)(struct nvkm_disp *);
+
+	const struct nvkm_disp_oclass *(*root)(struct nvkm_disp *);
+
+	struct {
+		void (*vblank_init)(struct nvkm_disp *, int head);
+		void (*vblank_fini)(struct nvkm_disp *, int head);
+	} head;
+
+	struct {
+		const struct nvkm_disp_func_outp internal;
+		const struct nvkm_disp_func_outp external;
+	} outp;
 };
 
-#define nvkm_disp_create(p,e,c,h,i,x,d)                                     \
-	nvkm_disp_create_((p), (e), (c), (h), (i), (x),                     \
-			     sizeof(**d), (void **)d)
-#define nvkm_disp_destroy(d) ({                                             \
-	struct nvkm_disp *disp = (d);                                       \
-	_nvkm_disp_dtor(nv_object(disp));                                   \
-})
-#define nvkm_disp_init(d) ({                                                \
-	struct nvkm_disp *disp = (d);                                       \
-	_nvkm_disp_init(nv_object(disp));                                   \
-})
-#define nvkm_disp_fini(d,s) ({                                              \
-	struct nvkm_disp *disp = (d);                                       \
-	_nvkm_disp_fini(nv_object(disp), (s));                              \
-})
-
-int  nvkm_disp_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int heads,
-			  const char *, const char *, int, void **);
-void _nvkm_disp_dtor(struct nvkm_object *);
-int  _nvkm_disp_init(struct nvkm_object *);
-int  _nvkm_disp_fini(struct nvkm_object *, bool);
-
-extern struct nvkm_oclass *nvkm_output_oclass;
-extern struct nvkm_oclass *nvkm_connector_oclass;
-
-int  nvkm_disp_vblank_ctor(struct nvkm_object *, void *data, u32 size,
-			   struct nvkm_notify *);
-void nvkm_disp_vblank(struct nvkm_disp *, int head);
 int  nvkm_disp_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
+
+extern const struct nvkm_disp_oclass nv04_disp_root_oclass;
+
+struct nvkm_disp_oclass {
+	int (*ctor)(struct nvkm_disp *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
+};
 #endif
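
[Editor's note] The nvkm_disp_func vtable introduced above is what the per-chipset disp implementations (elsewhere in this series) fill in; the .outp.internal/.outp.external constructor slots are where declarations like g94_sor_dp_new() and nv50_pior_dp_new() from outpdp.h get plugged in. An illustrative, hypothetical table (all example_* symbols are stand-ins, and only a subset of slots is shown):

	static void example_intr(struct nvkm_disp *disp) { }
	static void example_vblank_init(struct nvkm_disp *disp, int head) { }
	static void example_vblank_fini(struct nvkm_disp *disp, int head) { }
	static const struct nvkm_disp_oclass *
	example_root(struct nvkm_disp *disp) { return &nv04_disp_root_oclass; }

	static const struct nvkm_disp_func
	example_disp = {
		.intr = example_intr,
		.root = example_root,
		.head.vblank_init = example_vblank_init,
		.head.vblank_fini = example_vblank_fini,
		.outp.internal.dp = g94_sor_dp_new,	/* from outpdp.h */
		.outp.external.tmds = nv50_pior_output_new,
		.outp.external.dp = nv50_pior_dp_new,	/* from outpdp.h */
	};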
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
index 6820176e5f78..721e4f74d1fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2013 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,37 +21,38 @@
  *
  * Authors: Ben Skeggs
  */
-#include "gf100.h"
+#include "rootnv50.h"
+#include "dmacnv50.h"
 
-static int
-gk110_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct gf100_pm_priv *priv;
-	int ret;
-
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+#include <nvif/class.h>
 
-	ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
-	if (ret)
-		return ret;
+static const struct nv50_disp_root_func
+g84_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&g84_disp_core_oclass,
+		&g84_disp_base_oclass,
+		&g84_disp_ovly_oclass,
+	},
+	.pioc = {
+		&g84_disp_oimm_oclass,
+		&g84_disp_curs_oclass,
+	},
+};
 
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	return 0;
+static int
+g84_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&g84_disp_root, disp, oclass,
+				   data, size, pobject);
 }
 
-struct nvkm_oclass
-gk110_pm_oclass = {
-	.handle = NV_ENGINE(PM, 0xf0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk110_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = gf100_pm_fini,
-	},
+const struct nvkm_disp_oclass
+g84_disp_root_oclass = {
+	.base.oclass = G82_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = g84_disp_root_new,
 };
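
[Editor's note] The shape of this file (a static nv50_disp_root_func table plus a one-line *_root_new() trampoline into the shared nv50_disp_root_new_()) repeats for every chipset below: g94, gf119, gk104, gk110, gm107, gm204, gt200 and gt215. Only the dmac/pioc oclass pointers and the exported class ID differ.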
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
new file mode 100644
index 000000000000..9493f6edf62b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+g94_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&g94_disp_core_oclass,
+		&gt200_disp_base_oclass,
+		&gt200_disp_ovly_oclass,
+	},
+	.pioc = {
+		&g84_disp_oimm_oclass,
+		&g84_disp_curs_oclass,
+	},
+};
+
+static int
+g94_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&g94_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+g94_disp_root_oclass = {
+	.base.oclass = GT206_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = g94_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
new file mode 100644
index 000000000000..8591726871ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const u32 total  = nvkm_rd32(device, 0x640414 + (head * 0x300));
+	const u32 blanke = nvkm_rd32(device, 0x64041c + (head * 0x300));
+	const u32 blanks = nvkm_rd32(device, 0x640420 + (head * 0x300));
+	union {
+		struct nv04_disp_scanoutpos_v0 v0;
+	} *args = data;
+	int ret;
+
+	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp scanoutpos vers %d\n",
+			   args->v0.version);
+		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+		args->v0.hblanke = (blanke & 0x0000ffff);
+		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+		args->v0.hblanks = (blanks & 0x0000ffff);
+		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
+		args->v0.htotal  = ( total & 0x0000ffff);
+		args->v0.time[0] = ktime_to_ns(ktime_get());
+		args->v0.vline = /* vline read locks hline */
+			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
+		args->v0.time[1] = ktime_to_ns(ktime_get());
+		args->v0.hline =
+			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
+	} else
+		return ret;
+
+	return 0;
+}
+
+void
+gf119_disp_root_fini(struct nv50_disp_root *root)
+{
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	/* disable all interrupts */
+	nvkm_wr32(device, 0x6100b0, 0x00000000);
+}
+
+int
+gf119_disp_root_init(struct nv50_disp_root *root)
+{
+	struct nv50_disp *disp = root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 tmp;
+	int i;
+
+	/* The segments of code below, which copy values from one register
+	 * to another, appear to inform EVO of the display capabilities,
+	 * or something similar.
+	 */
+
+	/* ... CRTC caps */
+	for (i = 0; i < disp->base.head.nr; i++) {
+		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
+		nvkm_wr32(device, 0x6101b4 + (i * 0x800), tmp);
+		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
+		nvkm_wr32(device, 0x6101b8 + (i * 0x800), tmp);
+		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
+		nvkm_wr32(device, 0x6101bc + (i * 0x800), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < disp->func->dac.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < disp->func->sor.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
+		nvkm_wr32(device, 0x6100ac, 0x00000100);
+		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
+				break;
+		) < 0)
+			return -EBUSY;
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nvkm_wr32(device, 0x610090, 0x00000000);
+	nvkm_wr32(device, 0x6100a0, 0x00000000);
+	nvkm_wr32(device, 0x6100b0, 0x00000307);
+
+	/* disable underflow reporting, preventing an intermittent issue
+	 * on some gk104 boards where the production vbios left this
+	 * setting enabled by default.
+	 *
+	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
+	 */
+	for (i = 0; i < disp->base.head.nr; i++)
+		nvkm_mask(device, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
+
+	return 0;
+}
+
+static const struct nv50_disp_root_func
+gf119_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gf119_disp_core_oclass,
+		&gf119_disp_base_oclass,
+		&gf119_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gf119_disp_oimm_oclass,
+		&gf119_disp_curs_oclass,
+	},
+};
+
+static int
+gf119_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gf119_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gf119_disp_root_oclass = {
+	.base.oclass = GF110_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gf119_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
new file mode 100644
index 000000000000..0bfdb1d1c6ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gk104_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gk104_disp_core_oclass,
+		&gk104_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gk104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gk104_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gk104_disp_root_oclass = {
+	.base.oclass = GK104_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gk104_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
new file mode 100644
index 000000000000..1e8dbed8a67c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gk110_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gk110_disp_core_oclass,
+		&gk110_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gk110_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gk110_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gk110_disp_root_oclass = {
+	.base.oclass = GK110_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gk110_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
new file mode 100644
index 000000000000..44c55be69e99
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gm107_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gm107_disp_core_oclass,
+		&gk110_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gm107_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gm107_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gm107_disp_root_oclass = {
+	.base.oclass = GM107_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gm107_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
new file mode 100644
index 000000000000..168bffe0643c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gm204_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gm204_disp_core_oclass,
+		&gk110_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gm204_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gm204_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gm204_disp_root_oclass = {
+	.base.oclass = GM204_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gm204_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
new file mode 100644
index 000000000000..124a0c24f92c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gt200_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&gt200_disp_core_oclass,
+		&gt200_disp_base_oclass,
+		&gt200_disp_ovly_oclass,
+	},
+	.pioc = {
+		&g84_disp_oimm_oclass,
+		&g84_disp_curs_oclass,
+	},
+};
+
+static int
+gt200_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gt200_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gt200_disp_root_oclass = {
+	.base.oclass = GT200_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gt200_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
new file mode 100644
index 000000000000..dff52f30668b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gt215_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&gt215_disp_core_oclass,
+		&gt215_disp_base_oclass,
+		&gt215_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gt215_disp_oimm_oclass,
+		&gt215_disp_curs_oclass,
+	},
+};
+
+static int
+gt215_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gt215_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gt215_disp_root_oclass = {
+	.base.oclass = GT214_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gt215_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
new file mode 100644
index 000000000000..62d3fb66d0ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object)
+#include "priv.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nv04_disp_root {
+	struct nvkm_object object;
+	struct nvkm_disp *disp;
+};
+
+static int
+nv04_disp_scanoutpos(struct nv04_disp_root *root,
+		     void *data, u32 size, int head)
+{
+	struct nvkm_device *device = root->disp->engine.subdev.device;
+	struct nvkm_object *object = &root->object;
+	const u32 hoff = head * 0x2000;
+	union {
+		struct nv04_disp_scanoutpos_v0 v0;
+	} *args = data;
+	u32 line;
+	int ret;
+
+	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp scanoutpos vers %d\n",
+			   args->v0.version);
+		args->v0.vblanks = nvkm_rd32(device, 0x680800 + hoff) & 0xffff;
+		args->v0.vtotal  = nvkm_rd32(device, 0x680804 + hoff) & 0xffff;
+		args->v0.vblanke = args->v0.vtotal - 1;
+
+		args->v0.hblanks = nvkm_rd32(device, 0x680820 + hoff) & 0xffff;
+		args->v0.htotal  = nvkm_rd32(device, 0x680824 + hoff) & 0xffff;
+		args->v0.hblanke = args->v0.htotal - 1;
+
+		/*
+		 * If output is vga instead of digital then vtotal/htotal is
+		 * invalid so we have to give up and trigger the timestamping
+		 * fallback in the drm core.
+		 */
+		if (!args->v0.vtotal || !args->v0.htotal)
+			return -ENOTSUPP;
+
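+		/* sample the raster position between two timestamps so the
+		 * caller can bound how stale the reading is
+		 */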
+		args->v0.time[0] = ktime_to_ns(ktime_get());
+		line = nvkm_rd32(device, 0x600868 + hoff);
+		args->v0.time[1] = ktime_to_ns(ktime_get());
+		args->v0.hline = (line & 0xffff0000) >> 16;
+		args->v0.vline = (line & 0x0000ffff);
+	} else
+		return ret;
+
+	return 0;
+}
+
+static int
+nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nv04_disp_root *root = nv04_disp_root(object);
+	union {
+		struct nv04_disp_mthd_v0 v0;
+	} *args = data;
+	int head, ret;
+
+	nvif_ioctl(object, "disp mthd size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+			   args->v0.version, args->v0.method, args->v0.head);
+		mthd = args->v0.method;
+		head = args->v0.head;
+	} else
+		return ret;
+
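+	/* nv04-class hardware exposes two heads */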
+	if (head < 0 || head >= 2)
+		return -ENXIO;
+
+	switch (mthd) {
+	case NV04_DISP_SCANOUTPOS:
+		return nv04_disp_scanoutpos(root, data, size, head);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct nvkm_object_func
+nv04_disp_root = {
+	.mthd = nv04_disp_mthd,
+	.ntfy = nvkm_disp_ntfy,
+};
+
+static int
+nv04_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		   void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv04_disp_root *root;
+
+	if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
+		return -ENOMEM;
+	root->disp = disp;
+	*pobject = &root->object;
+
+	nvkm_object_ctor(&nv04_disp_root, oclass, &root->object);
+	return 0;
+}
+
+const struct nvkm_disp_oclass
+nv04_disp_root_oclass = {
+	.base.oclass = NV04_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nv04_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
new file mode 100644
index 000000000000..06fb24d88702
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540));
+	const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540));
+	const u32 total  = nvkm_rd32(device, 0x610afc + (head * 0x540));
+	union {
+		struct nv04_disp_scanoutpos_v0 v0;
+	} *args = data;
+	int ret;
+
+	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp scanoutpos vers %d\n",
+			   args->v0.version);
+		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+		args->v0.hblanke = (blanke & 0x0000ffff);
+		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+		args->v0.hblanks = (blanks & 0x0000ffff);
+		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
+		args->v0.htotal  = ( total & 0x0000ffff);
+		args->v0.time[0] = ktime_to_ns(ktime_get());
+		args->v0.vline = /* vline read locks hline */
+			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
+		args->v0.time[1] = ktime_to_ns(ktime_get());
+		args->v0.hline =
+			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
+	} else
+		return ret;
+
+	return 0;
+}
+
+int
+nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	union {
+		struct nv50_disp_mthd_v0 v0;
+		struct nv50_disp_mthd_v1 v1;
+	} *args = data;
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	struct nv50_disp *disp = root->disp;
+	const struct nv50_disp_func *func = disp->func;
+	struct nvkm_output *outp = NULL;
+	struct nvkm_output *temp;
+	u16 type, mask = 0;
+	int head, ret;
+
+	if (mthd != NV50_DISP_MTHD)
+		return -EINVAL;
+
+	nvif_ioctl(object, "disp mthd size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+			   args->v0.version, args->v0.method, args->v0.head);
+		mthd = args->v0.method;
+		head = args->v0.head;
+	} else
+	if (nvif_unpack(args->v1, 1, 1, true)) {
+		nvif_ioctl(object, "disp mthd vers %d mthd %02x "
+				   "type %04x mask %04x\n",
+			   args->v1.version, args->v1.method,
+			   args->v1.hasht, args->v1.hashm);
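+		/* v1 methods identify the target output by its dcb hash; the
+		 * head index is encoded in bits 8:11 of the hash mask
+		 */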
+		mthd = args->v1.method;
+		type = args->v1.hasht;
+		mask = args->v1.hashm;
+		head = ffs((mask >> 8) & 0x0f) - 1;
+	} else
+		return ret;
+
+	if (head < 0 || head >= disp->base.head.nr)
+		return -ENXIO;
+
+	if (mask) {
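+		/* locate the output whose dcb hash matches the request */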
+		list_for_each_entry(temp, &disp->base.outp, head) {
+			if ((temp->info.hasht         == type) &&
+			    (temp->info.hashm & mask) == mask) {
+				outp = temp;
+				break;
+			}
+		}
+		if (outp == NULL)
+			return -ENXIO;
+	}
+
+	switch (mthd) {
+	case NV50_DISP_SCANOUTPOS:
+		return func->head.scanoutpos(object, disp, data, size, head);
+	default:
+		break;
+	}
+
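+	/* the methods below require a matched output; with outp == NULL the
+	 * switch expression evaluates to zero and falls through to -EINVAL
+	 */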
+	switch (mthd * !!outp) {
+	case NV50_DISP_MTHD_V1_DAC_PWR:
+		return func->dac.power(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_DAC_LOAD:
+		return func->dac.sense(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_PWR:
+		return func->sor.power(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
+		if (!func->sor.hda_eld)
+			return -ENODEV;
+		return func->sor.hda_eld(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
+		if (!func->sor.hdmi)
+			return -ENODEV;
+		return func->sor.hdmi(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
+		union {
+			struct nv50_disp_sor_lvds_script_v0 v0;
+		} *args = data;
+		nvif_ioctl(object, "disp sor lvds script size %d\n", size);
+		if (nvif_unpack(args->v0, 0, 0, false)) {
+			nvif_ioctl(object, "disp sor lvds script "
+					   "vers %d name %04x\n",
+				   args->v0.version, args->v0.script);
+			disp->sor.lvdsconf = args->v0.script;
+			return 0;
+		} else
+			return ret;
+	}
+		break;
+	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
+		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+		union {
+			struct nv50_disp_sor_dp_pwr_v0 v0;
+		} *args = data;
+		nvif_ioctl(object, "disp sor dp pwr size %d\n", size);
+		if (nvif_unpack(args->v0, 0, 0, false)) {
+			nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n",
+				   args->v0.version, args->v0.state);
+			if (args->v0.state == 0) {
+				nvkm_notify_put(&outpdp->irq);
+				outpdp->func->lnk_pwr(outpdp, 0);
+				atomic_set(&outpdp->lt.done, 0);
+				return 0;
+			} else {
+				nvkm_output_dp_train(&outpdp->base, 0, true);
+				return 0;
+			}
+		} else
+			return ret;
+	}
+		break;
+	case NV50_DISP_MTHD_V1_PIOR_PWR:
+		if (!func->pior.power)
+			return -ENODEV;
+		return func->pior.power(object, disp, data, size, head, outp);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nv50_disp_root_dmac_new_(const struct nvkm_oclass *oclass,
+			 void *data, u32 size, struct nvkm_object **pobject)
+{
+	const struct nv50_disp_dmac_oclass *sclass = oclass->priv;
+	struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
+	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
+			    oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
+			 void *data, u32 size, struct nvkm_object **pobject)
+{
+	const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
+	struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
+	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
+			    oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_root_child_get_(struct nvkm_object *object, int index,
+			  struct nvkm_oclass *sclass)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+
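+	/* enumerate dma channel classes first, then pio channel classes */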
+	if (index < ARRAY_SIZE(root->func->dmac)) {
+		sclass->base = root->func->dmac[index]->base;
+		sclass->priv = root->func->dmac[index];
+		sclass->ctor = nv50_disp_root_dmac_new_;
+		return 0;
+	}
+
+	index -= ARRAY_SIZE(root->func->dmac);
+
+	if (index < ARRAY_SIZE(root->func->pioc)) {
+		sclass->base = root->func->pioc[index]->base;
+		sclass->priv = root->func->pioc[index];
+		sclass->ctor = nv50_disp_root_pioc_new_;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nv50_disp_root_fini_(struct nvkm_object *object, bool suspend)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	root->func->fini(root);
+	return 0;
+}
+
+static int
+nv50_disp_root_init_(struct nvkm_object *object)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	return root->func->init(root);
+}
+
+static void *
+nv50_disp_root_dtor_(struct nvkm_object *object)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	nvkm_ramht_del(&root->ramht);
+	nvkm_gpuobj_del(&root->instmem);
+	return root;
+}
+
+static const struct nvkm_object_func
+nv50_disp_root_ = {
+	.dtor = nv50_disp_root_dtor_,
+	.init = nv50_disp_root_init_,
+	.fini = nv50_disp_root_fini_,
+	.mthd = nv50_disp_root_mthd_,
+	.ntfy = nvkm_disp_ntfy,
+	.sclass = nv50_disp_root_child_get_,
+};
+
+int
+nv50_disp_root_new_(const struct nv50_disp_root_func *func,
+		    struct nvkm_disp *base, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	struct nv50_disp_root *root;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	int ret;
+
+	if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &root->object;
+
+	nvkm_object_ctor(&nv50_disp_root_, oclass, &root->object);
+	root->func = func;
+	root->disp = disp;
+
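+	/* instance memory for the display engine's hash table and objects */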
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL,
+			      &root->instmem);
+	if (ret)
+		return ret;
+
+	return nvkm_ramht_new(device, 0x1000, 0, root->instmem, &root->ramht);
+}
+
+void
+nv50_disp_root_fini(struct nv50_disp_root *root)
+{
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	/* disable all interrupts */
+	nvkm_wr32(device, 0x610024, 0x00000000);
+	nvkm_wr32(device, 0x610020, 0x00000000);
+}
+
+int
+nv50_disp_root_init(struct nv50_disp_root *root)
+{
+	struct nv50_disp *disp = root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 tmp;
+	int i;
+
+	/* The below segments of code copying values from one register to
+	 * another appear to inform EVO of the display capabilities or
+	 * something similar.  It is unclear what the 0x614004 caps are for.
+	 */
+	tmp = nvkm_rd32(device, 0x614004);
+	nvkm_wr32(device, 0x610184, tmp);
+
+	/* ... CRTC caps */
+	for (i = 0; i < disp->base.head.nr; i++) {
+		tmp = nvkm_rd32(device, 0x616100 + (i * 0x800));
+		nvkm_wr32(device, 0x610190 + (i * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
+		nvkm_wr32(device, 0x610194 + (i * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
+		nvkm_wr32(device, 0x610198 + (i * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
+		nvkm_wr32(device, 0x61019c + (i * 0x10), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < disp->func->dac.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < disp->func->sor.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
+	}
+
+	/* ... PIOR caps */
+	for (i = 0; i < disp->func->pior.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
+	}
+
+	/* take ownership of the display away from the vbios */
+	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
+		nvkm_wr32(device, 0x610024, 0x00000100);
+		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
+				break;
+		) < 0)
+			return -EBUSY;
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nvkm_wr32(device, 0x61002c, 0x00000370);
+	nvkm_wr32(device, 0x610028, 0x00000000);
+	return 0;
+}
+
+static const struct nv50_disp_root_func
+nv50_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&nv50_disp_core_oclass,
+		&nv50_disp_base_oclass,
+		&nv50_disp_ovly_oclass,
+	},
+	.pioc = {
+		&nv50_disp_oimm_oclass,
+		&nv50_disp_curs_oclass,
+	},
+};
+
+static int
+nv50_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		   void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&nv50_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+nv50_disp_root_oclass = {
+	.base.oclass = NV50_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nv50_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
new file mode 100644
index 000000000000..5b2c903ce9ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -0,0 +1,43 @@
+#ifndef __NV50_DISP_ROOT_H__
+#define __NV50_DISP_ROOT_H__
+#define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
+#include "nv50.h"
+#include "channv50.h"
+#include "dmacnv50.h"
+
+struct nv50_disp_root {
+	const struct nv50_disp_root_func *func;
+	struct nv50_disp *disp;
+	struct nvkm_object object;
+
+	struct nvkm_gpuobj *instmem;
+	struct nvkm_ramht *ramht;
+};
+
+struct nv50_disp_root_func {
+	int (*init)(struct nv50_disp_root *);
+	void (*fini)(struct nv50_disp_root *);
+	const struct nv50_disp_dmac_oclass *dmac[3];
+	const struct nv50_disp_pioc_oclass *pioc[2];
+};
+
+int  nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
+			 const struct nvkm_oclass *, void *data, u32 size,
+			 struct nvkm_object **);
+int  nv50_disp_root_init(struct nv50_disp_root *);
+void nv50_disp_root_fini(struct nv50_disp_root *);
+
+int  gf119_disp_root_init(struct nv50_disp_root *);
+void gf119_disp_root_fini(struct nv50_disp_root *);
+
+extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
+extern const struct nvkm_disp_oclass g84_disp_root_oclass;
+extern const struct nvkm_disp_oclass g94_disp_root_oclass;
+extern const struct nvkm_disp_oclass gt200_disp_root_oclass;
+extern const struct nvkm_disp_oclass gt215_disp_root_oclass;
+extern const struct nvkm_disp_oclass gf119_disp_root_oclass;
+extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
+extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
+extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
+extern const struct nvkm_disp_oclass gm204_disp_root_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 8918da7ffdf2..1bb9d661e9b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -24,7 +24,6 @@
 #include "nv50.h"
 #include "outpdp.h"
 
-#include <core/device.h>
 #include <subdev/timer.h>
 
 static inline u32
@@ -39,12 +38,33 @@ g94_sor_loff(struct nvkm_output_dp *outp)
 	return g94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
 }
 
-static inline u32
-g94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+/*******************************************************************************
+ * TMDS/LVDS
+ ******************************************************************************/
+static const struct nvkm_output_func
+g94_sor_output_func = {
+};
+
+int
+g94_sor_output_new(struct nvkm_disp *disp, int index,
+		   struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_new_(&g94_sor_output_func, disp,
+				index, dcbE, poutp);
+}
+
+/*******************************************************************************
+ * DisplayPort
+ ******************************************************************************/
+u32
+g94_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
 {
+	static const u8 gm100[] = { 0, 8, 16, 24 };
 	static const u8 mcp89[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
-	static const u8 g94[] = { 16, 8, 0, 24 };
-	if (nv_device(priv)->chipset == 0xaf)
+	static const u8   g94[] = { 16, 8, 0, 24 };
+	if (device->chipset >= 0x110)
+		return gm100[lane];
+	if (device->chipset == 0xaf)
 		return mcp89[lane];
 	return g94[lane];
 }
@@ -52,33 +72,36 @@ g94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
 static int
 g94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 loff = g94_sor_loff(outp);
-	nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
+	nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24);
 	return 0;
 }
 
 int
 g94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = g94_sor_soff(outp);
 	const u32 loff = g94_sor_loff(outp);
 	u32 mask = 0, i;
 
 	for (i = 0; i < nr; i++)
-		mask |= 1 << (g94_sor_dp_lane_map(priv, i) >> 3);
-
-	nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
-	nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
-	nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+		mask |= 1 << (g94_sor_dp_lane_map(device, i) >> 3);
+
+	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
+	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
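+	/* wait for the trigger bit to clear, indicating the update completed */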
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
+			break;
+	);
 	return 0;
 }
 
 static int
 g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = g94_sor_soff(outp);
 	const u32 loff = g94_sor_loff(outp);
 	u32 dpctrl = 0x00000000;
@@ -90,17 +113,17 @@ g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 	if (bw > 0x06)
 		clksor |= 0x00040000;
 
-	nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
-	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+	nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
+	nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
 	return 0;
 }
 
 static int
 g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	const u32 shift = g94_sor_dp_lane_map(priv, ln);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 shift = g94_sor_dp_lane_map(device, ln);
 	const u32 loff = g94_sor_loff(outp);
 	u32 addr, data[3];
 	u8  ver, hdr, cnt, len;
@@ -109,37 +132,37 @@ g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
 
 	addr = nvbios_dpout_match(bios, outp->base.info.hasht,
 					outp->base.info.hashm,
-				 &ver, &hdr, &cnt, &len, &info);
+				  &ver, &hdr, &cnt, &len, &info);
 	if (!addr)
 		return -ENODEV;
 
 	addr = nvbios_dpcfg_match(bios, addr, 0, vs, pe,
-				 &ver, &hdr, &cnt, &len, &ocfg);
+				  &ver, &hdr, &cnt, &len, &ocfg);
 	if (!addr)
 		return -EINVAL;
 
-	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
-	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff);
+	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nvkm_rd32(device, 0x61c130 + loff);
 	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
 		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
+	nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nvkm_wr32(device, 0x61c130 + loff, data[2]);
 	return 0;
 }
 
-struct nvkm_output_dp_impl
-g94_sor_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_output_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
+static const struct nvkm_output_dp_func
+g94_sor_dp_func = {
 	.pattern = g94_sor_dp_pattern,
 	.lnk_pwr = g94_sor_dp_lnk_pwr,
 	.lnk_ctl = g94_sor_dp_lnk_ctl,
 	.drv_ctl = g94_sor_dp_drv_ctl,
 };
+
+int
+g94_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+	       struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&g94_sor_dp_func, disp, index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 52fbe4880e13..b4b41b135643 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -25,39 +25,32 @@
 #include "outpdp.h"
 
 static inline u32
-gf110_sor_soff(struct nvkm_output_dp *outp)
+gf119_sor_soff(struct nvkm_output_dp *outp)
 {
 	return (ffs(outp->base.info.or) - 1) * 0x800;
 }
 
 static inline u32
-gf110_sor_loff(struct nvkm_output_dp *outp)
+gf119_sor_loff(struct nvkm_output_dp *outp)
 {
-	return gf110_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
-}
-
-static inline u32
-gf110_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
-{
-	static const u8 gf110[] = { 16, 8, 0, 24 };
-	return gf110[lane];
+	return gf119_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
 }
 
 static int
-gf110_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	const u32 loff = gf110_sor_loff(outp);
-	nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	const u32 loff = gf119_sor_loff(outp);
+	nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
 	return 0;
 }
 
 int
-gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
+gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	const u32 soff = gf110_sor_soff(outp);
-	const u32 loff = gf110_sor_loff(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	const u32 soff = gf119_sor_soff(outp);
+	const u32 loff = gf119_sor_loff(outp);
 	u32 dpctrl = 0x00000000;
 	u32 clksor = 0x00000000;
 
@@ -66,19 +59,19 @@ gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 	if (ef)
 		dpctrl |= 0x00004000;
 
-	nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
-	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+	nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+	nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
 	return 0;
 }
 
 static int
-gf110_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
+gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 		     int ln, int vs, int pe, int pc)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	const u32 shift = gf110_sor_dp_lane_map(priv, ln);
-	const u32 loff = gf110_sor_loff(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 shift = g94_sor_dp_lane_map(device, ln);
+	const u32 loff = gf119_sor_loff(outp);
 	u32 addr, data[4];
 	u8  ver, hdr, cnt, len;
 	struct nvbios_dpout info;
@@ -95,30 +88,30 @@ gf110_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 	if (!addr)
 		return -EINVAL;
 
-	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
-	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff);
+	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nvkm_rd32(device, 0x61c130 + loff);
 	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
 		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
-	data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
-	nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+	nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nvkm_wr32(device, 0x61c130 + loff, data[2]);
+	data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
+	nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
 	return 0;
 }
 
-struct nvkm_output_dp_impl
-gf110_sor_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_output_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
-	.pattern = gf110_sor_dp_pattern,
+static const struct nvkm_output_dp_func
+gf119_sor_dp_func = {
+	.pattern = gf119_sor_dp_pattern,
 	.lnk_pwr = g94_sor_dp_lnk_pwr,
-	.lnk_ctl = gf110_sor_dp_lnk_ctl,
-	.drv_ctl = gf110_sor_dp_drv_ctl,
+	.lnk_ctl = gf119_sor_dp_lnk_ctl,
+	.drv_ctl = gf119_sor_dp_drv_ctl,
 };
+
+int
+gf119_sor_dp_new(struct nvkm_disp *disp, int index,
+		 struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&gf119_sor_dp_func, disp, index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
index 1e40dfe11319..029e5f16c2a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
@@ -41,17 +41,17 @@ gm204_sor_loff(struct nvkm_output_dp *outp)
 void
 gm204_sor_magic(struct nvkm_output *outp)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->disp->engine.subdev.device;
 	const u32 soff = outp->or * 0x100;
 	const u32 data = outp->or + 1;
 	if (outp->info.sorconf.link & 1)
-		nv_mask(priv, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
+		nvkm_mask(device, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
 	if (outp->info.sorconf.link & 2)
-		nv_mask(priv, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
+		nvkm_mask(device, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
 }
 
 static inline u32
-gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+gm204_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
 {
 	return lane * 0x08;
 }
@@ -59,30 +59,33 @@ gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
 static int
 gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = gm204_sor_soff(outp);
 	const u32 data = 0x01010101 * pattern;
 	if (outp->base.info.sorconf.link & 1)
-		nv_mask(priv, 0x61c110 + soff, 0x0f0f0f0f, data);
+		nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
 	else
-		nv_mask(priv, 0x61c12c + soff, 0x0f0f0f0f, data);
+		nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
 	return 0;
 }
 
 static int
 gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = gm204_sor_soff(outp);
 	const u32 loff = gm204_sor_loff(outp);
 	u32 mask = 0, i;
 
 	for (i = 0; i < nr; i++)
-		mask |= 1 << (gm204_sor_dp_lane_map(priv, i) >> 3);
+		mask |= 1 << (gm204_sor_dp_lane_map(device, i) >> 3);
 
-	nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
-	nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
-	nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
+	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
+			break;
+	);
 	return 0;
 }
 
@@ -90,9 +93,9 @@ static int
 gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 		     int ln, int vs, int pe, int pc)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	const u32 shift = gm204_sor_dp_lane_map(priv, ln);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 shift = gm204_sor_dp_lane_map(device, ln);
 	const u32 loff = gm204_sor_loff(outp);
 	u32 addr, data[4];
 	u8  ver, hdr, cnt, len;
@@ -109,31 +112,32 @@ gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 				  &ver, &hdr, &cnt, &len, &ocfg);
 	if (!addr)
 		return -EINVAL;
+	ocfg.tx_pu &= 0x0f;
 
-	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
-	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff);
-	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
-		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
-	data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
-	nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nvkm_rd32(device, 0x61c130 + loff);
+	if ((data[2] & 0x00000f00) < (ocfg.tx_pu << 8) || ln == 0)
+		data[2] = (data[2] & ~0x00000f00) | (ocfg.tx_pu << 8);
+	nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nvkm_wr32(device, 0x61c130 + loff, data[2]);
+	data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
+	nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
 	return 0;
 }
 
-struct nvkm_output_dp_impl
-gm204_sor_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_output_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
+static const struct nvkm_output_dp_func
+gm204_sor_dp_func = {
 	.pattern = gm204_sor_dp_pattern,
 	.lnk_pwr = gm204_sor_dp_lnk_pwr,
-	.lnk_ctl = gf110_sor_dp_lnk_ctl,
+	.lnk_ctl = gf119_sor_dp_lnk_ctl,
 	.drv_ctl = gm204_sor_dp_drv_ctl,
 };
+
+int
+gm204_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		 struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&gm204_sor_dp_func, disp, index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
index b229a311c78c..29e0d2a9a839 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
@@ -33,6 +33,7 @@
 int
 nv50_sor_power(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	union {
 		struct nv50_disp_sor_pwr_v0 v0;
 	} *args = data;
@@ -40,17 +41,39 @@ nv50_sor_power(NV50_DISP_MTHD_V1)
 	u32 stat;
 	int ret;
 
-	nv_ioctl(object, "disp sor pwr size %d\n", size);
+	nvif_ioctl(object, "disp sor pwr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor pwr vers %d state %d\n",
-			 args->v0.version, args->v0.state);
+		nvif_ioctl(object, "disp sor pwr vers %d state %d\n",
+			   args->v0.version, args->v0.state);
 		stat = !!args->v0.state;
 	} else
 		return ret;
 
-	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
-	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
-	nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
+			break;
+	);
+	nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
+			break;
+	);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+			break;
+	);
 	return 0;
 }
+
+static const struct nvkm_output_func
+nv50_sor_output_func = {
+};
+
+int
+nv50_sor_output_new(struct nvkm_disp *disp, int index,
+		    struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_new_(&nv50_sor_output_func, disp,
+				index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
index c4622c7388d0..8bff95c6343f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
@@ -23,131 +23,119 @@
  */
 #include <subdev/vga.h>
 
-#include <core/device.h>
-
 u8
-nv_rdport(void *obj, int head, u16 port)
+nvkm_rdport(struct nvkm_device *device, int head, u16 port)
 {
-	struct nvkm_device *device = nv_device(obj);
-
 	if (device->card_type >= NV_50)
-		return nv_rd08(obj, 0x601000 + port);
+		return nvkm_rd08(device, 0x601000 + port);
 
 	if (port == 0x03c0 || port == 0x03c1 ||	/* AR */
 	    port == 0x03c2 || port == 0x03da ||	/* INP0 */
 	    port == 0x03d4 || port == 0x03d5)	/* CR */
-		return nv_rd08(obj, 0x601000 + (head * 0x2000) + port);
+		return nvkm_rd08(device, 0x601000 + (head * 0x2000) + port);
 
 	if (port == 0x03c2 || port == 0x03cc ||	/* MISC */
 	    port == 0x03c4 || port == 0x03c5 ||	/* SR */
 	    port == 0x03ce || port == 0x03cf) {	/* GR */
 		if (device->card_type < NV_40)
 			head = 0; /* CR44 selects head */
-		return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port);
+		return nvkm_rd08(device, 0x0c0000 + (head * 0x2000) + port);
 	}
 
-	nv_error(obj, "unknown vga port 0x%04x\n", port);
 	return 0x00;
 }
 
 void
-nv_wrport(void *obj, int head, u16 port, u8 data)
+nvkm_wrport(struct nvkm_device *device, int head, u16 port, u8 data)
 {
-	struct nvkm_device *device = nv_device(obj);
-
 	if (device->card_type >= NV_50)
-		nv_wr08(obj, 0x601000 + port, data);
+		nvkm_wr08(device, 0x601000 + port, data);
 	else
 	if (port == 0x03c0 || port == 0x03c1 ||	/* AR */
 	    port == 0x03c2 || port == 0x03da ||	/* INP0 */
 	    port == 0x03d4 || port == 0x03d5)	/* CR */
-		nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data);
+		nvkm_wr08(device, 0x601000 + (head * 0x2000) + port, data);
 	else
 	if (port == 0x03c2 || port == 0x03cc ||	/* MISC */
 	    port == 0x03c4 || port == 0x03c5 ||	/* SR */
 	    port == 0x03ce || port == 0x03cf) {	/* GR */
 		if (device->card_type < NV_40)
 			head = 0; /* CR44 selects head */
-		nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data);
-	} else
-		nv_error(obj, "unknown vga port 0x%04x\n", port);
+		nvkm_wr08(device, 0x0c0000 + (head * 0x2000) + port, data);
+	}
 }
 
 u8
-nv_rdvgas(void *obj, int head, u8 index)
+nvkm_rdvgas(struct nvkm_device *device, int head, u8 index)
 {
-	nv_wrport(obj, head, 0x03c4, index);
-	return nv_rdport(obj, head, 0x03c5);
+	nvkm_wrport(device, head, 0x03c4, index);
+	return nvkm_rdport(device, head, 0x03c5);
 }
 
 void
-nv_wrvgas(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgas(struct nvkm_device *device, int head, u8 index, u8 value)
 {
-	nv_wrport(obj, head, 0x03c4, index);
-	nv_wrport(obj, head, 0x03c5, value);
+	nvkm_wrport(device, head, 0x03c4, index);
+	nvkm_wrport(device, head, 0x03c5, value);
 }
 
 u8
-nv_rdvgag(void *obj, int head, u8 index)
+nvkm_rdvgag(struct nvkm_device *device, int head, u8 index)
 {
-	nv_wrport(obj, head, 0x03ce, index);
-	return nv_rdport(obj, head, 0x03cf);
+	nvkm_wrport(device, head, 0x03ce, index);
+	return nvkm_rdport(device, head, 0x03cf);
 }
 
 void
-nv_wrvgag(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgag(struct nvkm_device *device, int head, u8 index, u8 value)
 {
-	nv_wrport(obj, head, 0x03ce, index);
-	nv_wrport(obj, head, 0x03cf, value);
+	nvkm_wrport(device, head, 0x03ce, index);
+	nvkm_wrport(device, head, 0x03cf, value);
 }
 
 u8
-nv_rdvgac(void *obj, int head, u8 index)
+nvkm_rdvgac(struct nvkm_device *device, int head, u8 index)
 {
-	nv_wrport(obj, head, 0x03d4, index);
-	return nv_rdport(obj, head, 0x03d5);
+	nvkm_wrport(device, head, 0x03d4, index);
+	return nvkm_rdport(device, head, 0x03d5);
 }
 
 void
-nv_wrvgac(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgac(struct nvkm_device *device, int head, u8 index, u8 value)
 {
-	nv_wrport(obj, head, 0x03d4, index);
-	nv_wrport(obj, head, 0x03d5, value);
+	nvkm_wrport(device, head, 0x03d4, index);
+	nvkm_wrport(device, head, 0x03d5, value);
 }
 
 u8
-nv_rdvgai(void *obj, int head, u16 port, u8 index)
+nvkm_rdvgai(struct nvkm_device *device, int head, u16 port, u8 index)
 {
-	if (port == 0x03c4) return nv_rdvgas(obj, head, index);
-	if (port == 0x03ce) return nv_rdvgag(obj, head, index);
-	if (port == 0x03d4) return nv_rdvgac(obj, head, index);
-	nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+	if (port == 0x03c4) return nvkm_rdvgas(device, head, index);
+	if (port == 0x03ce) return nvkm_rdvgag(device, head, index);
+	if (port == 0x03d4) return nvkm_rdvgac(device, head, index);
 	return 0x00;
 }
 
 void
-nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
+nvkm_wrvgai(struct nvkm_device *device, int head, u16 port, u8 index, u8 value)
 {
-	if      (port == 0x03c4) nv_wrvgas(obj, head, index, value);
-	else if (port == 0x03ce) nv_wrvgag(obj, head, index, value);
-	else if (port == 0x03d4) nv_wrvgac(obj, head, index, value);
-	else nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+	if      (port == 0x03c4) nvkm_wrvgas(device, head, index, value);
+	else if (port == 0x03ce) nvkm_wrvgag(device, head, index, value);
+	else if (port == 0x03d4) nvkm_wrvgac(device, head, index, value);
 }
 
 bool
-nv_lockvgac(void *obj, bool lock)
+nvkm_lockvgac(struct nvkm_device *device, bool lock)
 {
-	struct nvkm_device *dev = nv_device(obj);
-
-	bool locked = !nv_rdvgac(obj, 0, 0x1f);
+	bool locked = !nvkm_rdvgac(device, 0, 0x1f);
 	u8 data = lock ? 0x99 : 0x57;
-	if (dev->card_type < NV_50)
-		nv_wrvgac(obj, 0, 0x1f, data);
+	if (device->card_type < NV_50)
+		nvkm_wrvgac(device, 0, 0x1f, data);
 	else
-		nv_wrvgac(obj, 0, 0x3f, data);
-	if (dev->chipset == 0x11) {
-		if (!(nv_rd32(obj, 0x001084) & 0x10000000))
-			nv_wrvgac(obj, 1, 0x1f, data);
+		nvkm_wrvgac(device, 0, 0x3f, data);
+	if (device->chipset == 0x11) {
+		if (!(nvkm_rd32(device, 0x001084) & 0x10000000))
+			nvkm_wrvgac(device, 1, 0x1f, data);
 	}
 	return locked;
 }
@@ -171,16 +159,16 @@ nv_lockvgac(void *obj, bool lock)
  * other values are treated as literal values to set
  */
 u8
-nv_rdvgaowner(void *obj)
+nvkm_rdvgaowner(struct nvkm_device *device)
 {
-	if (nv_device(obj)->card_type < NV_50) {
-		if (nv_device(obj)->chipset == 0x11) {
-			u32 tied = nv_rd32(obj, 0x001084) & 0x10000000;
+	if (device->card_type < NV_50) {
+		if (device->chipset == 0x11) {
+			u32 tied = nvkm_rd32(device, 0x001084) & 0x10000000;
 			if (tied == 0) {
-				u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80;
-				u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01;
-				u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80;
-				u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01;
+				u8 slA = nvkm_rdvgac(device, 0, 0x28) & 0x80;
+				u8 tvA = nvkm_rdvgac(device, 0, 0x33) & 0x01;
+				u8 slB = nvkm_rdvgac(device, 1, 0x28) & 0x80;
+				u8 tvB = nvkm_rdvgac(device, 1, 0x33) & 0x01;
 				if (slA && !tvA) return 0x00;
 				if (slB && !tvB) return 0x03;
 				if (slA) return 0x00;
@@ -190,30 +178,28 @@ nv_rdvgaowner(void *obj)
 			return 0x04;
 		}
 
-		return nv_rdvgac(obj, 0, 0x44);
+		return nvkm_rdvgac(device, 0, 0x44);
 	}
 
-	nv_error(obj, "rdvgaowner after nv4x\n");
 	return 0x00;
 }
 
 void
-nv_wrvgaowner(void *obj, u8 select)
+nvkm_wrvgaowner(struct nvkm_device *device, u8 select)
 {
-	if (nv_device(obj)->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		u8 owner = (select == 1) ? 3 : select;
-		if (nv_device(obj)->chipset == 0x11) {
+		if (device->chipset == 0x11) {
 			/* workaround hw lockup bug */
-			nv_rdvgac(obj, 0, 0x1f);
-			nv_rdvgac(obj, 1, 0x1f);
+			nvkm_rdvgac(device, 0, 0x1f);
+			nvkm_rdvgac(device, 1, 0x1f);
 		}
 
-		nv_wrvgac(obj, 0, 0x44, owner);
+		nvkm_wrvgac(device, 0, 0x44, owner);
 
-		if (nv_device(obj)->chipset == 0x11) {
-			nv_wrvgac(obj, 0, 0x2e, owner);
-			nv_wrvgac(obj, 0, 0x2e, owner);
+		if (device->chipset == 0x11) {
+			nvkm_wrvgac(device, 0, 0x2e, owner);
+			nvkm_wrvgac(device, 0, 0x2e, owner);
 		}
-	} else
-		nv_error(obj, "wrvgaowner after nv4x\n");
+	}
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
new file mode 100644
index 000000000000..c4a2ce9b0d71
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
@@ -0,0 +1,11 @@
+nvkm-y += nvkm/engine/dma/base.o
+nvkm-y += nvkm/engine/dma/nv04.o
+nvkm-y += nvkm/engine/dma/nv50.o
+nvkm-y += nvkm/engine/dma/gf100.o
+nvkm-y += nvkm/engine/dma/gf119.o
+
+nvkm-y += nvkm/engine/dma/user.o
+nvkm-y += nvkm/engine/dma/usernv04.o
+nvkm-y += nvkm/engine/dma/usernv50.o
+nvkm-y += nvkm/engine/dma/usergf100.o
+nvkm-y += nvkm/engine/dma/usergf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
new file mode 100644
index 000000000000..9769fc0d5351
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <core/client.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
+
+struct nvkm_dmaobj *
+nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object)
+{
+	struct rb_node *node = client->dmaroot.rb_node;
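+	/* walk the client's red-black tree of dma objects, keyed by handle */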
+	while (node) {
+		struct nvkm_dmaobj *dmaobj =
+			container_of(node, typeof(*dmaobj), rb);
+		if (object < dmaobj->handle)
+			node = node->rb_left;
+		else
+		if (object > dmaobj->handle)
+			node = node->rb_right;
+		else
+			return dmaobj;
+	}
+	return NULL;
+}
+
+static int
+nvkm_dma_oclass_new(struct nvkm_device *device,
+		    const struct nvkm_oclass *oclass, void *data, u32 size,
+		    struct nvkm_object **pobject)
+{
+	struct nvkm_dma *dma = nvkm_dma(oclass->engine);
+	struct nvkm_dmaobj *dmaobj = NULL;
+	struct nvkm_client *client = oclass->client;
+	struct rb_node **ptr = &client->dmaroot.rb_node;
+	struct rb_node *parent = NULL;
+	int ret;
+
+	ret = dma->func->class_new(dma, oclass, data, size, &dmaobj);
+	if (dmaobj)
+		*pobject = &dmaobj->object;
+	if (ret)
+		return ret;
+
+	dmaobj->handle = oclass->object;
+
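+	/* insert into the client's tree; duplicate handles are rejected */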
+	while (*ptr) {
+		struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
+		parent = *ptr;
+		if (dmaobj->handle < obj->handle)
+			ptr = &parent->rb_left;
+		else
+		if (dmaobj->handle > obj->handle)
+			ptr = &parent->rb_right;
+		else
+			return -EEXIST;
+	}
+
+	rb_link_node(&dmaobj->rb, parent, ptr);
+	rb_insert_color(&dmaobj->rb, &client->dmaroot);
+	return 0;
+}
+
+static const struct nvkm_device_oclass
+nvkm_dma_oclass_base = {
+	.ctor = nvkm_dma_oclass_new,
+};
+
+static int
+nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+			 struct nvkm_object **pobject)
+{
+	return nvkm_dma_oclass_new(oclass->engine->subdev.device,
+				   oclass, data, size, pobject);
+}
+
+static const struct nvkm_sclass
+nvkm_dma_sclass[] = {
+	{ 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+	{ 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+	{ 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+};
+
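+/* the same dma classes are exposed both at device scope and to fifo
+ * channels; only the constructor path differs
+ */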
+static int
+nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
+			 const struct nvkm_device_oclass **class)
+{
+	const int count = ARRAY_SIZE(nvkm_dma_sclass);
+	if (index < count) {
+		const struct nvkm_sclass *oclass = &nvkm_dma_sclass[index];
+		sclass->base = oclass[0];
+		sclass->engn = oclass;
+		*class = &nvkm_dma_oclass_base;
+		return index;
+	}
+	return count;
+}
+
+static int
+nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index)
+{
+	const int count = ARRAY_SIZE(nvkm_dma_sclass);
+	if (index < count) {
+		oclass->base = nvkm_dma_sclass[index];
+		return index;
+	}
+	return count;
+}
+
+static void *
+nvkm_dma_dtor(struct nvkm_engine *engine)
+{
+	return nvkm_dma(engine);
+}
+
+static const struct nvkm_engine_func
+nvkm_dma = {
+	.dtor = nvkm_dma_dtor,
+	.base.sclass = nvkm_dma_oclass_base_get,
+	.fifo.sclass = nvkm_dma_oclass_fifo_get,
+};
+
+int
+nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_dma **pdma)
+{
+	struct nvkm_dma *dma;
+
+	if (!(dma = *pdma = kzalloc(sizeof(*dma), GFP_KERNEL)))
+		return -ENOMEM;
+	dma->func = func;
+
+	return nvkm_engine_ctor(&nvkm_dma, device, index,
+				0, true, &dma->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
index b7613059da08..efec5d322179 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
@@ -21,24 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+#include "user.h"
 
-void
-nv40_mc_msi_rearm(struct nvkm_mc *pmc)
+static const struct nvkm_dma_func
+gf100_dma = {
+	.class_new = gf100_dmaobj_new,
+};
+
+int
+gf100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
 {
-	struct nv04_mc_priv *priv = (void *)pmc;
-	nv_wr08(priv, 0x088068, 0xff);
+	return nvkm_dma_new_(&gf100_dma, device, index, pdma);
 }
-
-struct nvkm_oclass *
-nv40_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x40),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv04_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv04_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
new file mode 100644
index 000000000000..34c766039aed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+gf119_dma = {
+	.class_new = gf119_dmaobj_new,
+};
+
+int
+gf119_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&gf119_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
new file mode 100644
index 000000000000..30747a0ce488
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+nv04_dma = {
+	.class_new = nv04_dmaobj_new,
+};
+
+int
+nv04_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&nv04_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
new file mode 100644
index 000000000000..77aca7b71c83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+nv50_dma = {
+	.class_new = nv50_dmaobj_new,
+};
+
+int
+nv50_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&nv50_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
new file mode 100644
index 000000000000..deb37ee55c0b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DMA_PRIV_H__
+#define __NVKM_DMA_PRIV_H__
+#define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
+#include <engine/dma.h>
+
+struct nvkm_dmaobj_func {
+	int (*bind)(struct nvkm_dmaobj *, struct nvkm_gpuobj *, int align,
+		    struct nvkm_gpuobj **);
+};
+
+int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *,
+		  int index, struct nvkm_dma **);
+
+struct nvkm_dma_func {
+	int (*class_new)(struct nvkm_dma *, const struct nvkm_oclass *,
+			 void *data, u32 size, struct nvkm_dmaobj **);
+};
+#endif
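
The nvkm_dma() macro above relies on container_of() to recover the wrapper struct from a pointer to its embedded engine member; this is how the func-table callbacks get back to the derived type without storing a back-pointer. A standalone demo of that pattern, with illustrative types in place of the nvkm ones:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct engine { int index; };

struct dma {
	const char *name;
	struct engine engine;	/* embedded base object */
};

#define to_dma(p) container_of((p), struct dma, engine)

int
main(void)
{
	struct dma dma = { .name = "dma", .engine = { .index = 7 } };
	struct engine *base = &dma.engine;	/* what a callback receives */

	/* the callback only sees the base pointer; container_of() gets
	 * the derived object back by subtracting the member offset */
	printf("%s, index %d\n", to_dma(base)->name, base->index);
	return 0;
}
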
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
index a2b60d86baba..45ab062661a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "user.h"
 
 #include <core/client.h>
-#include <core/device.h>
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/instmem.h>
 
@@ -32,56 +32,56 @@
 #include <nvif/unpack.h>
 
 static int
-nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		 struct nvkm_gpuobj **pgpuobj)
+nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
+		 int align, struct nvkm_gpuobj **pgpuobj)
 {
-	const struct nvkm_dmaeng_impl *impl = (void *)
-		nv_oclass(nv_object(dmaobj)->engine);
-	int ret = 0;
-
-	if (nv_object(dmaobj) == parent) { /* ctor bind */
-		if (nv_mclass(parent->parent) == NV_DEVICE) {
-			/* delayed, or no, binding */
-			return 0;
-		}
-		ret = impl->bind(dmaobj, parent, pgpuobj);
-		if (ret == 0)
-			nvkm_object_ref(NULL, &parent);
-		return ret;
-	}
+	struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
+	return dmaobj->func->bind(dmaobj, gpuobj, align, pgpuobj);
+}
 
-	return impl->bind(dmaobj, parent, pgpuobj);
+static void *
+nvkm_dmaobj_dtor(struct nvkm_object *base)
+{
+	struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
+	if (!RB_EMPTY_NODE(&dmaobj->rb))
+		rb_erase(&dmaobj->rb, &dmaobj->object.client->dmaroot);
+	return dmaobj;
 }
 
+static const struct nvkm_object_func
+nvkm_dmaobj_func = {
+	.dtor = nvkm_dmaobj_dtor,
+	.bind = nvkm_dmaobj_bind,
+};
+
 int
-nvkm_dmaobj_create_(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void **pdata, u32 *psize,
-		    int length, void **pobject)
+nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
+		 const struct nvkm_oclass *oclass, void **pdata, u32 *psize,
+		 struct nvkm_dmaobj *dmaobj)
 {
 	union {
 		struct nv_dma_v0 v0;
 	} *args = *pdata;
-	struct nvkm_instmem *instmem = nvkm_instmem(parent);
-	struct nvkm_client *client = nvkm_client(parent);
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_dmaobj *dmaobj;
+	struct nvkm_device *device = dma->engine.subdev.device;
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_object *parent = oclass->parent;
+	struct nvkm_instmem *instmem = device->imem;
+	struct nvkm_fb *fb = device->fb;
 	void *data = *pdata;
 	u32 size = *psize;
 	int ret;
 
-	ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
-	dmaobj = *pobject;
-	if (ret)
-		return ret;
+	nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
+	dmaobj->func = func;
+	dmaobj->dma = dma;
+	RB_CLEAR_NODE(&dmaobj->rb);
 
-	nv_ioctl(parent, "create dma size %d\n", *psize);
+	nvif_ioctl(parent, "create dma size %d\n", *psize);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(parent, "create dma vers %d target %d access %d "
-				 "start %016llx limit %016llx\n",
-			 args->v0.version, args->v0.target, args->v0.access,
-			 args->v0.start, args->v0.limit);
+		nvif_ioctl(parent, "create dma vers %d target %d access %d "
+				   "start %016llx limit %016llx\n",
+			   args->v0.version, args->v0.target, args->v0.access,
+			   args->v0.start, args->v0.limit);
 		dmaobj->target = args->v0.target;
 		dmaobj->access = args->v0.access;
 		dmaobj->start  = args->v0.start;
@@ -101,7 +101,7 @@ nvkm_dmaobj_create_(struct nvkm_object *parent,
 		break;
 	case NV_DMA_V0_TARGET_VRAM:
 		if (!client->super) {
-			if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
+			if (dmaobj->limit >= fb->ram->size - instmem->reserved)
 				return -EACCES;
 			if (device->card_type >= NV_50)
 				return -EACCES;
@@ -142,23 +142,3 @@ nvkm_dmaobj_create_(struct nvkm_object *parent,
 
 	return ret;
 }
-
-int
-_nvkm_dmaeng_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	const struct nvkm_dmaeng_impl *impl = (void *)oclass;
-	struct nvkm_dmaeng *dmaeng;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true, "DMAOBJ",
-				 "dmaobj", &dmaeng);
-	*pobject = nv_object(dmaeng);
-	if (ret)
-		return ret;
-
-	nv_engine(dmaeng)->sclass = impl->sclass;
-	dmaeng->bind = nvkm_dmaobj_bind;
-	return 0;
-}
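
The ctor/dtor pair above uses RB_CLEAR_NODE() and RB_EMPTY_NODE() so the destructor only erases objects that were actually inserted into the client's dmaroot tree. A standalone sketch of the same init-to-empty pattern, substituting a doubly-linked list for the kernel rbtree; all names here are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;	/* link into a container */
};

/* "cleared" sentinel: a node points at itself when not inserted,
 * standing in for RB_CLEAR_NODE()/RB_EMPTY_NODE() */
static void node_clear(struct node *n) { n->prev = n->next = n; }
static bool node_empty(const struct node *n) { return n->next == n; }

static void
node_insert(struct node *n, struct node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void
node_destroy(struct node *n)
{
	if (!node_empty(n)) {		/* only unlink if ever inserted */
		n->prev->next = n->next;
		n->next->prev = n->prev;
	}
}

int
main(void)
{
	struct node head, a, b;

	node_clear(&head); node_clear(&a); node_clear(&b);
	node_insert(&a, &head);		/* a joins the container */
	node_destroy(&a);		/* unlinks a */
	node_destroy(&b);		/* never inserted: safe no-op */
	printf("head empty again: %d\n", node_empty(&head));
	return 0;
}
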
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
new file mode 100644
index 000000000000..69a7f1034024
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DMA_USER_H__
+#define __NVKM_DMA_USER_H__
+#define nvkm_dmaobj(p) container_of((p), struct nvkm_dmaobj, object)
+#include "priv.h"
+
+int nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *, struct nvkm_dma *,
+		     const struct nvkm_oclass *, void **data, u32 *size,
+		     struct nvkm_dmaobj *);
+
+int nv04_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		    struct nvkm_dmaobj **);
+int nv50_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		    struct nvkm_dmaobj **);
+int gf100_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		     struct nvkm_dmaobj **);
+int gf119_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		     struct nvkm_dmaobj **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
new file mode 100644
index 000000000000..13e341cc4e32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gf100_dmaobj(p) container_of((p), struct gf100_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct gf100_dmaobj {
+	struct nvkm_dmaobj base;
+	u32 flags0;
+	u32 flags5;
+};
+
+static int
+gf100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct gf100_dmaobj *dmaobj = gf100_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	int ret;
+
+	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
+		nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
+					  upper_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+gf100_dmaobj_func = {
+	.bind = gf100_dmaobj_bind,
+};
+
+int
+gf100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	union {
+		struct gf100_dma_v0 v0;
+	} *args;
+	struct nvkm_object *parent = oclass->parent;
+	struct gf100_dmaobj *dmaobj;
+	u32 kind, user, unkn;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&gf100_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	args = data;
+
+	nvif_ioctl(parent, "create gf100 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent,
+			   "create gf100 dma vers %d priv %d kind %02x\n",
+			   args->v0.version, args->v0.priv, args->v0.kind);
+		kind = args->v0.kind;
+		user = args->v0.priv;
+		unkn = 0;
+	} else
+	if (size == 0) {
+		if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+			kind = GF100_DMA_V0_KIND_PITCH;
+			user = GF100_DMA_V0_PRIV_US;
+			unkn = 2;
+		} else {
+			kind = GF100_DMA_V0_KIND_VM;
+			user = GF100_DMA_V0_PRIV_VM;
+			unkn = 0;
+		}
+	} else
+		return ret;
+
+	if (user > 2)
+		return -EINVAL;
+	dmaobj->flags0 |= (kind << 22) | (user << 20) | oclass->base.oclass;
+	dmaobj->flags5 |= (unkn << 16);
+
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VM:
+		dmaobj->flags0 |= 0x00000000;
+		break;
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		dmaobj->flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		dmaobj->flags0 |= 0x00030000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->base.access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		dmaobj->flags0 |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		dmaobj->flags0 |= 0x00080000;
+		break;
+	}
+
+	return 0;
+}
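
For reference, a standalone worked example of the flags0 packing gf100_dmaobj_new() performs above: (kind << 22) | (user << 20) | class, plus the target and access bits. The numeric kind/user values are illustrative stand-ins for the GF100_DMA_V0_* constants, and 0x003d is used as the class id:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t kind = 1, user = 2, oclass = 0x003d;	/* illustrative */
	uint32_t flags0 = (kind << 22) | (user << 20) | oclass;

	flags0 |= 0x00010000;	/* NV_MEM_TARGET_VRAM */
	flags0 |= 0x00040000;	/* NV_MEM_ACCESS_RO */

	printf("flags0 = 0x%08x\n", flags0);	/* prints 0x0065003d */
	return 0;
}
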
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
new file mode 100644
index 000000000000..0e1af8b4db84
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gf119_dmaobj(p) container_of((p), struct gf119_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct gf119_dmaobj {
+	struct nvkm_dmaobj base;
+	u32 flags0;
+};
+
+static int
+gf119_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct gf119_dmaobj *dmaobj = gf119_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	int ret;
+
+	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, dmaobj->base.start >> 8);
+		nvkm_wo32(*pgpuobj, 0x08, dmaobj->base.limit >> 8);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+gf119_dmaobj_func = {
+	.bind = gf119_dmaobj_bind,
+};
+
+int
+gf119_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	union {
+		struct gf119_dma_v0 v0;
+	} *args;
+	struct nvkm_object *parent = oclass->parent;
+	struct gf119_dmaobj *dmaobj;
+	u32 kind, page;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&gf119_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	args = data;
+
+	nvif_ioctl(parent, "create gf119 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent,
+			   "create gf119 dma vers %d page %d kind %02x\n",
+			   args->v0.version, args->v0.page, args->v0.kind);
+		kind = args->v0.kind;
+		page = args->v0.page;
+	} else
+	if (size == 0) {
+		if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+			kind = GF119_DMA_V0_KIND_PITCH;
+			page = GF119_DMA_V0_PAGE_SP;
+		} else {
+			kind = GF119_DMA_V0_KIND_VM;
+			page = GF119_DMA_V0_PAGE_LP;
+		}
+	} else
+		return ret;
+
+	if (page > 1)
+		return -EINVAL;
+	dmaobj->flags0 = (kind << 20) | (page << 6);
+
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00000009;
+		break;
+	case NV_MEM_TARGET_VM:
+	case NV_MEM_TARGET_PCI:
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		/* XXX: don't currently know how to construct a real one
+		 *      of these.  we only use them to represent pushbufs
+		 *      on these chipsets, and the classes that use them
+		 *      deal with the target themselves.
+		 */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
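
For reference, a standalone worked example of the gf119 object layout above: flags0 is (kind << 20) | (page << 6) plus the target bits, and start/limit are stored right-shifted by 8, i.e. in 256-byte units. The kind/page values are illustrative stand-ins for the GF119_DMA_V0_* constants:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t start = 0x10000000ULL, limit = 0x10ffffffULL;
	uint32_t kind = 0, page = 1;		/* illustrative */
	uint32_t flags0 = (kind << 20) | (page << 6);

	flags0 |= 0x00000009;			/* NV_MEM_TARGET_VRAM */

	printf("0x00: %08x\n", flags0);			/* 00000049 */
	printf("0x04: %08x\n", (uint32_t)(start >> 8));	/* 00100000 */
	printf("0x08: %08x\n", (uint32_t)(limit >> 8));	/* 0010ffff */
	return 0;
}
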
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
new file mode 100644
index 000000000000..c95942ef8216
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
+#include "user.h"
+
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/mmu/nv04.h>
+
+#include <nvif/class.h>
+
+struct nv04_dmaobj {
+	struct nvkm_dmaobj base;
+	bool clone;
+	u32 flags0;
+	u32 flags2;
+};
+
+static int
+nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	u64 offset = dmaobj->base.start & 0xfffff000;
+	u64 adjust = dmaobj->base.start & 0x00000fff;
+	u32 length = dmaobj->base.limit - dmaobj->base.start;
+	int ret;
+
+	if (dmaobj->clone) {
+		struct nv04_mmu *mmu = nv04_mmu(device->mmu);
+		struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
+		if (!dmaobj->base.start)
+			return nvkm_gpuobj_wrap(pgt, pgpuobj);
+		nvkm_kmap(pgt);
+		offset  = nvkm_ro32(pgt, 8 + (offset >> 10));
+		offset &= 0xfffff000;
+		nvkm_done(pgt);
+	}
+
+	ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
+		nvkm_wo32(*pgpuobj, 0x04, length);
+		nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
+		nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+nv04_dmaobj_func = {
+	.bind = nv04_dmaobj_bind,
+};
+
+int
+nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	struct nvkm_device *device = dma->engine.subdev.device;
+	struct nv04_dmaobj *dmaobj;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	if (dmaobj->base.target == NV_MEM_TARGET_VM) {
+		if (device->mmu->func == &nv04_mmu)
+			dmaobj->clone = true;
+		dmaobj->base.target = NV_MEM_TARGET_PCI;
+		dmaobj->base.access = NV_MEM_ACCESS_RW;
+	}
+
+	dmaobj->flags0 = oclass->base.oclass;
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00003000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		dmaobj->flags0 |= 0x00023000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		dmaobj->flags0 |= 0x00033000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->base.access) {
+	case NV_MEM_ACCESS_RO:
+		dmaobj->flags0 |= 0x00004000;
+		break;
+	case NV_MEM_ACCESS_WO:
+		dmaobj->flags0 |= 0x00008000;
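+		/* fall through */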
+	case NV_MEM_ACCESS_RW:
+		dmaobj->flags2 |= 0x00000002;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
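
The bind routine above splits the DMA object's start address into a page-aligned offset and a sub-page "adjust" that lands in the high bits of the first object word. A standalone worked example with an illustrative address:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t start  = 0x00123456ULL;
	uint64_t limit  = 0x00123fffULL;
	uint64_t offset = start & 0xfffff000;	/* page-aligned base */
	uint64_t adjust = start & 0x00000fff;	/* sub-page byte offset */
	uint32_t length = (uint32_t)(limit - start);

	/* adjust occupies bits 31:20 of the first object word */
	printf("word0 adjust bits: %08x\n", (uint32_t)(adjust << 20));
	printf("offset: %08llx length: %08x\n",
	       (unsigned long long)offset, length);
	return 0;
}
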
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
new file mode 100644
index 000000000000..5b7ce313ea14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv50_dmaobj(p) container_of((p), struct nv50_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nv50_dmaobj {
+	struct nvkm_dmaobj base;
+	u32 flags0;
+	u32 flags5;
+};
+
+static int
+nv50_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nv50_dmaobj *dmaobj = nv50_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	int ret;
+
+	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
+		nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
+					  upper_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+nv50_dmaobj_func = {
+	.bind = nv50_dmaobj_bind,
+};
+
+int
+nv50_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	union {
+		struct nv50_dma_v0 v0;
+	} *args;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_dmaobj *dmaobj;
+	u32 user, part, comp, kind;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&nv50_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	args = data;
+
+	nvif_ioctl(parent, "create nv50 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
+				   "comp %d kind %02x\n", args->v0.version,
+			   args->v0.priv, args->v0.part, args->v0.comp,
+			   args->v0.kind);
+		user = args->v0.priv;
+		part = args->v0.part;
+		comp = args->v0.comp;
+		kind = args->v0.kind;
+	} else
+	if (size == 0) {
+		if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+			user = NV50_DMA_V0_PRIV_US;
+			part = NV50_DMA_V0_PART_256;
+			comp = NV50_DMA_V0_COMP_NONE;
+			kind = NV50_DMA_V0_KIND_PITCH;
+		} else {
+			user = NV50_DMA_V0_PRIV_VM;
+			part = NV50_DMA_V0_PART_VM;
+			comp = NV50_DMA_V0_COMP_VM;
+			kind = NV50_DMA_V0_KIND_VM;
+		}
+	} else
+		return ret;
+
+	if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
+		return -EINVAL;
+	dmaobj->flags0 = (comp << 29) | (kind << 22) | (user << 20) |
+			 oclass->base.oclass;
+	dmaobj->flags5 = (part << 16);
+
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VM:
+		dmaobj->flags0 |= 0x00000000;
+		break;
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		dmaobj->flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		dmaobj->flags0 |= 0x00030000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->base.access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		dmaobj->flags0 |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		dmaobj->flags0 |= 0x00080000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
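
For reference, a standalone sketch of the 24-byte object image that nv50_dmaobj_bind() writes with nvkm_wo32() above; note how the upper halves of limit and start share one word. The flags values are illustrative, not real NV50_DMA_V0_* encodings:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t start  = 0x0000000123456000ULL;
	uint64_t limit  = 0x0000000123ffffffULL;
	uint32_t flags0 = 0x0060003d;	/* illustrative comp/kind/user|class */
	uint32_t flags5 = 0x00010000;	/* illustrative part field */
	uint32_t w[6];

	w[0] = flags0;
	w[1] = (uint32_t)limit;			/* lower_32_bits(limit) */
	w[2] = (uint32_t)start;			/* lower_32_bits(start) */
	w[3] = (uint32_t)(limit >> 32) << 24 |	/* upper halves packed */
	       (uint32_t)(start >> 32);		/* into one word */
	w[4] = 0x00000000;
	w[5] = flags5;

	for (int i = 0; i < 6; i++)
		printf("0x%02x: %08x\n", i * 4, w[i]);
	return 0;
}
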
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
deleted file mode 100644
index 7529632dbedb..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-nvkm-y += nvkm/engine/dmaobj/base.o
-nvkm-y += nvkm/engine/dmaobj/nv04.o
-nvkm-y += nvkm/engine/dmaobj/nv50.o
-nvkm-y += nvkm/engine/dmaobj/gf100.o
-nvkm-y += nvkm/engine/dmaobj/gf110.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
deleted file mode 100644
index f880e5167e45..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf100_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	u32 flags0;
-	u32 flags5;
-};
-
-static int
-gf100_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		  struct nvkm_gpuobj **pgpuobj)
-{
-	struct gf100_dmaobj_priv *priv = (void *)dmaobj;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case GT214_DISP_CORE_CHANNEL_DMA:
-		case GT214_DISP_BASE_CHANNEL_DMA:
-		case GT214_DISP_OVERLAY_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else
-		return 0;
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
-		nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
-		nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
-					upper_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, priv->flags5);
-	}
-
-	return ret;
-}
-
-static int
-gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	union {
-		struct gf100_dma_v0 v0;
-	} *args;
-	struct gf100_dmaobj_priv *priv;
-	u32 kind, user, unkn;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-	args = data;
-
-	nv_ioctl(parent, "create gf100 dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n",
-			 args->v0.version, args->v0.priv, args->v0.kind);
-		kind = args->v0.kind;
-		user = args->v0.priv;
-		unkn = 0;
-	} else
-	if (size == 0) {
-		if (priv->base.target != NV_MEM_TARGET_VM) {
-			kind = GF100_DMA_V0_KIND_PITCH;
-			user = GF100_DMA_V0_PRIV_US;
-			unkn = 2;
-		} else {
-			kind = GF100_DMA_V0_KIND_VM;
-			user = GF100_DMA_V0_PRIV_VM;
-			unkn = 0;
-		}
-	} else
-		return ret;
-
-	if (user > 2)
-		return -EINVAL;
-	priv->flags0 |= (kind << 22) | (user << 20);
-	priv->flags5 |= (unkn << 16);
-
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VM:
-		priv->flags0 |= 0x00000000;
-		break;
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00010000;
-		break;
-	case NV_MEM_TARGET_PCI:
-		priv->flags0 |= 0x00020000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		priv->flags0 |= 0x00030000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (priv->base.access) {
-	case NV_MEM_ACCESS_VM:
-		break;
-	case NV_MEM_ACCESS_RO:
-		priv->flags0 |= 0x00040000;
-		break;
-	case NV_MEM_ACCESS_WO:
-	case NV_MEM_ACCESS_RW:
-		priv->flags0 |= 0x00080000;
-		break;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-gf100_dmaobj_ofuncs = {
-	.ctor =  gf100_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-gf100_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &gf100_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &gf100_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &gf100_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-gf100_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = gf100_dmaeng_sclass,
-	.bind = gf100_dmaobj_bind,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
deleted file mode 100644
index bf8f0f20976c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf110_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	u32 flags0;
-};
-
-static int
-gf110_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		  struct nvkm_gpuobj **pgpuobj)
-{
-	struct gf110_dmaobj_priv *priv = (void *)dmaobj;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case GF110_DISP_CORE_CHANNEL_DMA:
-		case GK104_DISP_CORE_CHANNEL_DMA:
-		case GK110_DISP_CORE_CHANNEL_DMA:
-		case GM107_DISP_CORE_CHANNEL_DMA:
-		case GM204_DISP_CORE_CHANNEL_DMA:
-		case GF110_DISP_BASE_CHANNEL_DMA:
-		case GK104_DISP_BASE_CHANNEL_DMA:
-		case GK110_DISP_BASE_CHANNEL_DMA:
-		case GF110_DISP_OVERLAY_CONTROL_DMA:
-		case GK104_DISP_OVERLAY_CONTROL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else
-		return 0;
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0);
-		nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8);
-		nv_wo32(*pgpuobj, 0x08, priv->base.limit >> 8);
-		nv_wo32(*pgpuobj, 0x0c, 0x00000000);
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, 0x00000000);
-	}
-
-	return ret;
-}
-
-static int
-gf110_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	union {
-		struct gf110_dma_v0 v0;
-	} *args;
-	struct gf110_dmaobj_priv *priv;
-	u32 kind, page;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-	args = data;
-
-	nv_ioctl(parent, "create gf110 dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n",
-			 args->v0.version, args->v0.page, args->v0.kind);
-		kind = args->v0.kind;
-		page = args->v0.page;
-	} else
-	if (size == 0) {
-		if (priv->base.target != NV_MEM_TARGET_VM) {
-			kind = GF110_DMA_V0_KIND_PITCH;
-			page = GF110_DMA_V0_PAGE_SP;
-		} else {
-			kind = GF110_DMA_V0_KIND_VM;
-			page = GF110_DMA_V0_PAGE_LP;
-		}
-	} else
-		return ret;
-
-	if (page > 1)
-		return -EINVAL;
-	priv->flags0 = (kind << 20) | (page << 6);
-
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00000009;
-		break;
-	case NV_MEM_TARGET_VM:
-	case NV_MEM_TARGET_PCI:
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		/* XXX: don't currently know how to construct a real one
-		 *      of these.  we only use them to represent pushbufs
-		 *      on these chipsets, and the classes that use them
-		 *      deal with the target themselves.
-		 */
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-gf110_dmaobj_ofuncs = {
-	.ctor =  gf110_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-gf110_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &gf110_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &gf110_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &gf110_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-gf110_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = gf110_dmaeng_sclass,
-	.bind = gf110_dmaobj_bind,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
deleted file mode 100644
index b4379c2a2fb5..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <subdev/mmu/nv04.h>
-
-#include <nvif/class.h>
-
-struct nv04_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	bool clone;
-	u32 flags0;
-	u32 flags2;
-};
-
-static int
-nv04_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		 struct nvkm_gpuobj **pgpuobj)
-{
-	struct nv04_dmaobj_priv *priv = (void *)dmaobj;
-	struct nvkm_gpuobj *gpuobj;
-	u64 offset = priv->base.start & 0xfffff000;
-	u64 adjust = priv->base.start & 0x00000fff;
-	u32 length = priv->base.limit - priv->base.start;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case NV03_CHANNEL_DMA:
-		case NV10_CHANNEL_DMA:
-		case NV17_CHANNEL_DMA:
-		case NV40_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	if (priv->clone) {
-		struct nv04_mmu_priv *mmu = nv04_mmu(dmaobj);
-		struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
-		if (!dmaobj->start)
-			return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
-		offset  = nv_ro32(pgt, 8 + (offset >> 10));
-		offset &= 0xfffff000;
-	}
-
-	ret = nvkm_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
-	*pgpuobj = gpuobj;
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20));
-		nv_wo32(*pgpuobj, 0x04, length);
-		nv_wo32(*pgpuobj, 0x08, priv->flags2 | offset);
-		nv_wo32(*pgpuobj, 0x0c, priv->flags2 | offset);
-	}
-
-	return ret;
-}
-
-static int
-nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	struct nv04_mmu_priv *mmu = nv04_mmu(engine);
-	struct nv04_dmaobj_priv *priv;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret || (ret = -ENOSYS, size))
-		return ret;
-
-	if (priv->base.target == NV_MEM_TARGET_VM) {
-		if (nv_object(mmu)->oclass == &nv04_mmu_oclass)
-			priv->clone = true;
-		priv->base.target = NV_MEM_TARGET_PCI;
-		priv->base.access = NV_MEM_ACCESS_RW;
-	}
-
-	priv->flags0 = nv_mclass(priv);
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00003000;
-		break;
-	case NV_MEM_TARGET_PCI:
-		priv->flags0 |= 0x00023000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		priv->flags0 |= 0x00033000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (priv->base.access) {
-	case NV_MEM_ACCESS_RO:
-		priv->flags0 |= 0x00004000;
-		break;
-	case NV_MEM_ACCESS_WO:
-		priv->flags0 |= 0x00008000;
-	case NV_MEM_ACCESS_RW:
-		priv->flags2 |= 0x00000002;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-nv04_dmaobj_ofuncs = {
-	.ctor =  nv04_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-nv04_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = nv04_dmaeng_sclass,
-	.bind = nv04_dmaobj_bind,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
deleted file mode 100644
index 4d3c828fe0e6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct nv50_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	u32 flags0;
-	u32 flags5;
-};
-
-static int
-nv50_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		 struct nvkm_gpuobj **pgpuobj)
-{
-	struct nv50_dmaobj_priv *priv = (void *)dmaobj;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case NV40_CHANNEL_DMA:
-		case NV50_CHANNEL_GPFIFO:
-		case G82_CHANNEL_GPFIFO:
-		case NV50_DISP_CORE_CHANNEL_DMA:
-		case G82_DISP_CORE_CHANNEL_DMA:
-		case GT206_DISP_CORE_CHANNEL_DMA:
-		case GT200_DISP_CORE_CHANNEL_DMA:
-		case GT214_DISP_CORE_CHANNEL_DMA:
-		case NV50_DISP_BASE_CHANNEL_DMA:
-		case G82_DISP_BASE_CHANNEL_DMA:
-		case GT200_DISP_BASE_CHANNEL_DMA:
-		case GT214_DISP_BASE_CHANNEL_DMA:
-		case NV50_DISP_OVERLAY_CHANNEL_DMA:
-		case G82_DISP_OVERLAY_CHANNEL_DMA:
-		case GT200_DISP_OVERLAY_CHANNEL_DMA:
-		case GT214_DISP_OVERLAY_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
-		nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
-		nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
-					upper_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, priv->flags5);
-	}
-
-	return ret;
-}
-
-static int
-nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	union {
-		struct nv50_dma_v0 v0;
-	} *args;
-	struct nv50_dmaobj_priv *priv;
-	u32 user, part, comp, kind;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-	args = data;
-
-	nv_ioctl(parent, "create nv50 dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
-				 "comp %d kind %02x\n", args->v0.version,
-			 args->v0.priv, args->v0.part, args->v0.comp,
-			 args->v0.kind);
-		user = args->v0.priv;
-		part = args->v0.part;
-		comp = args->v0.comp;
-		kind = args->v0.kind;
-	} else
-	if (size == 0) {
-		if (priv->base.target != NV_MEM_TARGET_VM) {
-			user = NV50_DMA_V0_PRIV_US;
-			part = NV50_DMA_V0_PART_256;
-			comp = NV50_DMA_V0_COMP_NONE;
-			kind = NV50_DMA_V0_KIND_PITCH;
-		} else {
-			user = NV50_DMA_V0_PRIV_VM;
-			part = NV50_DMA_V0_PART_VM;
-			comp = NV50_DMA_V0_COMP_VM;
-			kind = NV50_DMA_V0_KIND_VM;
-		}
-	} else
-		return ret;
-
-	if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
-		return -EINVAL;
-	priv->flags0 = (comp << 29) | (kind << 22) | (user << 20);
-	priv->flags5 = (part << 16);
-
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VM:
-		priv->flags0 |= 0x00000000;
-		break;
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00010000;
-		break;
-	case NV_MEM_TARGET_PCI:
-		priv->flags0 |= 0x00020000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		priv->flags0 |= 0x00030000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (priv->base.access) {
-	case NV_MEM_ACCESS_VM:
-		break;
-	case NV_MEM_ACCESS_RO:
-		priv->flags0 |= 0x00040000;
-		break;
-	case NV_MEM_ACCESS_WO:
-	case NV_MEM_ACCESS_RW:
-		priv->flags0 |= 0x00080000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-nv50_dmaobj_ofuncs = {
-	.ctor =  nv50_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-nv50_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-nv50_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = nv50_dmaeng_sclass,
-	.bind = nv50_dmaobj_bind,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
deleted file mode 100644
index 44ae8a0ca65c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __NVKM_DMAOBJ_PRIV_H__
-#define __NVKM_DMAOBJ_PRIV_H__
-#include <engine/dmaobj.h>
-
-#define nvkm_dmaobj_create(p,e,c,pa,sa,d)                                      \
-	nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
-
-int nvkm_dmaobj_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void **, u32 *,
-			int, void **);
-#define _nvkm_dmaobj_dtor nvkm_object_destroy
-#define _nvkm_dmaobj_init nvkm_object_init
-#define _nvkm_dmaobj_fini nvkm_object_fini
-
-int _nvkm_dmaeng_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *, u32,
-		      struct nvkm_object **);
-#define _nvkm_dmaeng_dtor _nvkm_engine_dtor
-#define _nvkm_dmaeng_init _nvkm_engine_init
-#define _nvkm_dmaeng_fini _nvkm_engine_fini
-
-struct nvkm_dmaeng_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *sclass;
-	int (*bind)(struct nvkm_dmaobj *, struct nvkm_object *,
-		    struct nvkm_gpuobj **);
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 30958c19e61d..74000602fbb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -21,40 +21,95 @@
  */
 #include <engine/falcon.h>
 
-#include <core/device.h>
+#include <core/gpuobj.h>
 #include <subdev/timer.h>
+#include <engine/fifo.h>
 
-void
-nvkm_falcon_intr(struct nvkm_subdev *subdev)
+static int
+nvkm_falcon_oclass_get(struct nvkm_oclass *oclass, int index)
 {
-	struct nvkm_falcon *falcon = (void *)subdev;
-	u32 dispatch = nv_ro32(falcon, 0x01c);
-	u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+	struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine);
+	int c = 0;
+
+	while (falcon->func->sclass[c].oclass) {
+		if (c++ == index) {
+			oclass->base = falcon->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
+}
+
+static int
+nvkm_falcon_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+			int align, struct nvkm_gpuobj **pgpuobj)
+{
+	return nvkm_gpuobj_new(object->engine->subdev.device, 256,
+			       align, true, parent, pgpuobj);
+}
+
+static const struct nvkm_object_func
+nvkm_falcon_cclass = {
+	.bind = nvkm_falcon_cclass_bind,
+};
+
+static void
+nvkm_falcon_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_subdev *subdev = &falcon->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = falcon->addr;
+	u32 dest = nvkm_rd32(device, base + 0x01c);
+	u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16);
+	u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+
+	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+
+	if (intr & 0x00000040) {
+		if (falcon->func->intr) {
+			falcon->func->intr(falcon, chan);
+			nvkm_wr32(device, base + 0x004, 0x00000040);
+			intr &= ~0x00000040;
+		}
+	}
 
 	if (intr & 0x00000010) {
-		nv_debug(falcon, "ucode halted\n");
-		nv_wo32(falcon, 0x004, 0x00000010);
+		nvkm_debug(subdev, "ucode halted\n");
+		nvkm_wr32(device, base + 0x004, 0x00000010);
 		intr &= ~0x00000010;
 	}
 
 	if (intr)  {
-		nv_error(falcon, "unhandled intr 0x%08x\n", intr);
-		nv_wo32(falcon, 0x004, intr);
+		nvkm_error(subdev, "intr %08x\n", intr);
+		nvkm_wr32(device, base + 0x004, intr);
 	}
-}
 
-u32
-_nvkm_falcon_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_falcon *falcon = (void *)object;
-	return nv_rd32(falcon, falcon->addr + addr);
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
-void
-_nvkm_falcon_wr32(struct nvkm_object *object, u64 addr, u32 data)
+static int
+nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
 {
-	struct nvkm_falcon *falcon = (void *)object;
-	nv_wr32(falcon, falcon->addr + addr, data);
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_device *device = falcon->engine.subdev.device;
+	const u32 base = falcon->addr;
+
+	if (!suspend) {
+		nvkm_memory_del(&falcon->core);
+		if (falcon->external) {
+			vfree(falcon->data.data);
+			vfree(falcon->code.data);
+			falcon->code.data = NULL;
+		}
+	}
+
+	nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+	nvkm_wr32(device, base + 0x014, 0xffffffff);
+	return 0;
 }
 
 static void *
@@ -67,51 +122,66 @@ vmemdup(const void *src, size_t len)
 	return p;
 }
 
-int
-_nvkm_falcon_init(struct nvkm_object *object)
+static int
+nvkm_falcon_oneinit(struct nvkm_engine *engine)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_falcon *falcon = (void *)object;
-	const struct firmware *fw;
-	char name[32] = "internal";
-	int ret, i;
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_subdev *subdev = &falcon->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = falcon->addr;
 	u32 caps;
 
-	/* enable engine, and determine its capabilities */
-	ret = nvkm_engine_init(&falcon->base);
-	if (ret)
-		return ret;
-
+	/* determine falcon capabilities */
 	if (device->chipset <  0xa3 ||
 	    device->chipset == 0xaa || device->chipset == 0xac) {
 		falcon->version = 0;
 		falcon->secret  = (falcon->addr == 0x087000) ? 1 : 0;
 	} else {
-		caps = nv_ro32(falcon, 0x12c);
+		caps = nvkm_rd32(device, base + 0x12c);
 		falcon->version = (caps & 0x0000000f);
 		falcon->secret  = (caps & 0x00000030) >> 4;
 	}
 
-	caps = nv_ro32(falcon, 0x108);
+	caps = nvkm_rd32(device, base + 0x108);
 	falcon->code.limit = (caps & 0x000001ff) << 8;
 	falcon->data.limit = (caps & 0x0003fe00) >> 1;
 
-	nv_debug(falcon, "falcon version: %d\n", falcon->version);
-	nv_debug(falcon, "secret level: %d\n", falcon->secret);
-	nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
-	nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+	nvkm_debug(subdev, "falcon version: %d\n", falcon->version);
+	nvkm_debug(subdev, "secret level: %d\n", falcon->secret);
+	nvkm_debug(subdev, "code limit: %d\n", falcon->code.limit);
+	nvkm_debug(subdev, "data limit: %d\n", falcon->data.limit);
+	return 0;
+}
+
+static int
+nvkm_falcon_init(struct nvkm_engine *engine)
+{
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_subdev *subdev = &falcon->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const struct firmware *fw;
+	char name[32] = "internal";
+	const u32 base = falcon->addr;
+	int ret, i;
 
 	/* wait for 'uc halted' to be signalled before continuing */
 	if (falcon->secret && falcon->version < 4) {
-		if (!falcon->version)
-			nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
-		else
-			nv_wait(falcon, 0x180, 0x80000000, 0);
-		nv_wo32(falcon, 0x004, 0x00000010);
+		if (!falcon->version) {
+			nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, base + 0x008) & 0x00000010)
+					break;
+			);
+		} else {
+			nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, base + 0x180) & 0x80000000))
+					break;
+			);
+		}
+		nvkm_wr32(device, base + 0x004, 0x00000010);
 	}
 
 	/* disable all interrupts */
-	nv_wo32(falcon, 0x014, 0xffffffff);
+	nvkm_wr32(device, base + 0x014, 0xffffffff);
 
 	/* no default ucode provided by the engine implementation, try and
 	 * locate a "self-bootstrapping" firmware image for the engine
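
The nvkm_msec() loops earlier in this hunk replace the old nv_wait() helpers with an explicit poll-until-deadline idiom: read the register, break when the condition holds, give up after the time budget. A standalone userspace sketch of that idiom with a mocked status register; this is not the nvkm macro itself:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* mock register that reports "halted" (bit 4) after a few reads */
static unsigned reads;
static unsigned
rd32_status(void)
{
	return ++reads >= 3 ? 0x00000010 : 0;
}

static long long
now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static bool
wait_status(unsigned mask, long long timeout_ms)
{
	long long deadline = now_ms() + timeout_ms;

	do {
		if (rd32_status() & mask)
			return true;	/* like the break in nvkm_msec() */
	} while (now_ms() < deadline);
	return false;			/* timed out */
}

int
main(void)
{
	printf("halted: %s\n", wait_status(0x10, 2000) ? "yes" : "no");
	return 0;
}
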
@@ -120,7 +190,7 @@ _nvkm_falcon_init(struct nvkm_object *object)
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret == 0) {
 			falcon->code.data = vmemdup(fw->data, fw->size);
 			falcon->code.size = fw->size;
@@ -139,10 +209,10 @@ _nvkm_falcon_init(struct nvkm_object *object)
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
-			nv_error(falcon, "unable to load firmware data\n");
-			return ret;
+			nvkm_error(subdev, "unable to load firmware data\n");
+			return -ENODEV;
 		}
 
 		falcon->data.data = vmemdup(fw->data, fw->size);
@@ -154,10 +224,10 @@ _nvkm_falcon_init(struct nvkm_object *object)
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
-			nv_error(falcon, "unable to load firmware code\n");
-			return ret;
+			nvkm_error(subdev, "unable to load firmware code\n");
+			return -ENODEV;
 		}
 
 		falcon->code.data = vmemdup(fw->data, fw->size);
@@ -167,111 +237,117 @@ _nvkm_falcon_init(struct nvkm_object *object)
 			return -ENOMEM;
 	}
 
-	nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
-		 "static code/data segments" : "self-bootstrapping");
+	nvkm_debug(subdev, "firmware: %s (%s)\n", name, falcon->data.data ?
+		   "static code/data segments" : "self-bootstrapping");
 
 	/* ensure any "self-bootstrapping" firmware image is in vram */
 	if (!falcon->data.data && !falcon->core) {
-		ret = nvkm_gpuobj_new(object->parent, NULL, falcon->code.size,
-				      256, 0, &falcon->core);
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      falcon->code.size, 256, false,
+				      &falcon->core);
 		if (ret) {
-			nv_error(falcon, "core allocation failed, %d\n", ret);
+			nvkm_error(subdev, "core allocation failed, %d\n", ret);
 			return ret;
 		}
 
+		nvkm_kmap(falcon->core);
 		for (i = 0; i < falcon->code.size; i += 4)
-			nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+			nvkm_wo32(falcon->core, i, falcon->code.data[i / 4]);
+		nvkm_done(falcon->core);
 	}
 
 	/* upload firmware bootloader (or the full code segments) */
 	if (falcon->core) {
+		u64 addr = nvkm_memory_addr(falcon->core);
 		if (device->card_type < NV_C0)
-			nv_wo32(falcon, 0x618, 0x04000000);
+			nvkm_wr32(device, base + 0x618, 0x04000000);
 		else
-			nv_wo32(falcon, 0x618, 0x00000114);
-		nv_wo32(falcon, 0x11c, 0);
-		nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
-		nv_wo32(falcon, 0x114, 0);
-		nv_wo32(falcon, 0x118, 0x00006610);
+			nvkm_wr32(device, base + 0x618, 0x00000114);
+		nvkm_wr32(device, base + 0x11c, 0);
+		nvkm_wr32(device, base + 0x110, addr >> 8);
+		nvkm_wr32(device, base + 0x114, 0);
+		nvkm_wr32(device, base + 0x118, 0x00006610);
 	} else {
 		if (falcon->code.size > falcon->code.limit ||
 		    falcon->data.size > falcon->data.limit) {
-			nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+			nvkm_error(subdev, "ucode exceeds falcon limit(s)\n");
 			return -EINVAL;
 		}
 
 		if (falcon->version < 3) {
-			nv_wo32(falcon, 0xff8, 0x00100000);
+			nvkm_wr32(device, base + 0xff8, 0x00100000);
 			for (i = 0; i < falcon->code.size / 4; i++)
-				nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+				nvkm_wr32(device, base + 0xff4, falcon->code.data[i]);
 		} else {
-			nv_wo32(falcon, 0x180, 0x01000000);
+			nvkm_wr32(device, base + 0x180, 0x01000000);
 			for (i = 0; i < falcon->code.size / 4; i++) {
 				if ((i & 0x3f) == 0)
-					nv_wo32(falcon, 0x188, i >> 6);
-				nv_wo32(falcon, 0x184, falcon->code.data[i]);
+					nvkm_wr32(device, base + 0x188, i >> 6);
+				nvkm_wr32(device, base + 0x184, falcon->code.data[i]);
 			}
 		}
 	}
 
 	/* upload data segment (if necessary), zeroing the remainder */
 	if (falcon->version < 3) {
-		nv_wo32(falcon, 0xff8, 0x00000000);
+		nvkm_wr32(device, base + 0xff8, 0x00000000);
 		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
-			nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+			nvkm_wr32(device, base + 0xff4, falcon->data.data[i]);
 		for (; i < falcon->data.limit; i += 4)
-			nv_wo32(falcon, 0xff4, 0x00000000);
+			nvkm_wr32(device, base + 0xff4, 0x00000000);
 	} else {
-		nv_wo32(falcon, 0x1c0, 0x01000000);
+		nvkm_wr32(device, base + 0x1c0, 0x01000000);
 		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
-			nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+			nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]);
 		for (; i < falcon->data.limit / 4; i++)
-			nv_wo32(falcon, 0x1c4, 0x00000000);
+			nvkm_wr32(device, base + 0x1c4, 0x00000000);
 	}
 
 	/* start it running */
-	nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
-	nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
-	nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
-	nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+	nvkm_wr32(device, base + 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+	nvkm_wr32(device, base + 0x104, 0x00000000); /* ENTRY */
+	nvkm_wr32(device, base + 0x100, 0x00000002); /* TRIGGER */
+	nvkm_wr32(device, base + 0x048, 0x00000003); /* FIFO | CHSW */
+
+	if (falcon->func->init)
+		falcon->func->init(falcon);
 	return 0;
 }
 
-int
-_nvkm_falcon_fini(struct nvkm_object *object, bool suspend)
+static void *
+nvkm_falcon_dtor(struct nvkm_engine *engine)
 {
-	struct nvkm_falcon *falcon = (void *)object;
-
-	if (!suspend) {
-		nvkm_gpuobj_ref(NULL, &falcon->core);
-		if (falcon->external) {
-			vfree(falcon->data.data);
-			vfree(falcon->code.data);
-			falcon->code.data = NULL;
-		}
-	}
-
-	nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
-	nv_wo32(falcon, 0x014, 0xffffffff);
-
-	return nvkm_engine_fini(&falcon->base, suspend);
+	return nvkm_falcon(engine);
 }
 
+static const struct nvkm_engine_func
+nvkm_falcon = {
+	.dtor = nvkm_falcon_dtor,
+	.oneinit = nvkm_falcon_oneinit,
+	.init = nvkm_falcon_init,
+	.fini = nvkm_falcon_fini,
+	.intr = nvkm_falcon_intr,
+	.fifo.sclass = nvkm_falcon_oclass_get,
+	.cclass = &nvkm_falcon_cclass,
+};
+
 int
-nvkm_falcon_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 addr, bool enable,
-		    const char *iname, const char *fname,
-		    int length, void **pobject)
+nvkm_falcon_new_(const struct nvkm_falcon_func *func,
+		 struct nvkm_device *device, int index, bool enable,
+		 u32 addr, struct nvkm_engine **pengine)
 {
 	struct nvkm_falcon *falcon;
-	int ret;
-
-	ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
-				  fname, length, pobject);
-	falcon = *pobject;
-	if (ret)
-		return ret;
 
+	if (!(falcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
+		return -ENOMEM;
+	falcon->func = func;
 	falcon->addr = addr;
-	return 0;
+	falcon->code.data = func->code.data;
+	falcon->code.size = func->code.size;
+	falcon->data.data = func->data.data;
+	falcon->data.size = func->data.size;
+	*pengine = &falcon->engine;
+
+	return nvkm_engine_ctor(&nvkm_falcon, device, index, func->pmc_enable,
+				enable, &falcon->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 42891cb71ea3..74993c144a84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -7,6 +7,24 @@ nvkm-y += nvkm/engine/fifo/nv50.o
 nvkm-y += nvkm/engine/fifo/g84.o
 nvkm-y += nvkm/engine/fifo/gf100.o
 nvkm-y += nvkm/engine/fifo/gk104.o
-nvkm-y += nvkm/engine/fifo/gk20a.o
 nvkm-y += nvkm/engine/fifo/gk208.o
+nvkm-y += nvkm/engine/fifo/gk20a.o
 nvkm-y += nvkm/engine/fifo/gm204.o
+nvkm-y += nvkm/engine/fifo/gm20b.o
+
+nvkm-y += nvkm/engine/fifo/chan.o
+nvkm-y += nvkm/engine/fifo/channv50.o
+nvkm-y += nvkm/engine/fifo/chang84.o
+
+nvkm-y += nvkm/engine/fifo/dmanv04.o
+nvkm-y += nvkm/engine/fifo/dmanv10.o
+nvkm-y += nvkm/engine/fifo/dmanv17.o
+nvkm-y += nvkm/engine/fifo/dmanv40.o
+nvkm-y += nvkm/engine/fifo/dmanv50.o
+nvkm-y += nvkm/engine/fifo/dmag84.o
+
+nvkm-y += nvkm/engine/fifo/gpfifonv50.o
+nvkm-y += nvkm/engine/fifo/gpfifog84.o
+nvkm-y += nvkm/engine/fifo/gpfifogf100.o
+nvkm-y += nvkm/engine/fifo/gpfifogk104.o
+nvkm-y += nvkm/engine/fifo/gpfifogm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index fa223f88d25e..1fbbfbe6ca9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -21,156 +21,108 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/fifo.h>
+#include "priv.h"
+#include "chan.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <core/notify.h>
-#include <engine/dmaobj.h>
 
-#include <nvif/class.h>
 #include <nvif/event.h>
 #include <nvif/unpack.h>
 
-static int
-nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
-		     struct nvkm_notify *notify)
+void
+nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
 {
-	if (size == 0) {
-		notify->size  = 0;
-		notify->types = 1;
-		notify->index = 0;
-		return 0;
-	}
-	return -ENOSYS;
+	return fifo->func->pause(fifo, flags);
 }
 
-static const struct nvkm_event_func
-nvkm_fifo_event_func = {
-	.ctor = nvkm_fifo_event_ctor,
-};
-
-int
-nvkm_fifo_channel_create_(struct nvkm_object *parent,
-			  struct nvkm_object *engine,
-			  struct nvkm_oclass *oclass,
-			  int bar, u32 addr, u32 size, u32 pushbuf,
-			  u64 engmask, int len, void **ptr)
+void
+nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
 {
-	struct nvkm_device *device = nv_device(engine);
-	struct nvkm_fifo *priv = (void *)engine;
-	struct nvkm_fifo_chan *chan;
-	struct nvkm_dmaeng *dmaeng;
-	unsigned long flags;
-	int ret;
-
-	/* create base object class */
-	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
-				  engmask, len, ptr);
-	chan = *ptr;
-	if (ret)
-		return ret;
+	return fifo->func->start(fifo, flags);
+}
 
-	/* validate dma object representing push buffer */
-	chan->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
-	if (!chan->pushdma)
-		return -ENOENT;
-
-	dmaeng = (void *)chan->pushdma->base.engine;
-	switch (chan->pushdma->base.oclass->handle) {
-	case NV_DMA_FROM_MEMORY:
-	case NV_DMA_IN_MEMORY:
-		break;
-	default:
-		return -EINVAL;
+void
+nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
+		   struct nvkm_fifo_chan **pchan)
+{
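+	/* drops the fifo lock taken by nvkm_fifo_chan_inst()/_chid() */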
+	struct nvkm_fifo_chan *chan = *pchan;
+	if (likely(chan)) {
+		*pchan = NULL;
+		spin_unlock_irqrestore(&fifo->lock, flags);
 	}
+}
 
-	ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu);
-	if (ret)
-		return ret;
-
-	/* find a free fifo channel */
-	spin_lock_irqsave(&priv->lock, flags);
-	for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
-		if (!priv->channel[chan->chid]) {
-			priv->channel[chan->chid] = nv_object(chan);
-			break;
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
+{
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	spin_lock_irqsave(&fifo->lock, flags);
+	list_for_each_entry(chan, &fifo->chan, head) {
+		if (chan->inst->addr == inst) {
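+			/* move-to-front so repeat lookups for the same
+			 * channel stay cheap
+			 */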
+			list_del(&chan->head);
+			list_add(&chan->head, &fifo->chan);
+			*rflags = flags;
+			return chan;
 		}
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	if (chan->chid == priv->max) {
-		nv_error(priv, "no free channels\n");
-		return -ENOSPC;
-	}
-
-	chan->addr = nv_device_resource_start(device, bar) +
-		     addr + size * chan->chid;
-	chan->size = size;
-	nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
-	return 0;
+	spin_unlock_irqrestore(&fifo->lock, flags);
+	return NULL;
 }
 
-void
-nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
 {
-	struct nvkm_fifo *priv = (void *)nv_object(chan)->engine;
+	struct nvkm_fifo_chan *chan;
 	unsigned long flags;
-
-	if (chan->user)
-		iounmap(chan->user);
-
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->channel[chan->chid] = NULL;
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	nvkm_gpuobj_ref(NULL, &chan->pushgpu);
-	nvkm_object_ref(NULL, (struct nvkm_object **)&chan->pushdma);
-	nvkm_namedb_destroy(&chan->namedb);
+	spin_lock_irqsave(&fifo->lock, flags);
+	list_for_each_entry(chan, &fifo->chan, head) {
+		if (chan->chid == chid) {
+			list_del(&chan->head);
+			list_add(&chan->head, &fifo->chan);
+			*rflags = flags;
+			return chan;
+		}
+	}
+	spin_unlock_irqrestore(&fifo->lock, flags);
+	return NULL;
 }
 
-void
-_nvkm_fifo_channel_dtor(struct nvkm_object *object)
+static int
+nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
+		     struct nvkm_notify *notify)
 {
-	struct nvkm_fifo_chan *chan = (void *)object;
-	nvkm_fifo_channel_destroy(chan);
+	if (size == 0) {
+		notify->size  = 0;
+		notify->types = 1;
+		notify->index = 0;
+		return 0;
+	}
+	return -ENOSYS;
 }
 
-int
-_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
-{
-	struct nvkm_fifo_chan *chan = (void *)object;
-	*addr = chan->addr;
-	*size = chan->size;
-	return 0;
-}
+static const struct nvkm_event_func
+nvkm_fifo_event_func = {
+	.ctor = nvkm_fifo_event_ctor,
+};
 
-u32
-_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
+static void
+nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
 {
-	struct nvkm_fifo_chan *chan = (void *)object;
-	if (unlikely(!chan->user)) {
-		chan->user = ioremap(chan->addr, chan->size);
-		if (WARN_ON_ONCE(chan->user == NULL))
-			return 0;
-	}
-	return ioread32_native(chan->user + addr);
+	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+	fifo->func->uevent_fini(fifo);
 }
 
-void
-_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
+static void
+nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
 {
-	struct nvkm_fifo_chan *chan = (void *)object;
-	if (unlikely(!chan->user)) {
-		chan->user = ioremap(chan->addr, chan->size);
-		if (WARN_ON_ONCE(chan->user == NULL))
-			return;
-	}
-	iowrite32_native(data, chan->user + addr);
+	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+	fifo->func->uevent_init(fifo);
 }
 
-int
+static int
 nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
 		      struct nvkm_notify *notify)
 {
@@ -188,6 +140,13 @@ nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
 	return ret;
 }
 
+static const struct nvkm_event_func
+nvkm_fifo_uevent_func = {
+	.ctor = nvkm_fifo_uevent_ctor,
+	.init = nvkm_fifo_uevent_init,
+	.fini = nvkm_fifo_uevent_fini,
+};
+
 void
 nvkm_fifo_uevent(struct nvkm_fifo *fifo)
 {
@@ -196,87 +155,123 @@ nvkm_fifo_uevent(struct nvkm_fifo *fifo)
 	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
 }
 
-int
-_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
-			struct nvkm_event **event)
+static int
+nvkm_fifo_class_new(struct nvkm_device *device,
+		    const struct nvkm_oclass *oclass, void *data, u32 size,
+		    struct nvkm_object **pobject)
 {
-	struct nvkm_fifo *fifo = (void *)object->engine;
-	switch (type) {
-	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
-		if (nv_mclass(object) >= G82_CHANNEL_DMA) {
-			*event = &fifo->uevent;
-			return 0;
-		}
-		break;
-	default:
-		break;
-	}
-	return -EINVAL;
+	const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
+	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+	return sclass->ctor(fifo, oclass, data, size, pobject);
 }
 
+static const struct nvkm_device_oclass
+nvkm_fifo_class = {
+	.ctor = nvkm_fifo_class_new,
+};
+
 static int
-nvkm_fifo_chid(struct nvkm_fifo *priv, struct nvkm_object *object)
+nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
+		    const struct nvkm_device_oclass **class)
 {
-	int engidx = nv_hclass(priv) & 0xff;
-
-	while (object && object->parent) {
-		if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
-		    (nv_hclass(object->parent) & 0xff) == engidx)
-			return nvkm_fifo_chan(object)->chid;
-		object = object->parent;
+	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+	const struct nvkm_fifo_chan_oclass *sclass;
+	int c = 0;
+
+	while ((sclass = fifo->func->chan[c])) {
+		if (c++ == index) {
+			oclass->base = sclass->base;
+			oclass->engn = sclass;
+			*class = &nvkm_fifo_class;
+			return 0;
+		}
 	}
 
-	return -1;
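+	/* index exhausted: return the number of classes seen so far */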
+	return c;
 }
 
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
+static void
+nvkm_fifo_intr(struct nvkm_engine *engine)
 {
-	struct nvkm_fifo_chan *chan = NULL;
-	unsigned long flags;
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	fifo->func->intr(fifo);
+}
 
-	spin_lock_irqsave(&fifo->lock, flags);
-	if (chid >= fifo->min && chid <= fifo->max)
-		chan = (void *)fifo->channel[chid];
-	spin_unlock_irqrestore(&fifo->lock, flags);
+static int
+nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	if (fifo->func->fini)
+		fifo->func->fini(fifo);
+	return 0;
+}
 
-	return nvkm_client_name(chan);
+static int
+nvkm_fifo_oneinit(struct nvkm_engine *engine)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	if (fifo->func->oneinit)
+		return fifo->func->oneinit(fifo);
+	return 0;
 }
 
-void
-nvkm_fifo_destroy(struct nvkm_fifo *priv)
+static int
+nvkm_fifo_init(struct nvkm_engine *engine)
 {
-	kfree(priv->channel);
-	nvkm_event_fini(&priv->uevent);
-	nvkm_event_fini(&priv->cevent);
-	nvkm_engine_destroy(&priv->base);
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	fifo->func->init(fifo);
+	return 0;
 }
 
+static void *
+nvkm_fifo_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	void *data = fifo;
+	if (fifo->func->dtor)
+		data = fifo->func->dtor(fifo);
+	nvkm_event_fini(&fifo->cevent);
+	nvkm_event_fini(&fifo->uevent);
+	return data;
+}
+
+static const struct nvkm_engine_func
+nvkm_fifo = {
+	.dtor = nvkm_fifo_dtor,
+	.oneinit = nvkm_fifo_oneinit,
+	.init = nvkm_fifo_init,
+	.fini = nvkm_fifo_fini,
+	.intr = nvkm_fifo_intr,
+	.base.sclass = nvkm_fifo_class_get,
+};
+
 int
-nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass,
-		  int min, int max, int length, void **pobject)
+nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+	       int index, int nr, struct nvkm_fifo *fifo)
 {
-	struct nvkm_fifo *priv;
 	int ret;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
-				  "fifo", length, pobject);
-	priv = *pobject;
-	if (ret)
-		return ret;
+	fifo->func = func;
+	INIT_LIST_HEAD(&fifo->chan);
+	spin_lock_init(&fifo->lock);
 
-	priv->min = min;
-	priv->max = max;
-	priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
-	if (!priv->channel)
-		return -ENOMEM;
+	if (WARN_ON(nr > NVKM_FIFO_CHID_NR))
+		fifo->nr = NVKM_FIFO_CHID_NR;
+	else
+		fifo->nr = nr;
+	bitmap_clear(fifo->mask, 0, fifo->nr);
 
-	ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &priv->cevent);
+	ret = nvkm_engine_ctor(&nvkm_fifo, device, index, 0x00000100,
+			       true, &fifo->engine);
 	if (ret)
 		return ret;
 
-	priv->chid = nvkm_fifo_chid;
-	spin_lock_init(&priv->lock);
-	return 0;
+	if (func->uevent_init) {
+		ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
+				      &fifo->uevent);
+		if (ret)
+			return ret;
+	}
+
+	return nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
new file mode 100644
index 000000000000..dc6d4678f228
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "chan.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <core/oproxy.h>
+#include <subdev/mmu.h>
+#include <engine/dma.h>
+
+struct nvkm_fifo_chan_object {
+	struct nvkm_oproxy oproxy;
+	struct nvkm_fifo_chan *chan;
+	int hash;
+};
+
+static int
+nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
+{
+	struct nvkm_fifo_chan_object *object =
+		container_of(base, typeof(*object), oproxy);
+	struct nvkm_engine *engine  = object->oproxy.object->engine;
+	struct nvkm_fifo_chan *chan = object->chan;
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	const char *name = nvkm_subdev_name[engine->subdev.index];
+	int ret = 0;
+
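+	/* the engine context stays active until its last user detaches */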
+	if (--engn->usecount)
+		return 0;
+
+	if (chan->func->engine_fini) {
+		ret = chan->func->engine_fini(chan, engine, suspend);
+		if (ret) {
+			nvif_error(&chan->object,
+				   "detach %s failed, %d\n", name, ret);
+			return ret;
+		}
+	}
+
+	if (engn->object) {
+		ret = nvkm_object_fini(engn->object, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	nvif_trace(&chan->object, "detached %s\n", name);
+	return ret;
+}
+
+static int
+nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
+{
+	struct nvkm_fifo_chan_object *object =
+		container_of(base, typeof(*object), oproxy);
+	struct nvkm_engine *engine  = object->oproxy.object->engine;
+	struct nvkm_fifo_chan *chan = object->chan;
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	const char *name = nvkm_subdev_name[engine->subdev.index];
+	int ret;
+
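+	/* only the first user needs to initialise the engine context */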
+	if (engn->usecount++)
+		return 0;
+
+	if (engn->object) {
+		ret = nvkm_object_init(engn->object);
+		if (ret)
+			return ret;
+	}
+
+	if (chan->func->engine_init) {
+		ret = chan->func->engine_init(chan, engine);
+		if (ret) {
+			nvif_error(&chan->object,
+				   "attach %s failed, %d\n", name, ret);
+			return ret;
+		}
+	}
+
+	nvif_trace(&chan->object, "attached %s\n", name);
+	return 0;
+}
+
+static void
+nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
+{
+	struct nvkm_fifo_chan_object *object =
+		container_of(base, typeof(*object), oproxy);
+	struct nvkm_engine *engine  = object->oproxy.base.engine;
+	struct nvkm_fifo_chan *chan = object->chan;
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+
+	if (chan->func->object_dtor)
+		chan->func->object_dtor(chan, object->hash);
+
+	if (!--engn->refcount) {
+		if (chan->func->engine_dtor)
+			chan->func->engine_dtor(chan, engine);
+		nvkm_object_del(&engn->object);
+		if (chan->vm)
+			atomic_dec(&chan->vm->engref[engine->subdev.index]);
+	}
+}
+
+static const struct nvkm_oproxy_func
+nvkm_fifo_chan_child_func = {
+	.dtor[0] = nvkm_fifo_chan_child_del,
+	.init[0] = nvkm_fifo_chan_child_init,
+	.fini[0] = nvkm_fifo_chan_child_fini,
+};
+
+static int
+nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+			 struct nvkm_object **pobject)
+{
+	struct nvkm_engine *engine = oclass->engine;
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	struct nvkm_fifo_chan_object *object;
+	int ret = 0;
+
+	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
+	object->chan = chan;
+	*pobject = &object->oproxy.base;
+
+	if (!engn->refcount++) {
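+		/* first object for this engine on the channel: create
+		 * and attach the engine's context object
+		 */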
+		struct nvkm_oclass cclass = {
+			.client = oclass->client,
+			.engine = oclass->engine,
+		};
+
+		if (chan->vm)
+			atomic_inc(&chan->vm->engref[engine->subdev.index]);
+
+		if (engine->func->fifo.cclass) {
+			ret = engine->func->fifo.cclass(chan, &cclass,
+							&engn->object);
+		} else
+		if (engine->func->cclass) {
+			ret = nvkm_object_new_(engine->func->cclass, &cclass,
+					       NULL, 0, &engn->object);
+		}
+		if (ret)
+			return ret;
+
+		if (chan->func->engine_ctor) {
+			ret = chan->func->engine_ctor(chan, oclass->engine,
+						      engn->object);
+			if (ret)
+				return ret;
+		}
+	}
+
+	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
+					.base = oclass->base,
+					.engn = oclass->engn,
+					.handle = oclass->handle,
+					.object = oclass->object,
+					.client = oclass->client,
+					.parent = engn->object ?
+						  engn->object :
+						  oclass->parent,
+					.engine = engine,
+				}, data, size, &object->oproxy.object);
+	if (ret)
+		return ret;
+
+	if (chan->func->object_ctor) {
+		object->hash =
+			chan->func->object_ctor(chan, object->oproxy.object);
+		if (object->hash < 0)
+			return object->hash;
+	}
+
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
+			 struct nvkm_oclass *oclass)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	struct nvkm_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	struct nvkm_engine *engine;
+	u64 mask = chan->engines;
+	int ret, i, c;
+
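+	/* walk each engine bit in the channel's engine mask, clearing
+	 * bits as they're consumed; 'c' counts classes per engine
+	 */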
+	for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
+		if (!(engine = nvkm_device_engine(device, i)))
+			continue;
+		oclass->engine = engine;
+		oclass->base.oclass = 0;
+
+		if (engine->func->fifo.sclass) {
+			ret = engine->func->fifo.sclass(oclass, index);
+			if (oclass->base.oclass) {
+				if (!oclass->base.ctor)
+					oclass->base.ctor = nvkm_object_new;
+				oclass->ctor = nvkm_fifo_chan_child_new;
+				return 0;
+			}
+
+			index -= ret;
+			continue;
+		}
+
+		while (engine->func->sclass[c].oclass) {
+			if (c++ == index) {
+				oclass->base = engine->func->sclass[index];
+				if (!oclass->base.ctor)
+					oclass->base.ctor = nvkm_object_new;
+				oclass->ctor = nvkm_fifo_chan_child_new;
+				return 0;
+			}
+		}
+		index -= c;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
+		    struct nvkm_event **pevent)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	if (chan->func->ntfy)
+		return chan->func->ntfy(chan, type, pevent);
+	return -ENODEV;
+}
+
+static int
+nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	*addr = chan->addr;
+	*size = chan->size;
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
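+	/* map the channel's user registers lazily, on first access */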
+	if (unlikely(!chan->user)) {
+		chan->user = ioremap(chan->addr, chan->size);
+		if (!chan->user)
+			return -ENOMEM;
+	}
+	if (unlikely(addr + 4 > chan->size))
+		return -EINVAL;
+	*data = ioread32_native(chan->user + addr);
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	if (unlikely(!chan->user)) {
+		chan->user = ioremap(chan->addr, chan->size);
+		if (!chan->user)
+			return -ENOMEM;
+	}
+	if (unlikely(addr + 4 > chan->size))
+		return -EINVAL;
+	iowrite32_native(data, chan->user + addr);
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	chan->func->fini(chan);
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_init(struct nvkm_object *object)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	chan->func->init(chan);
+	return 0;
+}
+
+static void *
+nvkm_fifo_chan_dtor(struct nvkm_object *object)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	struct nvkm_fifo *fifo = chan->fifo;
+	void *data = chan->func->dtor(chan);
+	unsigned long flags;
+
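+	/* return the channel id to the fifo's allocation bitmap */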
+	spin_lock_irqsave(&fifo->lock, flags);
+	if (!list_empty(&chan->head)) {
+		__clear_bit(chan->chid, fifo->mask);
+		list_del(&chan->head);
+	}
+	spin_unlock_irqrestore(&fifo->lock, flags);
+
+	if (chan->user)
+		iounmap(chan->user);
+
+	nvkm_vm_ref(NULL, &chan->vm, NULL);
+
+	nvkm_gpuobj_del(&chan->push);
+	nvkm_gpuobj_del(&chan->inst);
+	return data;
+}
+
+static const struct nvkm_object_func
+nvkm_fifo_chan_func = {
+	.dtor = nvkm_fifo_chan_dtor,
+	.init = nvkm_fifo_chan_init,
+	.fini = nvkm_fifo_chan_fini,
+	.ntfy = nvkm_fifo_chan_ntfy,
+	.map = nvkm_fifo_chan_map,
+	.rd32 = nvkm_fifo_chan_rd32,
+	.wr32 = nvkm_fifo_chan_wr32,
+	.sclass = nvkm_fifo_chan_child_get,
+};
+
+int
+nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
+		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
+		    u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
+		    const struct nvkm_oclass *oclass,
+		    struct nvkm_fifo_chan *chan)
+{
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	struct nvkm_mmu *mmu = device->mmu;
+	struct nvkm_dmaobj *dmaobj;
+	unsigned long flags;
+	int ret;
+
+	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
+	chan->func = func;
+	chan->fifo = fifo;
+	chan->engines = engines;
+	INIT_LIST_HEAD(&chan->head);
+
+	/* instance memory */
+	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
+	if (ret)
+		return ret;
+
+	/* allocate push buffer ctxdma instance */
+	if (push) {
+		dmaobj = nvkm_dma_search(device->dma, oclass->client, push);
+		if (!dmaobj)
+			return -ENOENT;
+
+		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
+				       &chan->push);
+		if (ret)
+			return ret;
+	}
+
+	/* channel address space */
+	if (!vm && mmu) {
+		if (!client->vm || client->vm->mmu == mmu) {
+			ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
+			if (ret)
+				return ret;
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		return -ENOENT;
+	}
+
+	/* allocate channel id */
+	spin_lock_irqsave(&fifo->lock, flags);
+	chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
+	if (chan->chid >= NVKM_FIFO_CHID_NR) {
+		spin_unlock_irqrestore(&fifo->lock, flags);
+		return -ENOSPC;
+	}
+	list_add(&chan->head, &fifo->chan);
+	__set_bit(chan->chid, fifo->mask);
+	spin_unlock_irqrestore(&fifo->lock, flags);
+
+	/* determine address of this channel's user registers */
+	chan->addr = device->func->resource_addr(device, bar) +
+		     base + user * chan->chid;
+	chan->size = user;
+
+	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
new file mode 100644
index 000000000000..55dc415c5c08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -0,0 +1,33 @@
+#ifndef __NVKM_FIFO_CHAN_H__
+#define __NVKM_FIFO_CHAN_H__
+#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
+#include "priv.h"
+
+struct nvkm_fifo_chan_func {
+	void *(*dtor)(struct nvkm_fifo_chan *);
+	void (*init)(struct nvkm_fifo_chan *);
+	void (*fini)(struct nvkm_fifo_chan *);
+	int (*ntfy)(struct nvkm_fifo_chan *, u32 type, struct nvkm_event **);
+	int  (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *,
+			    struct nvkm_object *);
+	void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *);
+	int  (*engine_init)(struct nvkm_fifo_chan *, struct nvkm_engine *);
+	int  (*engine_fini)(struct nvkm_fifo_chan *, struct nvkm_engine *,
+			    bool suspend);
+	int  (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
+	void (*object_dtor)(struct nvkm_fifo_chan *, int);
+};
+
+int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
+			u32 size, u32 align, bool zero, u64 vm, u64 push,
+			u64 engines, int bar, u32 base, u32 user,
+			const struct nvkm_oclass *, struct nvkm_fifo_chan *);
+
+struct nvkm_fifo_chan_oclass {
+	int (*ctor)(struct nvkm_fifo *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
+};
+
+int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
new file mode 100644
index 000000000000..04305241ceed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+int
+g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
+		   struct nvkm_event **pevent)
+{
+	switch (type) {
+	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
+		*pevent = &chan->fifo->uevent;
+		return 0;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int
+g84_fifo_chan_engine(struct nvkm_engine *engine)
+{
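+	/* map the subdev to its bit in the PFIFO engine mask (0x002520) */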
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_GR    : return 0;
+	case NVKM_ENGINE_MPEG  :
+	case NVKM_ENGINE_MSPPP : return 1;
+	case NVKM_ENGINE_CE0   : return 2;
+	case NVKM_ENGINE_VP    :
+	case NVKM_ENGINE_MSPDEC: return 3;
+	case NVKM_ENGINE_CIPHER:
+	case NVKM_ENGINE_SEC   : return 4;
+	case NVKM_ENGINE_BSP   :
+	case NVKM_ENGINE_MSVLD : return 5;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static int
+g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : return -1;
+	case NVKM_ENGINE_GR    : return 0x0020;
+	case NVKM_ENGINE_VP    :
+	case NVKM_ENGINE_MSPDEC: return 0x0040;
+	case NVKM_ENGINE_MPEG  :
+	case NVKM_ENGINE_MSPPP : return 0x0060;
+	case NVKM_ENGINE_BSP   :
+	case NVKM_ENGINE_MSVLD : return 0x0080;
+	case NVKM_ENGINE_CIPHER:
+	case NVKM_ENGINE_SEC   : return 0x00a0;
+	case NVKM_ENGINE_CE0   : return 0x00c0;
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+}
+
+static int
+g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine, bool suspend)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 engn, save;
+	int offset;
+	bool done;
+
+	offset = g84_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+
+	engn = g84_fifo_chan_engine(engine);
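+	/* restrict the kickoff to this engine, then ask PFIFO to unload
+	 * the channel's context and wait for it to acknowledge
+	 */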
+	save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
+	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
+	done = nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+			break;
+	) >= 0;
+	nvkm_wr32(device, 0x002520, save);
+	if (!done) {
+		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
+			   chan->base.chid, chan->base.object.client->name);
+		if (suspend)
+			return -EBUSY;
+	}
+
+	nvkm_kmap(chan->eng);
+	nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+	nvkm_done(chan->eng);
+	return 0;
+}
+
+
+g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+	u64 limit, start;
+	int offset;
+
+	offset = g84_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+	limit = engn->addr + engn->size - 1;
+	start = engn->addr;
+
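+	/* write the engine context's address window into the channel's
+	 * per-engine context table
+	 */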
+	nvkm_kmap(chan->eng);
+	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
+	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
+	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
+					    upper_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+	nvkm_done(chan->eng);
+	return 0;
+}
+
+static int
+g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine,
+			  struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	int engn = engine->subdev.index;
+
+	if (g84_fifo_chan_engine_addr(engine) < 0)
+		return 0;
+
+	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+int
+g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	u32 handle = object->handle;
+	u32 context;
+
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context = 0x00000000; break;
+	case NVKM_ENGINE_GR    : context = 0x00100000; break;
+	case NVKM_ENGINE_MPEG  :
+	case NVKM_ENGINE_MSPPP : context = 0x00200000; break;
+	case NVKM_ENGINE_ME    :
+	case NVKM_ENGINE_CE0   : context = 0x00300000; break;
+	case NVKM_ENGINE_VP    :
+	case NVKM_ENGINE_MSPDEC: context = 0x00400000; break;
+	case NVKM_ENGINE_CIPHER:
+	case NVKM_ENGINE_SEC   :
+	case NVKM_ENGINE_VIC   : context = 0x00500000; break;
+	case NVKM_ENGINE_BSP   :
+	case NVKM_ENGINE_MSVLD : context = 0x00600000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
+}
+
+static void
+g84_fifo_chan_init(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u64 addr = chan->ramfc->addr >> 8;
+	u32 chid = chan->base.chid;
+
+	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
+	nv50_fifo_runlist_update(fifo);
+}
+
+static const struct nvkm_fifo_chan_func
+g84_fifo_chan_func = {
+	.dtor = nv50_fifo_chan_dtor,
+	.init = g84_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = g84_fifo_chan_engine_ctor,
+	.engine_dtor = nv50_fifo_chan_engine_dtor,
+	.engine_init = g84_fifo_chan_engine_init,
+	.engine_fini = g84_fifo_chan_engine_fini,
+	.object_ctor = g84_fifo_chan_object_ctor,
+	.object_dtor = nv50_fifo_chan_object_dtor,
+};
+
+int
+g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+		   const struct nvkm_oclass *oclass,
+		   struct nv50_fifo_chan *chan)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int ret;
+
+	ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
+				  0x10000, 0x1000, false, vm, push,
+				  (1ULL << NVKM_ENGINE_BSP) |
+				  (1ULL << NVKM_ENGINE_CE0) |
+				  (1ULL << NVKM_ENGINE_CIPHER) |
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_ME) |
+				  (1ULL << NVKM_ENGINE_MPEG) |
+				  (1ULL << NVKM_ENGINE_MSPDEC) |
+				  (1ULL << NVKM_ENGINE_MSPPP) |
+				  (1ULL << NVKM_ENGINE_MSVLD) |
+				  (1ULL << NVKM_ENGINE_SEC) |
+				  (1ULL << NVKM_ENGINE_SW) |
+				  (1ULL << NVKM_ENGINE_VIC) |
+				  (1ULL << NVKM_ENGINE_VP),
+				  0, 0xc00000, 0x2000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->base.inst,
+			      &chan->eng);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
+			      &chan->pgd);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->base.inst,
+			      &chan->cache);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->base.inst,
+			      &chan->ramfc);
+	if (ret)
+		return ret;
+
+	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
+	if (ret)
+		return ret;
+
+	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
new file mode 100644
index 000000000000..7d697e2dce1a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -0,0 +1,24 @@
+#ifndef __GF100_FIFO_CHAN_H__
+#define __GF100_FIFO_CHAN_H__
+#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
+#include "chan.h"
+#include "gf100.h"
+
+struct gf100_fifo_chan {
+	struct nvkm_fifo_chan base;
+	struct gf100_fifo *fifo;
+
+	struct list_head head;
+	bool killed;
+
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *vm;
+
+	struct {
+		struct nvkm_gpuobj *inst;
+		struct nvkm_vma vma;
+	} engn[NVKM_SUBDEV_NR];
+};
+
+extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
new file mode 100644
index 000000000000..97bdddb7644a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -0,0 +1,29 @@
+#ifndef __GK104_FIFO_CHAN_H__
+#define __GK104_FIFO_CHAN_H__
+#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
+#include "chan.h"
+#include "gk104.h"
+
+struct gk104_fifo_chan {
+	struct nvkm_fifo_chan base;
+	struct gk104_fifo *fifo;
+	int engine;
+
+	struct list_head head;
+	bool killed;
+
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *vm;
+
+	struct {
+		struct nvkm_gpuobj *inst;
+		struct nvkm_vma vma;
+	} engn[NVKM_SUBDEV_NR];
+};
+
+int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
+			  void *data, u32 size, struct nvkm_object **);
+
+extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gm204_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
new file mode 100644
index 000000000000..3361a1fd0343
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -0,0 +1,24 @@
+#ifndef __NV04_FIFO_CHAN_H__
+#define __NV04_FIFO_CHAN_H__
+#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
+#include "chan.h"
+#include "nv04.h"
+
+struct nv04_fifo_chan {
+	struct nvkm_fifo_chan base;
+	struct nv04_fifo *fifo;
+	u32 ramfc;
+	struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+};
+
+extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
+void *nv04_fifo_dma_dtor(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_init(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_fini(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *, int);
+
+extern const struct nvkm_fifo_chan_oclass nv04_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv10_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv17_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv40_fifo_dma_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
new file mode 100644
index 000000000000..25b60aff40e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+static int
+nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : return -1;
+	case NVKM_ENGINE_GR    : return 0x0000;
+	case NVKM_ENGINE_MPEG  : return 0x0060;
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+}
+
+static int
+nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine, bool suspend)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int offset, ret = 0;
+	u32 me;
+
+	offset = nv50_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+
+	/* HW bug workaround:
+	 *
+	 * PFIFO will hang forever if the connected engines don't report
+	 * that they've processed the context switch request.
+	 *
+	 * In order for the kickoff to work, we need to ensure all the
+	 * connected engines are in a state where they can answer.
+	 *
+	 * Newer chipsets don't seem to suffer from this issue, and there's
+	 * also an "ignore these engines" bitmask reg we can use if we hit
+	 * the issue there.
+	 */
+	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
+
+	/* do the kickoff... */
+	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
+			   chan->base.chid, chan->base.object.client->name);
+		if (suspend)
+			ret = -EBUSY;
+	}
+	nvkm_wr32(device, 0x00b860, me);
+
+	if (ret == 0) {
+		nvkm_kmap(chan->eng);
+		nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+		nvkm_done(chan->eng);
+	}
+
+	return ret;
+}
+
+static int
+nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+	u64 limit, start;
+	int offset;
+
+	offset = nv50_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+	limit = engn->addr + engn->size - 1;
+	start = engn->addr;
+
+	nvkm_kmap(chan->eng);
+	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
+	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
+	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
+					    upper_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+	nvkm_done(chan->eng);
+	return 0;
+}
+
+void
+nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+}
+
+static int
+nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine,
+			   struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	int engn = engine->subdev.index;
+
+	if (nv50_fifo_chan_engine_addr(engine) < 0)
+		return 0;
+
+	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+void
+nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	nvkm_ramht_remove(chan->ramht, cookie);
+}
+
+static int
+nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
+			   struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	u32 handle = object->handle;
+	u32 context;
+
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context = 0x00000000; break;
+	case NVKM_ENGINE_GR    : context = 0x00100000; break;
+	case NVKM_ENGINE_MPEG  : context = 0x00200000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
+}
+
+void
+nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 chid = chan->base.chid;
+
+	/* remove channel from runlist, fifo will unload context */
+	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
+	nv50_fifo_runlist_update(fifo);
+	nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
+}
+
+static void
+nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u64 addr = chan->ramfc->addr >> 12;
+	u32 chid = chan->base.chid;
+
+	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
+	nv50_fifo_runlist_update(fifo);
+}
+
+void *
+nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_ramht_del(&chan->ramht);
+	nvkm_gpuobj_del(&chan->pgd);
+	nvkm_gpuobj_del(&chan->eng);
+	nvkm_gpuobj_del(&chan->cache);
+	nvkm_gpuobj_del(&chan->ramfc);
+	return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+nv50_fifo_chan_func = {
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv50_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.engine_ctor = nv50_fifo_chan_engine_ctor,
+	.engine_dtor = nv50_fifo_chan_engine_dtor,
+	.engine_init = nv50_fifo_chan_engine_init,
+	.engine_fini = nv50_fifo_chan_engine_fini,
+	.object_ctor = nv50_fifo_chan_object_ctor,
+	.object_dtor = nv50_fifo_chan_object_dtor,
+};
+
+int
+nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+		    const struct nvkm_oclass *oclass,
+		    struct nv50_fifo_chan *chan)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int ret;
+
+	ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
+				  0x10000, 0x1000, false, vm, push,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_SW) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MPEG),
+				  0, 0xc00000, 0x2000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
+			      &chan->ramfc);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
+			      &chan->eng);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
+			      &chan->pgd);
+	if (ret)
+		return ret;
+
+	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
+	if (ret)
+		return ret;
+
+	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
new file mode 100644
index 000000000000..4b9da469b704
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -0,0 +1,35 @@
+#ifndef __NV50_FIFO_CHAN_H__
+#define __NV50_FIFO_CHAN_H__
+#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
+#include "chan.h"
+#include "nv50.h"
+
+struct nv50_fifo_chan {
+	struct nv50_fifo *fifo;
+	struct nvkm_fifo_chan base;
+
+	struct nvkm_gpuobj *ramfc;
+	struct nvkm_gpuobj *cache;
+	struct nvkm_gpuobj *eng;
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_ramht *ramht;
+	struct nvkm_vm *vm;
+
+	struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+};
+
+int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+			const struct nvkm_oclass *, struct nv50_fifo_chan *);
+void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
+void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
+void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
+void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
+
+int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+		       const struct nvkm_oclass *, struct nv50_fifo_chan *);
+
+extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
new file mode 100644
index 000000000000..a5ca52c7b74f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_dma_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+				   "pushbuf %llx offset %016llx\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				 oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
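+	/* fill the channel's RAMFC: pushbuf get/put pointers, fetch
+	 * parameters, RAMHT config and the channel's instance address
+	 */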
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+	nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+g84_fifo_dma_oclass = {
+	.base.oclass = G82_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = g84_fifo_dma_new,
+};
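
For illustration, the RAMFC word at offset 0x80 written in g84_fifo_dma_new() above packs three fields describing the channel's hash table. A minimal standalone sketch of that packing follows; only the shifts and the SEARCH_FULL value are taken from the code, while the field names and the log2-entry-count interpretation of ramht->bits are assumptions:

/* Sketch of the RAMFC+0x80 packing in g84_fifo_dma_new() above.
 * Shifts come from the expression in the ctor; names are illustrative. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
ramfc_ramht_word(unsigned ramht_bits, uint32_t ramht_offset)
{
	const uint32_t search_full = 4;        /* SEARCH_FULL, per the comment */
	return ((ramht_bits - 9) << 27) |      /* assumed: log2(entries) - 9   */
	       (search_full << 24) |           /* hash search mode             */
	       (ramht_offset >> 4);            /* offset in 16-byte units      */
}

int main(void)
{
	/* a 0x8000-byte RAMHT (the size used elsewhere in this series)
	 * holds 0x8000 / 8 = 4096 entries, i.e. 12 bits */
	printf("0x%08x\n", ramfc_ramht_word(12, 0x10000));
	return 0;
}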
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
new file mode 100644
index 000000000000..bfcc6408a772
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+void
+nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+	nvkm_ramht_remove(imem->ramht, cookie);
+}
+
+static int
+nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_object *object)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+	u32 context = 0x80000000 | chan->base.chid << 24;
+	u32 handle  = object->handle;
+	int hash;
+
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
+	case NVKM_ENGINE_GR    : context |= 0x00010000; break;
+	case NVKM_ENGINE_MPEG  : context |= 0x00020000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+				 handle, context);
+	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+	return hash;
+}
+
+void
+nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_memory *fctx = device->imem->ramfc;
+	const struct nv04_fifo_ramfc *c;
+	unsigned long flags;
+	u32 mask = fifo->base.nr - 1;
+	u32 data = chan->ramfc;
+	u32 chid;
+
+	/* prevent fifo context switches */
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
+
+	/* if this channel is active, replace it with a null context */
+	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
+	if (chid == chan->base.chid) {
+		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
+		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+		c = fifo->ramfc;
+		do {
+			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+			u32 rv = (nvkm_rd32(device, c->regp) &  rm) >> c->regs;
+			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
+			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+		} while ((++c)->bits);
+
+		c = fifo->ramfc;
+		do {
+			nvkm_wr32(device, c->regp, 0x00000000);
+		} while ((++c)->bits);
+
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	}
+
+	/* restore normal operation, after disabling dma mode */
+	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+void
+nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 mask = 1 << chan->base.chid;
+	unsigned long flags;
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+void *
+nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
+	const struct nv04_fifo_ramfc *c = fifo->ramfc;
+
+	nvkm_kmap(imem->ramfc);
+	do {
+		nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+	} while ((++c)->bits);
+	nvkm_done(imem->ramfc);
+	return chan;
+}
+
+const struct nvkm_fifo_chan_func
+nv04_fifo_dma_func = {
+	.dtor = nv04_fifo_dma_dtor,
+	.init = nv04_fifo_dma_init,
+	.fini = nv04_fifo_dma_fini,
+	.object_ctor = nv04_fifo_dma_object_ctor,
+	.object_dtor = nv04_fifo_dma_object_dtor,
+};
+
+static int
+nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 32;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv04_fifo_dma_oclass = {
+	.base.oclass = NV03_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv04_fifo_dma_new,
+};
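
nv04_fifo_dma_fini() above saves the live PFIFO state of an active channel with a table-driven loop: each nv04_fifo_ramfc descriptor names a bitfield in an mmio register and the matching field in the channel's RAMFC image. A minimal standalone sketch of that loop follows; mmio and RAMFC are simulated with arrays, and the descriptor values are illustrative, not the real nv04 layout:

/* Sketch of the descriptor-driven context save in nv04_fifo_dma_fini(). */
#include <stdint.h>
#include <stdio.h>

struct ramfc_desc {
	unsigned bits;  /* field width; 0 terminates the table  */
	unsigned ctxs;  /* bit offset within the RAMFC word     */
	unsigned ctxp;  /* byte offset of the RAMFC word        */
	unsigned regs;  /* bit offset within the mmio register  */
	unsigned regp;  /* mmio register index (simulated)      */
};

static uint32_t mmio[8];   /* stand-in for nvkm_rd32() */
static uint32_t ramfc[8];  /* stand-in for nvkm_wo32() */

static void
save_channel(const struct ramfc_desc *c, uint32_t base)
{
	do {
		uint32_t rm = ((1ULL << c->bits) - 1) << c->regs;
		uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs;
		uint32_t rv = (mmio[c->regp] & rm) >> c->regs;
		uint32_t cv = ramfc[(base + c->ctxp) / 4] & ~cm;
		ramfc[(base + c->ctxp) / 4] = cv | (rv << c->ctxs);
	} while ((++c)->bits);
}

int main(void)
{
	static const struct ramfc_desc desc[] = {
		{ 32, 0, 0x00, 0, 0 },  /* e.g. DMA_PUT (illustrative) */
		{ 32, 0, 0x04, 0, 1 },  /* e.g. DMA_GET (illustrative) */
		{ 16, 0, 0x08, 0, 2 },  /* low half of a shared word   */
		{}
	};
	mmio[0] = 0x1000; mmio[1] = 0x0800; mmio[2] = 0xbeef;
	save_channel(desc, 0);
	printf("%08x %08x %08x\n", ramfc[0], ramfc[1], ramfc[2]);
	return 0;
}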
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
new file mode 100644
index 000000000000..34f68e5bd040
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 32;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv10_fifo_dma_oclass = {
+	.base.oclass = NV10_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv10_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
new file mode 100644
index 000000000000..ed7cc9f2b540
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MPEG) | /* NV31- */
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 64;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv17_fifo_dma_oclass = {
+	.base.oclass = NV17_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv17_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
new file mode 100644
index 000000000000..043b6c325949
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static bool
+nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW:
+		return false;
+	case NVKM_ENGINE_GR:
+		*reg = 0x0032e0;
+		*ctx = 0x38;
+		return true;
+	case NVKM_ENGINE_MPEG:
+		*reg = 0x00330c;
+		*ctx = 0x54;
+		return true;
+	default:
+		WARN_ON(1);
+		return false;
+	}
+}
+
+static int
+nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine, bool suspend)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	unsigned long flags;
+	u32 reg, ctx;
+	int chid;
+
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
+
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+	if (chid == chan->base.chid)
+		nvkm_wr32(device, reg, 0x00000000);
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
+	nvkm_done(imem->ramfc);
+
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+	return 0;
+}
+
+static int
+nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	unsigned long flags;
+	u32 inst, reg, ctx;
+	int chid;
+
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
+	inst = chan->engn[engine->subdev.index]->addr >> 4;
+
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+	if (chid == chan->base.chid)
+		nvkm_wr32(device, reg, inst);
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
+	nvkm_done(imem->ramfc);
+
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+	return 0;
+}
+
+static void
+nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+}
+
+static int
+nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine,
+			  struct nvkm_object *object)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	const int engn = engine->subdev.index;
+	u32 reg, ctx;
+
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
+
+	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+static int
+nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_object *object)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+	u32 context = chan->base.chid << 23;
+	u32 handle  = object->handle;
+	int hash;
+
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
+	case NVKM_ENGINE_GR    : context |= 0x00100000; break;
+	case NVKM_ENGINE_MPEG  : context |= 0x00200000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+				 handle, context);
+	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+	return hash;
+}
+
+static const struct nvkm_fifo_chan_func
+nv40_fifo_dma_func = {
+	.dtor = nv04_fifo_dma_dtor,
+	.init = nv04_fifo_dma_init,
+	.fini = nv04_fifo_dma_fini,
+	.engine_ctor = nv40_fifo_dma_engine_ctor,
+	.engine_dtor = nv40_fifo_dma_engine_dtor,
+	.engine_init = nv40_fifo_dma_engine_init,
+	.engine_fini = nv40_fifo_dma_engine_fini,
+	.object_ctor = nv40_fifo_dma_object_ctor,
+	.object_dtor = nv04_fifo_dma_object_dtor,
+};
+
+static int
+nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MPEG) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0xc00000, 0x1000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 128;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv40_fifo_dma_oclass = {
+	.base.oclass = NV40_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv40_fifo_dma_new,
+};
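
nv40_fifo_dma_engine_init()/fini() above keep an engine's context pointer in two places: a live PFIFO register (valid only while the channel is resident) and the channel's RAMFC shadow (used on the next reload), updating both with context switches paused. A minimal sketch of that pattern, with mmio access simulated and the register/offset pairs copied from nv40_fifo_dma_engine():

/* Sketch of the dual live-register / RAMFC-shadow update on nv40. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
nv40_engine_slot(int engine, uint32_t *reg, uint32_t *ctx)
{
	switch (engine) {
	case 0: /* GR   */ *reg = 0x0032e0; *ctx = 0x38; return true;
	case 1: /* MPEG */ *reg = 0x00330c; *ctx = 0x54; return true;
	default:           return false; /* SW/DMAOBJ: nothing to do */
	}
}

int main(void)
{
	uint32_t reg, ctx;
	int active_chid = 3, chid = 3;  /* channel being initialised  */
	uint32_t inst = 0x1234;         /* context instance addr >> 4 */

	if (nv40_engine_slot(0, &reg, &ctx)) {
		/* if this channel is resident, patch the live register */
		if (chid == active_chid)
			printf("wr32(%06x, %08x)\n", reg, inst);
		/* always patch the RAMFC shadow for later reloads */
		printf("wo32(ramfc + 0x%02x, %08x)\n", ctx, inst);
	}
	return 0;
}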
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
new file mode 100644
index 000000000000..6b3b15f12c39
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_dma_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+				   "pushbuf %llx offset %016llx\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				  oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_dma_oclass = {
+	.base.oclass = NV50_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_fifo_dma_new,
+};
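
The ctor above writes the same 64-bit offset into two consecutive RAMFC slots (0x08/0x0c and 0x10/0x14) -- presumably the pushbuffer GET and PUT pointers, so the channel comes up with an empty ring. A minimal sketch of that invariant, with assumed field names:

/* Sketch of the GET == PUT "empty ring" initialisation (names assumed). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pushbuf {
	uint64_t get;  /* hardware read pointer  */
	uint64_t put;  /* software write pointer */
};

static void pushbuf_init(struct pushbuf *pb, uint64_t offset)
{
	pb->get = offset;  /* RAMFC 0x08/0x0c in the ctor above */
	pb->put = offset;  /* RAMFC 0x10/0x14                   */
}

static bool pushbuf_empty(const struct pushbuf *pb)
{
	return pb->get == pb->put;  /* nothing queued yet */
}

int main(void)
{
	struct pushbuf pb;
	pushbuf_init(&pb, 0x20000);
	printf("empty=%d\n", pushbuf_empty(&pb));
	return 0;
}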
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index a04920b3cf84..ff7b529764fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -22,466 +22,41 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "nv04.h"
-
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nvkm_gpuobj *ectx = (void *)object;
-	u64 limit = ectx->addr + ectx->size - 1;
-	u64 start = ectx->addr;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0020; break;
-	case NVDEV_ENGINE_VP    :
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break;
-	case NVDEV_ENGINE_MSPPP :
-	case NVDEV_ENGINE_MPEG  : addr = 0x0060; break;
-	case NVDEV_ENGINE_BSP   :
-	case NVDEV_ENGINE_MSVLD : addr = 0x0080; break;
-	case NVDEV_ENGINE_CIPHER:
-	case NVDEV_ENGINE_SEC   : addr = 0x00a0; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x00c0; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	nv_wo32(base->eng, addr + 0x00, 0x00190000);
-	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
-	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
-	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
-					upper_32_bits(start));
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_priv *priv = (void *)parent->engine;
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 addr, save, engn;
-	bool done;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : engn = 0; addr = 0x0020; break;
-	case NVDEV_ENGINE_VP    :
-	case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break;
-	case NVDEV_ENGINE_MSPPP :
-	case NVDEV_ENGINE_MPEG  : engn = 1; addr = 0x0060; break;
-	case NVDEV_ENGINE_BSP   :
-	case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break;
-	case NVDEV_ENGINE_CIPHER:
-	case NVDEV_ENGINE_SEC   : engn = 4; addr = 0x00a0; break;
-	case NVDEV_ENGINE_CE0   : engn = 2; addr = 0x00c0; break;
-	default:
-		return -EINVAL;
-	}
-
-	save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
-	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
-	done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
-	nv_wr32(priv, 0x002520, save);
-	if (!done) {
-		nv_error(priv, "channel %d [%s] unload timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	nv_wo32(base->eng, addr + 0x00, 0x00000000);
-	nv_wo32(base->eng, addr + 0x04, 0x00000000);
-	nv_wo32(base->eng, addr + 0x08, 0x00000000);
-	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_object_attach(struct nvkm_object *parent,
-		       struct nvkm_object *object, u32 handle)
-{
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 context;
-
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->node->offset >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
-	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
-	case NVDEV_ENGINE_MPEG  :
-	case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
-	case NVDEV_ENGINE_ME    :
-	case NVDEV_ENGINE_CE0   : context |= 0x00300000; break;
-	case NVDEV_ENGINE_VP    :
-	case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
-	case NVDEV_ENGINE_CIPHER:
-	case NVDEV_ENGINE_SEC   :
-	case NVDEV_ENGINE_VIC   : context |= 0x00500000; break;
-	case NVDEV_ENGINE_BSP   :
-	case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
-	default:
-		return -EINVAL;
-	}
-
-	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
-}
-
-static int
-g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG) |
-				       (1ULL << NVDEV_ENGINE_ME) |
-				       (1ULL << NVDEV_ENGINE_VP) |
-				       (1ULL << NVDEV_ENGINE_CIPHER) |
-				       (1ULL << NVDEV_ENGINE_SEC) |
-				       (1ULL << NVDEV_ENGINE_BSP) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_VIC), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	nv_parent(chan)->context_attach = g84_fifo_context_attach;
-	nv_parent(chan)->context_detach = g84_fifo_context_detach;
-	nv_parent(chan)->object_attach = g84_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
-	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_channel_gpfifo_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	u64 ioffset, ilength;
-	int ret;
-
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG) |
-				       (1ULL << NVDEV_ENGINE_ME) |
-				       (1ULL << NVDEV_ENGINE_VP) |
-				       (1ULL << NVDEV_ENGINE_CIPHER) |
-				       (1ULL << NVDEV_ENGINE_SEC) |
-				       (1ULL << NVDEV_ENGINE_BSP) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_VIC), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	nv_parent(chan)->context_attach = g84_fifo_context_attach;
-	nv_parent(chan)->context_detach = g84_fifo_context_detach;
-	nv_parent(chan)->object_attach = g84_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
-	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
-	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object->engine;
-	struct nv50_fifo_base *base = (void *)object->parent;
-	struct nv50_fifo_chan *chan = (void *)object;
-	struct nvkm_gpuobj *ramfc = base->ramfc;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
-	nv50_fifo_playlist_update(priv);
-	return 0;
-}
-
-static struct nvkm_ofuncs
-g84_fifo_ofuncs_dma = {
-	.ctor = g84_fifo_chan_ctor_dma,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = g84_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_ofuncs
-g84_fifo_ofuncs_ind = {
-	.ctor = g84_fifo_chan_ctor_ind,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = g84_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-g84_fifo_sclass[] = {
-	{ G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
-	{ G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static int
-g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 size,
-		      struct nvkm_object **pobject)
-{
-	struct nv50_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
-				       0x1000, NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
-			      0, &base->pgd);
-	if (ret)
-		return ret;
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
-			      0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
-			      0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static struct nvkm_oclass
-g84_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_fifo_context_ctor,
-		.dtor = nv50_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
+#include "channv50.h"
 
 static void
-g84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+g84_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x40000000, 0x00000000);
 }
 
 static void
-g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x40000000, 0x00000000);
-}
-
-static const struct nvkm_event_func
-g84_fifo_uevent_func = {
-	.ctor = nvkm_fifo_uevent_ctor,
-	.init = g84_fifo_uevent_init,
-	.fini = g84_fifo_uevent_fini,
+g84_fifo_uevent_init(struct nvkm_fifo *fifo)
+{
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
+}
+
+static const struct nvkm_fifo_func
+g84_fifo = {
+	.dtor = nv50_fifo_dtor,
+	.oneinit = nv50_fifo_oneinit,
+	.init = nv50_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.uevent_init = g84_fifo_uevent_init,
+	.uevent_fini = g84_fifo_uevent_fini,
+	.chan = {
+		&g84_fifo_dma_oclass,
+		&g84_fifo_gpfifo_oclass,
+		NULL
+	},
 };
 
-static int
-g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+g84_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	struct nv50_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[0]);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[1]);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&g84_fifo_uevent_func, 1, 1, &priv->base.uevent);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &g84_fifo_cclass;
-	nv_engine(priv)->sclass = g84_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	return 0;
+	return nv50_fifo_new_(&g84_fifo, device, index, pfifo);
 }
-
-struct nvkm_oclass *
-g84_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_fifo_ctor,
-		.dtor = nv50_fifo_dtor,
-		.init = nv50_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-};
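
The new g84_fifo_uevent_init()/fini() hooks above gate non-stall ("uevent") interrupt delivery by toggling bit 30 of register 0x002140 with a read-modify-write. A minimal standalone sketch of the nvkm_mask() semantics they rely on, with the register simulated and the bit assignment taken from the code above:

/* Sketch of the nvkm_mask() read-modify-write used by the uevent hooks. */
#include <stdint.h>
#include <stdio.h>

static uint32_t reg_2140;

static uint32_t mask32(uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t old = *reg;
	*reg = (old & ~mask) | (val & mask);  /* nvkm_mask() semantics */
	return old;
}

int main(void)
{
	mask32(&reg_2140, 0x40000000, 0x40000000); /* uevent_init */
	printf("enabled:  %08x\n", reg_2140);
	mask32(&reg_2140, 0x40000000, 0x00000000); /* uevent_fini */
	printf("disabled: %08x\n", reg_2140);
	return 0;
}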
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index b745252f2261..ff6fcbda615b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -21,365 +21,72 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/fifo.h>
+#include "gf100.h"
+#include "changf100.h"
 
 #include <core/client.h>
-#include <core/engctx.h>
 #include <core/enum.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
+#include <engine/sw.h>
 
 #include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf100_fifo_priv {
-	struct nvkm_fifo base;
-
-	struct work_struct fault;
-	u64 mask;
-
-	struct {
-		struct nvkm_gpuobj *mem[2];
-		int active;
-		wait_queue_head_t wait;
-	} runlist;
-
-	struct {
-		struct nvkm_gpuobj *mem;
-		struct nvkm_vma bar;
-	} user;
-	int spoon_nr;
-};
-
-struct gf100_fifo_base {
-	struct nvkm_fifo_base base;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
-
-struct gf100_fifo_chan {
-	struct nvkm_fifo_chan base;
-	enum {
-		STOPPED,
-		RUNNING,
-		KILLED
-	} state;
-};
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
 
 static void
-gf100_fifo_runlist_update(struct gf100_fifo_priv *priv)
+gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_gpuobj *cur;
-	int i, p;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	cur = priv->runlist.mem[priv->runlist.active];
-	priv->runlist.active = !priv->runlist.active;
-
-	for (i = 0, p = 0; i < 128; i++) {
-		struct gf100_fifo_chan *chan = (void *)priv->base.channel[i];
-		if (chan && chan->state == RUNNING) {
-			nv_wo32(cur, p + 0, i);
-			nv_wo32(cur, p + 4, 0x00000004);
-			p += 8;
-		}
-	}
-	bar->flush(bar);
-
-	nv_wr32(priv, 0x002270, cur->addr >> 12);
-	nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
-
-	if (wait_event_timeout(priv->runlist.wait,
-			       !(nv_rd32(priv, 0x00227c) & 0x00100000),
-			       msecs_to_jiffies(2000)) == 0)
-		nv_error(priv, "runlist update timeout\n");
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
 }
 
-static int
-gf100_fifo_context_attach(struct nvkm_object *parent,
-			  struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gf100_fifo_base *base = (void *)parent->parent;
-	struct nvkm_engctx *ectx = (void *)object;
-	u32 addr;
-	int ret;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
-	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	if (!ectx->vma.node) {
-		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
-					 NV_MEM_ACCESS_RW, &ectx->vma);
-		if (ret)
-			return ret;
-
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	}
-
-	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
-	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			  struct nvkm_object *object)
+static void
+gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gf100_fifo_priv *priv = (void *)parent->engine;
-	struct gf100_fifo_base *base = (void *)parent->parent;
-	struct gf100_fifo_chan *chan = (void *)parent;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
-	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_wr32(priv, 0x002634, chan->base.chid);
-	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-		nv_error(priv, "channel %d [%s] kick timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	nv_wo32(base, addr + 0x00, 0x00000000);
-	nv_wo32(base, addr + 0x04, 0x00000000);
-	bar->flush(bar);
-	return 0;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
 }
 
-static int
-gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+void
+gf100_fifo_runlist_update(struct gf100_fifo *fifo)
 {
-	union {
-		struct nv50_channel_gpfifo_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gf100_fifo_priv *priv = (void *)engine;
-	struct gf100_fifo_base *base = (void *)parent;
 	struct gf100_fifo_chan *chan;
-	u64 usermem, ioffset, ilength;
-	int ret, i;
-
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength);
-	} else
-		return ret;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_memory *cur;
+	int nr = 0;
 
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
-				       priv->user.bar.offset, 0x1000,
-				       args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_CE1) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	mutex_lock(&subdev->mutex);
+	cur = fifo->runlist.mem[fifo->runlist.active];
+	fifo->runlist.active = !fifo->runlist.active;
 
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
-	nv_parent(chan)->context_detach = gf100_fifo_context_detach;
-
-	usermem = chan->base.chid * 0x1000;
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	for (i = 0; i < 0x1000; i += 4)
-		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
-
-	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x10, 0x0000face);
-	nv_wo32(base, 0x30, 0xfffff902);
-	nv_wo32(base, 0x48, lower_32_bits(ioffset));
-	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base, 0x54, 0x00000002);
-	nv_wo32(base, 0x84, 0x20400000);
-	nv_wo32(base, 0x94, 0x30000001);
-	nv_wo32(base, 0x9c, 0x00000100);
-	nv_wo32(base, 0xa4, 0x1f1f1f1f);
-	nv_wo32(base, 0xa8, 0x1f1f1f1f);
-	nv_wo32(base, 0xac, 0x0000001f);
-	nv_wo32(base, 0xb8, 0xf8000000);
-	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
-	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gf100_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
-	struct gf100_fifo_priv *priv = (void *)object->engine;
-	struct gf100_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
-
-	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
-		nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
-		gf100_fifo_runlist_update(priv);
-	}
-
-	return 0;
-}
-
-static void gf100_fifo_intr_engine(struct gf100_fifo_priv *priv);
-
-static int
-gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
-	struct gf100_fifo_priv *priv = (void *)object->engine;
-	struct gf100_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-
-	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
-		nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
-		gf100_fifo_runlist_update(priv);
+	nvkm_kmap(cur);
+	list_for_each_entry(chan, &fifo->chan, head) {
+		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+		nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
+		nr++;
 	}
+	nvkm_done(cur);
 
-	gf100_fifo_intr_engine(priv);
-
-	nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-gf100_fifo_ofuncs = {
-	.ctor = gf100_fifo_chan_ctor,
-	.dtor = _nvkm_fifo_channel_dtor,
-	.init = gf100_fifo_chan_init,
-	.fini = gf100_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-gf100_fifo_sclass[] = {
-	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - instmem heap and vm setup
- ******************************************************************************/
-
-static int
-gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	struct gf100_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
-				       NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
-			      &base->pgd);
-	if (ret)
-		return ret;
-
-	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0208, 0xffffffff);
-	nv_wo32(base, 0x020c, 0x000000ff);
+	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
+	nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
 
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-gf100_fifo_context_dtor(struct nvkm_object *object)
-{
-	struct gf100_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
-	nvkm_fifo_context_destroy(&base->base);
+	if (wait_event_timeout(fifo->runlist.wait,
+			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
+			       msecs_to_jiffies(2000)) == 0)
+		nvkm_error(subdev, "runlist update timeout\n");
+	mutex_unlock(&subdev->mutex);
 }
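
The rewritten gf100_fifo_runlist_update() above rebuilds the runlist into a double buffer: it flips between two memory objects, writes one {chid, 0x00000004} pair per live channel, then points the hardware at the fresh copy and waits for the update to land. A minimal standalone sketch of that flow, with the hardware submit simulated and the entry format taken from the nvkm_wo32() calls above:

/* Sketch of the double-buffered runlist rebuild in gf100_fifo_runlist_update(). */
#include <stdint.h>
#include <stdio.h>

#define MAX_CHAN 128

struct runlist {
	uint32_t mem[2][MAX_CHAN * 2];  /* two runlist buffers */
	int active;
};

static void
runlist_update(struct runlist *rl, const int *chids, int nchan)
{
	uint32_t *cur = rl->mem[rl->active];
	int nr = 0, i;

	rl->active = !rl->active;  /* next rebuild targets the other buffer */

	for (i = 0; i < nchan; i++) {
		cur[nr * 2 + 0] = chids[i];    /* channel id           */
		cur[nr * 2 + 1] = 0x00000004;  /* entry type, per code */
		nr++;
	}
	/* hardware submit: runlist base and entry count (simulated) */
	printf("wr32(0x002270, base >> 12); wr32(0x002274, 0x%08x)\n",
	       0x01f00000 | nr);
}

int main(void)
{
	struct runlist rl = { .active = 0 };
	const int live[] = { 0, 3, 7 };
	runlist_update(&rl, live, 3);
	return 0;
}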
 
-static struct nvkm_oclass
-gf100_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fifo_context_ctor,
-		.dtor = gf100_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
 static inline int
-gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn)
+gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
 {
 	switch (engn) {
-	case NVDEV_ENGINE_GR    : engn = 0; break;
-	case NVDEV_ENGINE_MSVLD : engn = 1; break;
-	case NVDEV_ENGINE_MSPPP : engn = 2; break;
-	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
-	case NVDEV_ENGINE_CE0   : engn = 4; break;
-	case NVDEV_ENGINE_CE1   : engn = 5; break;
+	case NVKM_ENGINE_GR    : engn = 0; break;
+	case NVKM_ENGINE_MSVLD : engn = 1; break;
+	case NVKM_ENGINE_MSPPP : engn = 2; break;
+	case NVKM_ENGINE_MSPDEC: engn = 3; break;
+	case NVKM_ENGINE_CE0   : engn = 4; break;
+	case NVKM_ENGINE_CE1   : engn = 5; break;
 	default:
 		return -1;
 	}
@@ -388,95 +95,73 @@ gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn)
 }
 
 static inline struct nvkm_engine *
-gf100_fifo_engine(struct gf100_fifo_priv *priv, u32 engn)
+gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
 {
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+
 	switch (engn) {
-	case 0: engn = NVDEV_ENGINE_GR; break;
-	case 1: engn = NVDEV_ENGINE_MSVLD; break;
-	case 2: engn = NVDEV_ENGINE_MSPPP; break;
-	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
-	case 4: engn = NVDEV_ENGINE_CE0; break;
-	case 5: engn = NVDEV_ENGINE_CE1; break;
+	case 0: engn = NVKM_ENGINE_GR; break;
+	case 1: engn = NVKM_ENGINE_MSVLD; break;
+	case 2: engn = NVKM_ENGINE_MSPPP; break;
+	case 3: engn = NVKM_ENGINE_MSPDEC; break;
+	case 4: engn = NVKM_ENGINE_CE0; break;
+	case 5: engn = NVKM_ENGINE_CE1; break;
 	default:
 		return NULL;
 	}
 
-	return nvkm_engine(priv, engn);
+	return nvkm_device_engine(device, engn);
 }
 
 static void
 gf100_fifo_recover_work(struct work_struct *work)
 {
-	struct gf100_fifo_priv *priv = container_of(work, typeof(*priv), fault);
-	struct nvkm_object *engine;
+	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_engine *engine;
 	unsigned long flags;
 	u32 engn, engm = 0;
 	u64 mask, todo;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	mask = priv->mask;
-	priv->mask = 0ULL;
-	spin_unlock_irqrestore(&priv->base.lock, flags);
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	mask = fifo->mask;
+	fifo->mask = 0ULL;
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
-		engm |= 1 << gf100_fifo_engidx(priv, engn);
-	nv_mask(priv, 0x002630, engm, engm);
+		engm |= 1 << gf100_fifo_engidx(fifo, engn);
+	nvkm_mask(device, 0x002630, engm, engm);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
-		if ((engine = (void *)nvkm_engine(priv, engn))) {
-			nv_ofuncs(engine)->fini(engine, false);
-			WARN_ON(nv_ofuncs(engine)->init(engine));
+		if ((engine = nvkm_device_engine(device, engn))) {
+			nvkm_subdev_fini(&engine->subdev, false);
+			WARN_ON(nvkm_subdev_init(&engine->subdev));
 		}
 	}
 
-	gf100_fifo_runlist_update(priv);
-	nv_wr32(priv, 0x00262c, engm);
-	nv_mask(priv, 0x002630, engm, 0x00000000);
+	gf100_fifo_runlist_update(fifo);
+	nvkm_wr32(device, 0x00262c, engm);
+	nvkm_mask(device, 0x002630, engm, 0x00000000);
 }
 
 static void
-gf100_fifo_recover(struct gf100_fifo_priv *priv, struct nvkm_engine *engine,
+gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
 		   struct gf100_fifo_chan *chan)
 {
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 chid = chan->base.chid;
-	unsigned long flags;
 
-	nv_error(priv, "%s engine fault on channel %d, recovering...\n",
-		       nv_subdev(engine)->name, chid);
+	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
+		   nvkm_subdev_name[engine->subdev.index], chid);
+	assert_spin_locked(&fifo->base.lock);
 
-	nv_mask(priv, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
-	chan->state = KILLED;
+	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
+	list_del_init(&chan->head);
+	chan->killed = true;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	priv->mask |= 1ULL << nv_engidx(engine);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	schedule_work(&priv->fault);
-}
-
-static int
-gf100_fifo_swmthd(struct gf100_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
-{
-	struct gf100_fifo_chan *chan = NULL;
-	struct nvkm_handle *bind;
-	unsigned long flags;
-	int ret = -EINVAL;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	if (likely(chid >= priv->base.min && chid <= priv->base.max))
-		chan = (void *)priv->base.channel[chid];
-	if (unlikely(!chan))
-		goto out;
-
-	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
-	if (likely(bind)) {
-		if (!mthd || !nv_call(bind->object, mthd, data))
-			ret = 0;
-		nvkm_namedb_put(bind);
-	}
-
-out:
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return ret;
+	fifo->mask |= 1ULL << engine->subdev.index;
+	schedule_work(&fifo->fault);
 }
 
 static const struct nvkm_enum
@@ -486,14 +171,17 @@ gf100_fifo_sched_reason[] = {
 };
 
 static void
-gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
 {
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_engine *engine;
 	struct gf100_fifo_chan *chan;
+	unsigned long flags;
 	u32 engn;
 
+	spin_lock_irqsave(&fifo->base.lock, flags);
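+	/* Scan each engine's context-switch status: a busy engine with the
+	 * (not fully understood) unk0/unk1 bits set is treated as a stuck
+	 * channel and recovered. */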
 	for (engn = 0; engn < 6; engn++) {
-		u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
+		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
 		u32 busy = (stat & 0x80000000);
 		u32 save = (stat & 0x00100000); /* maybe? */
 		u32 unk0 = (stat & 0x00040000);
@@ -502,32 +190,36 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv)
 		(void)save;
 
 		if (busy && unk0 && unk1) {
-			if (!(chan = (void *)priv->base.channel[chid]))
-				continue;
-			if (!(engine = gf100_fifo_engine(priv, engn)))
-				continue;
-			gf100_fifo_recover(priv, engine, chan);
+			list_for_each_entry(chan, &fifo->chan, head) {
+				if (chan->base.chid == chid) {
+					engine = gf100_fifo_engine(fifo, engn);
+					if (!engine)
+						break;
+					gf100_fifo_recover(fifo, engine, chan);
+					break;
+				}
+			}
 		}
 	}
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
 static void
-gf100_fifo_intr_sched(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_sched(struct gf100_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x00254c);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x00254c);
 	u32 code = intr & 0x000000ff;
 	const struct nvkm_enum *en;
-	char enunk[6] = "";
 
 	en = nvkm_enum_find(gf100_fifo_sched_reason, code);
-	if (!en)
-		snprintf(enunk, sizeof(enunk), "UNK%02x", code);
 
-	nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
+	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
 
 	switch (code) {
 	case 0x0a:
-		gf100_fifo_intr_sched_ctxsw(priv);
+		gf100_fifo_intr_sched_ctxsw(fifo);
 		break;
 	default:
 		break;
@@ -536,17 +228,17 @@ gf100_fifo_intr_sched(struct gf100_fifo_priv *priv)
 
 static const struct nvkm_enum
 gf100_fifo_fault_engine[] = {
-	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
-	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
-	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
-	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
-	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
-	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
+	{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
+	{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
+	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
+	{ 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
+	{ 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
+	{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
 	{ 0x13, "PCOUNTER" },
-	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
-	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
-	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
+	{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+	{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
+	{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
 	{ 0x17, "PDAEMON" },
 	{}
 };
@@ -594,79 +286,65 @@ gf100_fifo_fault_gpcclient[] = {
 };
 
 static void
-gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit)
+gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
 {
-	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
-	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
-	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
-	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
 	u32 gpc    = (stat & 0x1f000000) >> 24;
 	u32 client = (stat & 0x00001f00) >> 8;
 	u32 write  = (stat & 0x00000080);
 	u32 hub    = (stat & 0x00000040);
 	u32 reason = (stat & 0x0000000f);
-	struct nvkm_object *engctx = NULL, *object;
-	struct nvkm_engine *engine = NULL;
 	const struct nvkm_enum *er, *eu, *ec;
-	char erunk[6] = "";
-	char euunk[6] = "";
-	char ecunk[6] = "";
-	char gpcid[3] = "";
+	struct nvkm_engine *engine = NULL;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	char gpcid[8] = "";
 
 	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
-	if (!er)
-		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
-
 	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
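+	/* The HUB bit selects which client table applies; GPC faults are
+	 * additionally tagged with the index of the reporting GPC. */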
+	if (hub) {
+		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
+	} else {
+		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
+		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
+	}
+
 	if (eu) {
 		switch (eu->data2) {
-		case NVDEV_SUBDEV_BAR:
-			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+		case NVKM_SUBDEV_BAR:
+			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
 			break;
-		case NVDEV_SUBDEV_INSTMEM:
-			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+		case NVKM_SUBDEV_INSTMEM:
+			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
 			break;
-		case NVDEV_ENGINE_IFB:
-			nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
+		case NVKM_ENGINE_IFB:
+			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
 			break;
 		default:
-			engine = nvkm_engine(priv, eu->data2);
-			if (engine)
-				engctx = nvkm_engctx_get(engine, inst);
+			engine = nvkm_device_engine(device, eu->data2);
 			break;
 		}
-	} else {
-		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
 	}
 
-	if (hub) {
-		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
-	} else {
-		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
-		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
-	}
+	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
 
-	if (!ec)
-		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
-
-	nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
-		       "channel 0x%010llx [%s]\n", write ? "write" : "read",
-		 (u64)vahi << 32 | valo, er ? er->name : erunk,
-		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
-		 ec ? ec->name : ecunk, (u64)inst << 12,
-		 nvkm_client_name(engctx));
-
-	object = engctx;
-	while (object) {
-		switch (nv_mclass(object)) {
-		case FERMI_CHANNEL_GPFIFO:
-			gf100_fifo_recover(priv, engine, (void *)object);
-			break;
-		}
-		object = object->parent;
-	}
+	nvkm_error(subdev,
+		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
+		   "reason %02x [%s] on channel %d [%010llx %s]\n",
+		   write ? "write" : "read", (u64)vahi << 32 | valo,
+		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
+		   reason, er ? er->name : "", chan ? chan->chid : -1,
+		   (u64)inst << 12,
+		   chan ? chan->object.client->name : "unknown");
 
-	nvkm_engctx_put(engctx);
+	if (engine && chan)
+		gf100_fifo_recover(fifo, engine, (void *)chan);
+	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 }
 
 static const struct nvkm_bitfield
@@ -678,290 +356,288 @@ gf100_fifo_pbdma_intr[] = {
 };
 
 static void
-gf100_fifo_intr_pbdma(struct gf100_fifo_priv *priv, int unit)
+gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
 {
-	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
-	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
-	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
-	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
+	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00003ffc);
-	u32 show = stat;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	u32 show = stat;
+	char msg[128];
 
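+	/* Bit 23 flags a method on a software class; hand it to the SW
+	 * engine and suppress the error report if it was handled there. */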
 	if (stat & 0x00800000) {
-		if (!gf100_fifo_swmthd(priv, chid, mthd, data))
-			show &= ~0x00800000;
+		if (device->sw) {
+			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+				show &= ~0x00800000;
+		}
 	}
 
 	if (show) {
-		nv_error(priv, "PBDMA%d:", unit);
-		nvkm_bitfield_print(gf100_fifo_pbdma_intr, show);
-		pr_cont("\n");
-		nv_error(priv,
-			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
-			 unit, chid,
-			 nvkm_client_name_for_fifo_chid(&priv->base, chid),
-			 subc, mthd, data);
-	}
-
-	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
-	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+				   "subc %d mthd %04x data %08x\n",
+			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, mthd, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+	}
+
+	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
 }
 
 static void
-gf100_fifo_intr_runlist(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x002a00);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x002a00);
 
 	if (intr & 0x10000000) {
-		wake_up(&priv->runlist.wait);
-		nv_wr32(priv, 0x002a00, 0x10000000);
+		wake_up(&fifo->runlist.wait);
+		nvkm_wr32(device, 0x002a00, 0x10000000);
 		intr &= ~0x10000000;
 	}
 
 	if (intr) {
-		nv_error(priv, "RUNLIST 0x%08x\n", intr);
-		nv_wr32(priv, 0x002a00, intr);
+		nvkm_error(subdev, "RUNLIST %08x\n", intr);
+		nvkm_wr32(device, 0x002a00, intr);
 	}
 }
 
 static void
-gf100_fifo_intr_engine_unit(struct gf100_fifo_priv *priv, int engn)
+gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
 {
-	u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04));
-	u32 inte = nv_rd32(priv, 0x002628);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
+	u32 inte = nvkm_rd32(device, 0x002628);
 	u32 unkn;
 
-	nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);
 
 	for (unkn = 0; unkn < 8; unkn++) {
 		u32 ints = (intr >> (unkn * 0x04)) & inte;
 		if (ints & 0x1) {
-			nvkm_fifo_uevent(&priv->base);
+			nvkm_fifo_uevent(&fifo->base);
 			ints &= ~1;
 		}
 		if (ints) {
-			nv_error(priv, "ENGINE %d %d %01x", engn, unkn, ints);
-			nv_mask(priv, 0x002628, ints, 0);
+			nvkm_error(subdev, "ENGINE %d %d %01x\n",
+				   engn, unkn, ints);
+			nvkm_mask(device, 0x002628, ints, 0);
 		}
 	}
 }
 
-static void
-gf100_fifo_intr_engine(struct gf100_fifo_priv *priv)
+void
+gf100_fifo_intr_engine(struct gf100_fifo *fifo)
 {
-	u32 mask = nv_rd32(priv, 0x0025a4);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 mask = nvkm_rd32(device, 0x0025a4);
 	while (mask) {
 		u32 unit = __ffs(mask);
-		gf100_fifo_intr_engine_unit(priv, unit);
+		gf100_fifo_intr_engine_unit(fifo, unit);
 		mask &= ~(1 << unit);
 	}
 }
 
 static void
-gf100_fifo_intr(struct nvkm_subdev *subdev)
+gf100_fifo_intr(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv = (void *)subdev;
-	u32 mask = nv_rd32(priv, 0x002140);
-	u32 stat = nv_rd32(priv, 0x002100) & mask;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x002140);
+	u32 stat = nvkm_rd32(device, 0x002100) & mask;
 
 	if (stat & 0x00000001) {
-		u32 intr = nv_rd32(priv, 0x00252c);
-		nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
-		nv_wr32(priv, 0x002100, 0x00000001);
+		u32 intr = nvkm_rd32(device, 0x00252c);
+		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
+		nvkm_wr32(device, 0x002100, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000100) {
-		gf100_fifo_intr_sched(priv);
-		nv_wr32(priv, 0x002100, 0x00000100);
+		gf100_fifo_intr_sched(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000100);
 		stat &= ~0x00000100;
 	}
 
 	if (stat & 0x00010000) {
-		u32 intr = nv_rd32(priv, 0x00256c);
-		nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
-		nv_wr32(priv, 0x002100, 0x00010000);
+		u32 intr = nvkm_rd32(device, 0x00256c);
+		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
+		nvkm_wr32(device, 0x002100, 0x00010000);
 		stat &= ~0x00010000;
 	}
 
 	if (stat & 0x01000000) {
-		u32 intr = nv_rd32(priv, 0x00258c);
-		nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
-		nv_wr32(priv, 0x002100, 0x01000000);
+		u32 intr = nvkm_rd32(device, 0x00258c);
+		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
+		nvkm_wr32(device, 0x002100, 0x01000000);
 		stat &= ~0x01000000;
 	}
 
 	if (stat & 0x10000000) {
-		u32 mask = nv_rd32(priv, 0x00259c);
+		u32 mask = nvkm_rd32(device, 0x00259c);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gf100_fifo_intr_fault(priv, unit);
-			nv_wr32(priv, 0x00259c, (1 << unit));
+			gf100_fifo_intr_fault(fifo, unit);
+			nvkm_wr32(device, 0x00259c, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x10000000;
 	}
 
 	if (stat & 0x20000000) {
-		u32 mask = nv_rd32(priv, 0x0025a0);
+		u32 mask = nvkm_rd32(device, 0x0025a0);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gf100_fifo_intr_pbdma(priv, unit);
-			nv_wr32(priv, 0x0025a0, (1 << unit));
+			gf100_fifo_intr_pbdma(fifo, unit);
+			nvkm_wr32(device, 0x0025a0, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x20000000;
 	}
 
 	if (stat & 0x40000000) {
-		gf100_fifo_intr_runlist(priv);
+		gf100_fifo_intr_runlist(fifo);
 		stat &= ~0x40000000;
 	}
 
 	if (stat & 0x80000000) {
-		gf100_fifo_intr_engine(priv);
+		gf100_fifo_intr_engine(fifo);
 		stat &= ~0x80000000;
 	}
 
 	if (stat) {
-		nv_error(priv, "INTR 0x%08x\n", stat);
-		nv_mask(priv, 0x002140, stat, 0x00000000);
-		nv_wr32(priv, 0x002100, stat);
+		nvkm_error(subdev, "INTR %08x\n", stat);
+		nvkm_mask(device, 0x002140, stat, 0x00000000);
+		nvkm_wr32(device, 0x002100, stat);
 	}
 }
 
-static void
-gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
-{
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
-}
-
-static void
-gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
-}
-
-static const struct nvkm_event_func
-gf100_fifo_uevent_func = {
-	.ctor = nvkm_fifo_uevent_ctor,
-	.init = gf100_fifo_uevent_init,
-	.fini = gf100_fifo_uevent_fini,
-};
-
 static int
-gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+gf100_fifo_oneinit(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int ret;
 
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	INIT_WORK(&priv->fault, gf100_fifo_recover_work);
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
-			      &priv->runlist.mem[0]);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+			      false, &fifo->runlist.mem[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
-			      &priv->runlist.mem[1]);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+			      false, &fifo->runlist.mem[1]);
 	if (ret)
 		return ret;
 
-	init_waitqueue_head(&priv->runlist.wait);
+	init_waitqueue_head(&fifo->runlist.wait);
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
-			      &priv->user.mem);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
+			      0x1000, false, &fifo->user.mem);
 	if (ret)
 		return ret;
 
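+	/* One 0x1000-byte user area per channel (128 channels), mapped
+	 * through BAR1 so the channel controls are reachable from user
+	 * space. */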
-	ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
-			      &priv->user.bar);
+	ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
 	if (ret)
 		return ret;
 
-	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &priv->base.uevent);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = gf100_fifo_intr;
-	nv_engine(priv)->cclass = &gf100_fifo_cclass;
-	nv_engine(priv)->sclass = gf100_fifo_sclass;
+	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
 	return 0;
 }
 
 static void
-gf100_fifo_dtor(struct nvkm_object *object)
+gf100_fifo_fini(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv = (void *)object;
-
-	nvkm_gpuobj_unmap(&priv->user.bar);
-	nvkm_gpuobj_ref(NULL, &priv->user.mem);
-	nvkm_gpuobj_ref(NULL, &priv->runlist.mem[0]);
-	nvkm_gpuobj_ref(NULL, &priv->runlist.mem[1]);
-
-	nvkm_fifo_destroy(&priv->base);
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	flush_work(&fifo->fault);
 }
 
-static int
-gf100_fifo_init(struct nvkm_object *object)
+static void
+gf100_fifo_init(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int i;
 
-	nv_wr32(priv, 0x000204, 0xffffffff);
-	nv_wr32(priv, 0x002204, 0xffffffff);
+	nvkm_wr32(device, 0x000204, 0xffffffff);
+	nvkm_wr32(device, 0x002204, 0xffffffff);
 
-	priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
-	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
+	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
+	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
 
 	/* assign engines to PBDMAs */
-	if (priv->spoon_nr >= 3) {
-		nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
-		nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
-		nv_wr32(priv, 0x002210, ~(1 << 1)); /* PMSPPP */
-		nv_wr32(priv, 0x002214, ~(1 << 1)); /* PMSVLD */
-		nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
-		nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
+	if (fifo->spoon_nr >= 3) {
+		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
+		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
+		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPPP */
+		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
+		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
+		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
 	}
 
 	/* PBDMA[n] */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+	for (i = 0; i < fifo->spoon_nr; i++) {
+		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
 	}
 
-	nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
-	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
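+	/* Point the hardware at the BAR1 offset of the user area, in 4KiB
+	 * units; bit 28 presumably enables it. */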
+	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
 
-	nv_wr32(priv, 0x002100, 0xffffffff);
-	nv_wr32(priv, 0x002140, 0x7fffffff);
-	nv_wr32(priv, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
-	return 0;
+	nvkm_wr32(device, 0x002100, 0xffffffff);
+	nvkm_wr32(device, 0x002140, 0x7fffffff);
+	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
+}
+
+static void *
+gf100_fifo_dtor(struct nvkm_fifo *base)
+{
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	nvkm_vm_put(&fifo->user.bar);
+	nvkm_memory_del(&fifo->user.mem);
+	nvkm_memory_del(&fifo->runlist.mem[0]);
+	nvkm_memory_del(&fifo->runlist.mem[1]);
+	return fifo;
 }
 
-struct nvkm_oclass *
-gf100_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fifo_ctor,
-		.dtor = gf100_fifo_dtor,
-		.init = gf100_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+gf100_fifo = {
+	.dtor = gf100_fifo_dtor,
+	.oneinit = gf100_fifo_oneinit,
+	.init = gf100_fifo_init,
+	.fini = gf100_fifo_fini,
+	.intr = gf100_fifo_intr,
+	.uevent_init = gf100_fifo_uevent_init,
+	.uevent_fini = gf100_fifo_uevent_fini,
+	.chan = {
+		&gf100_fifo_gpfifo_oclass,
+		NULL
 	},
 };
+
+int
+gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	struct gf100_fifo *fifo;
+
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_LIST_HEAD(&fifo->chan);
+	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
+	*pfifo = &fifo->base;
+
+	return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
new file mode 100644
index 000000000000..c649ca9b53e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -0,0 +1,31 @@
+#ifndef __GF100_FIFO_H__
+#define __GF100_FIFO_H__
+#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+struct gf100_fifo {
+	struct nvkm_fifo base;
+
+	struct list_head chan;
+
+	struct work_struct fault;
+	u64 mask;
+
+	struct {
+		struct nvkm_memory *mem[2];
+		int active;
+		wait_queue_head_t wait;
+	} runlist;
+
+	struct {
+		struct nvkm_memory *mem;
+		struct nvkm_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+void gf100_fifo_intr_engine(struct gf100_fifo *);
+void gf100_fifo_runlist_update(struct gf100_fifo *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index e10f9644140f..98970a0b7a66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -22,486 +22,121 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "changk104.h"
 
 #include <core/client.h>
-#include <core/engctx.h>
 #include <core/enum.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
+#include <engine/sw.h>
 
 #include <nvif/class.h>
-#include <nvif/unpack.h>
-
-#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
-static const struct {
-	u64 subdev;
-	u64 mask;
-} fifo_engine[] = {
-	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
-				 (1ULL << NVDEV_ENGINE_CE2)),
-	_(NVDEV_ENGINE_MSPDEC  , 0),
-	_(NVDEV_ENGINE_MSPPP   , 0),
-	_(NVDEV_ENGINE_MSVLD   , 0),
-	_(NVDEV_ENGINE_CE0     , 0),
-	_(NVDEV_ENGINE_CE1     , 0),
-	_(NVDEV_ENGINE_MSENC   , 0),
-};
-#undef _
-#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
-
-struct gk104_fifo_engn {
-	struct nvkm_gpuobj *runlist[2];
-	int cur_runlist;
-	wait_queue_head_t wait;
-};
-
-struct gk104_fifo_priv {
-	struct nvkm_fifo base;
-
-	struct work_struct fault;
-	u64 mask;
-
-	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
-	struct {
-		struct nvkm_gpuobj *mem;
-		struct nvkm_vma bar;
-	} user;
-	int spoon_nr;
-};
-
-struct gk104_fifo_base {
-	struct nvkm_fifo_base base;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
-
-struct gk104_fifo_chan {
-	struct nvkm_fifo_chan base;
-	u32 engine;
-	enum {
-		STOPPED,
-		RUNNING,
-		KILLED
-	} state;
-};
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static void
-gk104_fifo_runlist_update(struct gk104_fifo_priv *priv, u32 engine)
-{
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct gk104_fifo_engn *engn = &priv->engine[engine];
-	struct nvkm_gpuobj *cur;
-	int i, p;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	cur = engn->runlist[engn->cur_runlist];
-	engn->cur_runlist = !engn->cur_runlist;
-
-	for (i = 0, p = 0; i < priv->base.max; i++) {
-		struct gk104_fifo_chan *chan = (void *)priv->base.channel[i];
-		if (chan && chan->state == RUNNING && chan->engine == engine) {
-			nv_wo32(cur, p + 0, i);
-			nv_wo32(cur, p + 4, 0x00000000);
-			p += 8;
-		}
-	}
-	bar->flush(bar);
-
-	nv_wr32(priv, 0x002270, cur->addr >> 12);
-	nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
-
-	if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 +
-			       (engine * 0x08)) & 0x00100000),
-				msecs_to_jiffies(2000)) == 0)
-		nv_error(priv, "runlist %d update timeout\n", engine);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-}
 
-static int
-gk104_fifo_context_attach(struct nvkm_object *parent,
-			  struct nvkm_object *object)
+void
+gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_base *base = (void *)parent->parent;
-	struct nvkm_engctx *ectx = (void *)object;
-	u32 addr;
-	int ret;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW   :
-		return 0;
-	case NVDEV_ENGINE_CE0:
-	case NVDEV_ENGINE_CE1:
-	case NVDEV_ENGINE_CE2:
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-		return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	if (!ectx->vma.node) {
-		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
-					 NV_MEM_ACCESS_RW, &ectx->vma);
-		if (ret)
-			return ret;
-
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	}
-
-	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
-	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
-	bar->flush(bar);
-	return 0;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
 }
 
-static int
-gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			  struct nvkm_object *object)
+void
+gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_priv *priv = (void *)parent->engine;
-	struct gk104_fifo_base *base = (void *)parent->parent;
-	struct gk104_fifo_chan *chan = (void *)parent;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_CE0   :
-	case NVDEV_ENGINE_CE1   :
-	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_wr32(priv, 0x002634, chan->base.chid);
-	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-		nv_error(priv, "channel %d [%s] kick timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	if (addr) {
-		nv_wo32(base, addr + 0x00, 0x00000000);
-		nv_wo32(base, addr + 0x04, 0x00000000);
-		bar->flush(bar);
-	}
-
-	return 0;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
 }
 
-static int
-gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+void
+gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
 {
-	union {
-		struct kepler_channel_gpfifo_a_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_priv *priv = (void *)engine;
-	struct gk104_fifo_base *base = (void *)parent;
+	struct gk104_fifo_engn *engn = &fifo->engine[engine];
 	struct gk104_fifo_chan *chan;
-	u64 usermem, ioffset, ilength;
-	int ret, i;
-
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x engine %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength, args->v0.engine);
-	} else
-		return ret;
-
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		if (args->v0.engine & (1 << i)) {
-			if (nvkm_engine(parent, fifo_engine[i].subdev)) {
-				args->v0.engine = (1 << i);
-				break;
-			}
-		}
-	}
-
-	if (i == FIFO_ENGINE_NR) {
-		nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine);
-		return -ENODEV;
-	}
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
-				       priv->user.bar.offset, 0x200,
-				       args->v0.pushbuf,
-				       fifo_engine[i].mask, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
-	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
-	chan->engine = i;
-
-	usermem = chan->base.chid * 0x200;
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	for (i = 0; i < 0x200; i += 4)
-		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
-
-	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x10, 0x0000face);
-	nv_wo32(base, 0x30, 0xfffff902);
-	nv_wo32(base, 0x48, lower_32_bits(ioffset));
-	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base, 0x84, 0x20400000);
-	nv_wo32(base, 0x94, 0x30000001);
-	nv_wo32(base, 0x9c, 0x00000100);
-	nv_wo32(base, 0xac, 0x0000001f);
-	nv_wo32(base, 0xe8, chan->base.chid);
-	nv_wo32(base, 0xb8, 0xf8000000);
-	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
-	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gk104_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
-	struct gk104_fifo_priv *priv = (void *)object->engine;
-	struct gk104_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-	int ret;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_memory *cur;
+	int nr = 0;
 
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
-	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
-
-	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
-		nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
-		gk104_fifo_runlist_update(priv, chan->engine);
-		nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
-	}
-
-	return 0;
-}
-
-static int
-gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
-	struct gk104_fifo_priv *priv = (void *)object->engine;
-	struct gk104_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
+	mutex_lock(&subdev->mutex);
+	cur = engn->runlist[engn->cur_runlist];
+	engn->cur_runlist = !engn->cur_runlist;
 
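+	/* Runlists are double-buffered: build the new list in the idle
+	 * buffer, point the hardware at it, then wait below for the
+	 * pending bit to clear. */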
-	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
-		nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
-		gk104_fifo_runlist_update(priv, chan->engine);
+	nvkm_kmap(cur);
+	list_for_each_entry(chan, &engn->chan, head) {
+		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+		nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
+		nr++;
 	}
+	nvkm_done(cur);
 
-	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-struct nvkm_ofuncs
-gk104_fifo_chan_ofuncs = {
-	.ctor = gk104_fifo_chan_ctor,
-	.dtor = _nvkm_fifo_channel_dtor,
-	.init = gk104_fifo_chan_init,
-	.fini = gk104_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-gk104_fifo_sclass[] = {
-	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - instmem heap and vm setup
- ******************************************************************************/
-
-static int
-gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	struct gk104_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
-			      &base->pgd);
-	if (ret)
-		return ret;
-
-	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0208, 0xffffffff);
-	nv_wo32(base, 0x020c, 0x000000ff);
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-gk104_fifo_context_dtor(struct nvkm_object *object)
-{
-	struct gk104_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
-	nvkm_fifo_context_destroy(&base->base);
-}
-
-static struct nvkm_oclass
-gk104_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_context_ctor,
-		.dtor = gk104_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
+	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
+	nvkm_wr32(device, 0x002274, (engine << 20) | nr);
 
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static inline int
-gk104_fifo_engidx(struct gk104_fifo_priv *priv, u32 engn)
-{
-	switch (engn) {
-	case NVDEV_ENGINE_GR    :
-	case NVDEV_ENGINE_CE2   : engn = 0; break;
-	case NVDEV_ENGINE_MSVLD : engn = 1; break;
-	case NVDEV_ENGINE_MSPPP : engn = 2; break;
-	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
-	case NVDEV_ENGINE_CE0   : engn = 4; break;
-	case NVDEV_ENGINE_CE1   : engn = 5; break;
-	case NVDEV_ENGINE_MSENC : engn = 6; break;
-	default:
-		return -1;
-	}
-
-	return engn;
+	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
+			       (engine * 0x08)) & 0x00100000),
+				msecs_to_jiffies(2000)) == 0)
+		nvkm_error(subdev, "runlist %d update timeout\n", engine);
+	mutex_unlock(&subdev->mutex);
 }
 
 static inline struct nvkm_engine *
-gk104_fifo_engine(struct gk104_fifo_priv *priv, u32 engn)
+gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
 {
-	if (engn >= ARRAY_SIZE(fifo_engine))
-		return NULL;
-	return nvkm_engine(priv, fifo_engine[engn].subdev);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u64 subdevs = gk104_fifo_engine_subdev(engn);
+	if (subdevs)
+		return nvkm_device_engine(device, __ffs(subdevs));
+	return NULL;
 }
 
 static void
 gk104_fifo_recover_work(struct work_struct *work)
 {
-	struct gk104_fifo_priv *priv = container_of(work, typeof(*priv), fault);
-	struct nvkm_object *engine;
+	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_engine *engine;
 	unsigned long flags;
 	u32 engn, engm = 0;
 	u64 mask, todo;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	mask = priv->mask;
-	priv->mask = 0ULL;
-	spin_unlock_irqrestore(&priv->base.lock, flags);
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	mask = fifo->mask;
+	fifo->mask = 0ULL;
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
-		engm |= 1 << gk104_fifo_engidx(priv, engn);
-	nv_mask(priv, 0x002630, engm, engm);
+		engm |= 1 << gk104_fifo_subdev_engine(engn);
+	nvkm_mask(device, 0x002630, engm, engm);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
-		if ((engine = (void *)nvkm_engine(priv, engn))) {
-			nv_ofuncs(engine)->fini(engine, false);
-			WARN_ON(nv_ofuncs(engine)->init(engine));
+		if ((engine = nvkm_device_engine(device, engn))) {
+			nvkm_subdev_fini(&engine->subdev, false);
+			WARN_ON(nvkm_subdev_init(&engine->subdev));
 		}
-		gk104_fifo_runlist_update(priv, gk104_fifo_engidx(priv, engn));
+		gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
 	}
 
-	nv_wr32(priv, 0x00262c, engm);
-	nv_mask(priv, 0x002630, engm, 0x00000000);
+	nvkm_wr32(device, 0x00262c, engm);
+	nvkm_mask(device, 0x002630, engm, 0x00000000);
 }
 
 static void
-gk104_fifo_recover(struct gk104_fifo_priv *priv, struct nvkm_engine *engine,
+gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
 		  struct gk104_fifo_chan *chan)
 {
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 chid = chan->base.chid;
-	unsigned long flags;
 
-	nv_error(priv, "%s engine fault on channel %d, recovering...\n",
-		       nv_subdev(engine)->name, chid);
+	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
+		   nvkm_subdev_name[engine->subdev.index], chid);
+	assert_spin_locked(&fifo->base.lock);
 
-	nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
-	chan->state = KILLED;
+	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
+	list_del_init(&chan->head);
+	chan->killed = true;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	priv->mask |= 1ULL << nv_engidx(engine);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	schedule_work(&priv->fault);
-}
-
-static int
-gk104_fifo_swmthd(struct gk104_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
-{
-	struct gk104_fifo_chan *chan = NULL;
-	struct nvkm_handle *bind;
-	unsigned long flags;
-	int ret = -EINVAL;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	if (likely(chid >= priv->base.min && chid <= priv->base.max))
-		chan = (void *)priv->base.channel[chid];
-	if (unlikely(!chan))
-		goto out;
-
-	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
-	if (likely(bind)) {
-		if (!mthd || !nv_call(bind->object, mthd, data))
-			ret = 0;
-		nvkm_namedb_put(bind);
-	}
-
-out:
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return ret;
+	fifo->mask |= 1ULL << engine->subdev.index;
+	schedule_work(&fifo->fault);
 }
 
 static const struct nvkm_enum
@@ -516,18 +151,16 @@ gk104_fifo_bind_reason[] = {
 };
 
 static void
-gk104_fifo_intr_bind(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_bind(struct gk104_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x00252c);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x00252c);
 	u32 code = intr & 0x000000ff;
-	const struct nvkm_enum *en;
-	char enunk[6] = "";
-
-	en = nvkm_enum_find(gk104_fifo_bind_reason, code);
-	if (!en)
-		snprintf(enunk, sizeof(enunk), "UNK%02x", code);
+	const struct nvkm_enum *en =
+		nvkm_enum_find(gk104_fifo_bind_reason, code);
 
-	nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk);
+	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
 }
 
 static const struct nvkm_enum
@@ -537,14 +170,17 @@ gk104_fifo_sched_reason[] = {
 };
 
 static void
-gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 {
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_engine *engine;
 	struct gk104_fifo_chan *chan;
+	unsigned long flags;
 	u32 engn;
 
-	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
-		u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
+		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
 		u32 busy = (stat & 0x80000000);
 		u32 next = (stat & 0x07ff0000) >> 16;
 		u32 chsw = (stat & 0x00008000);
@@ -555,32 +191,35 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv)
 		(void)save;
 
 		if (busy && chsw) {
-			if (!(chan = (void *)priv->base.channel[chid]))
-				continue;
-			if (!(engine = gk104_fifo_engine(priv, engn)))
-				continue;
-			gk104_fifo_recover(priv, engine, chan);
+			list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
+				if (chan->base.chid == chid) {
+					engine = gk104_fifo_engine(fifo, engn);
+					if (!engine)
+						break;
+					gk104_fifo_recover(fifo, engine, chan);
+					break;
+				}
+			}
 		}
 	}
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
 static void
-gk104_fifo_intr_sched(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_sched(struct gk104_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x00254c);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x00254c);
 	u32 code = intr & 0x000000ff;
-	const struct nvkm_enum *en;
-	char enunk[6] = "";
-
-	en = nvkm_enum_find(gk104_fifo_sched_reason, code);
-	if (!en)
-		snprintf(enunk, sizeof(enunk), "UNK%02x", code);
+	const struct nvkm_enum *en =
+		nvkm_enum_find(gk104_fifo_sched_reason, code);
 
-	nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
+	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
 
 	switch (code) {
 	case 0x0a:
-		gk104_fifo_intr_sched_ctxsw(priv);
+		gk104_fifo_intr_sched_ctxsw(fifo);
 		break;
 	default:
 		break;
@@ -588,38 +227,42 @@ gk104_fifo_intr_sched(struct gk104_fifo_priv *priv)
 }
 
 static void
-gk104_fifo_intr_chsw(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
 {
-	u32 stat = nv_rd32(priv, 0x00256c);
-	nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
-	nv_wr32(priv, 0x00256c, stat);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00256c);
+	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
+	nvkm_wr32(device, 0x00256c, stat);
 }
 
 static void
-gk104_fifo_intr_dropped_fault(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
 {
-	u32 stat = nv_rd32(priv, 0x00259c);
-	nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00259c);
+	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
 }
 
 static const struct nvkm_enum
 gk104_fifo_fault_engine[] = {
-	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
-	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
-	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
-	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
-	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
-	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
+	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
+	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
+	{ 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
+	{ 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
+	{ 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
+	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
+	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
 	{ 0x13, "PERF" },
-	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
-	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
-	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
+	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
+	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
 	{ 0x17, "PMU" },
-	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
-	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
+	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
+	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
 	{}
 };
 
@@ -708,80 +351,65 @@ gk104_fifo_fault_gpcclient[] = {
 };
 
 static void
-gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
 {
-	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
-	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
-	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
-	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
 	u32 gpc    = (stat & 0x1f000000) >> 24;
 	u32 client = (stat & 0x00001f00) >> 8;
 	u32 write  = (stat & 0x00000080);
 	u32 hub    = (stat & 0x00000040);
 	u32 reason = (stat & 0x0000000f);
-	struct nvkm_object *engctx = NULL, *object;
-	struct nvkm_engine *engine = NULL;
 	const struct nvkm_enum *er, *eu, *ec;
-	char erunk[6] = "";
-	char euunk[6] = "";
-	char ecunk[6] = "";
-	char gpcid[3] = "";
+	struct nvkm_engine *engine = NULL;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	char gpcid[8] = "";
 
 	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
-	if (!er)
-		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
-
 	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
+	if (hub) {
+		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
+	} else {
+		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
+		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
+	}
+
 	if (eu) {
 		switch (eu->data2) {
-		case NVDEV_SUBDEV_BAR:
-			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+		case NVKM_SUBDEV_BAR:
+			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
 			break;
-		case NVDEV_SUBDEV_INSTMEM:
-			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+		case NVKM_SUBDEV_INSTMEM:
+			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
 			break;
-		case NVDEV_ENGINE_IFB:
-			nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
+		case NVKM_ENGINE_IFB:
+			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
 			break;
 		default:
-			engine = nvkm_engine(priv, eu->data2);
-			if (engine)
-				engctx = nvkm_engctx_get(engine, inst);
+			engine = nvkm_device_engine(device, eu->data2);
 			break;
 		}
-	} else {
-		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
 	}
 
-	if (hub) {
-		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
-	} else {
-		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
-		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
-	}
+	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
 
-	if (!ec)
-		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
-
-	nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
-		       "channel 0x%010llx [%s]\n", write ? "write" : "read",
-		 (u64)vahi << 32 | valo, er ? er->name : erunk,
-		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
-		 ec ? ec->name : ecunk, (u64)inst << 12,
-		 nvkm_client_name(engctx));
-
-	object = engctx;
-	while (object) {
-		switch (nv_mclass(object)) {
-		case KEPLER_CHANNEL_GPFIFO_A:
-		case MAXWELL_CHANNEL_GPFIFO_A:
-			gk104_fifo_recover(priv, engine, (void *)object);
-			break;
-		}
-		object = object->parent;
-	}
+	nvkm_error(subdev,
+		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
+		   "reason %02x [%s] on channel %d [%010llx %s]\n",
+		   write ? "write" : "read", (u64)vahi << 32 | valo,
+		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
+		   reason, er ? er->name : "", chan ? chan->chid : -1,
+		   (u64)inst << 12,
+		   chan ? chan->object.client->name : "unknown");
 
-	nvkm_engctx_put(engctx);
+	if (engine && chan)
+		gk104_fifo_recover(fifo, engine, (void *)chan);
+	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 }
 
 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
@@ -819,35 +447,42 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
 };
 
 static void
-gk104_fifo_intr_pbdma_0(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
 {
-	u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
-	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
-	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
-	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
-	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
+	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
+	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00003ffc);
 	u32 show = stat;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	char msg[128];
 
 	if (stat & 0x00800000) {
-		if (!gk104_fifo_swmthd(priv, chid, mthd, data))
-			show &= ~0x00800000;
-		nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+		if (device->sw) {
+			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+				show &= ~0x00800000;
+		}
+		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
 	}
 
 	if (show) {
-		nv_error(priv, "PBDMA%d:", unit);
-		nvkm_bitfield_print(gk104_fifo_pbdma_intr_0, show);
-		pr_cont("\n");
-		nv_error(priv,
-			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
-			 unit, chid,
-			 nvkm_client_name_for_fifo_chid(&priv->base, chid),
-			 subc, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+				   "subc %d mthd %04x data %08x\n",
+			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, mthd, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}
 
-	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
 }
 
 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
@@ -860,280 +495,266 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
 };
 
 static void
-gk104_fifo_intr_pbdma_1(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
 {
-	u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
-	u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
-	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
+	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
+	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
+	char msg[128];
 
 	if (stat) {
-		nv_error(priv, "PBDMA%d:", unit);
-		nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat);
-		pr_cont("\n");
-		nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
-			 nv_rd32(priv, 0x040150 + (unit * 0x2000)),
-			 nv_rd32(priv, 0x040154 + (unit * 0x2000)));
+		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
+			   unit, stat, msg, chid,
+			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
+			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
 	}
 
-	nv_wr32(priv, 0x040148 + (unit * 0x2000), stat);
+	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
 }
 
 static void
-gk104_fifo_intr_runlist(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
 {
-	u32 mask = nv_rd32(priv, 0x002a00);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 mask = nvkm_rd32(device, 0x002a00);
 	while (mask) {
 		u32 engn = __ffs(mask);
-		wake_up(&priv->engine[engn].wait);
-		nv_wr32(priv, 0x002a00, 1 << engn);
+		wake_up(&fifo->engine[engn].wait);
+		nvkm_wr32(device, 0x002a00, 1 << engn);
 		mask &= ~(1 << engn);
 	}
 }
 
 static void
-gk104_fifo_intr_engine(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_engine(struct gk104_fifo *fifo)
 {
-	nvkm_fifo_uevent(&priv->base);
+	nvkm_fifo_uevent(&fifo->base);
 }
 
-static void
-gk104_fifo_intr(struct nvkm_subdev *subdev)
+void
+gk104_fifo_intr(struct nvkm_fifo *base)
 {
-	struct gk104_fifo_priv *priv = (void *)subdev;
-	u32 mask = nv_rd32(priv, 0x002140);
-	u32 stat = nv_rd32(priv, 0x002100) & mask;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x002140);
+	u32 stat = nvkm_rd32(device, 0x002100) & mask;
 
 	if (stat & 0x00000001) {
-		gk104_fifo_intr_bind(priv);
-		nv_wr32(priv, 0x002100, 0x00000001);
+		gk104_fifo_intr_bind(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000010) {
-		nv_error(priv, "PIO_ERROR\n");
-		nv_wr32(priv, 0x002100, 0x00000010);
+		nvkm_error(subdev, "PIO_ERROR\n");
+		nvkm_wr32(device, 0x002100, 0x00000010);
 		stat &= ~0x00000010;
 	}
 
 	if (stat & 0x00000100) {
-		gk104_fifo_intr_sched(priv);
-		nv_wr32(priv, 0x002100, 0x00000100);
+		gk104_fifo_intr_sched(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000100);
 		stat &= ~0x00000100;
 	}
 
 	if (stat & 0x00010000) {
-		gk104_fifo_intr_chsw(priv);
-		nv_wr32(priv, 0x002100, 0x00010000);
+		gk104_fifo_intr_chsw(fifo);
+		nvkm_wr32(device, 0x002100, 0x00010000);
 		stat &= ~0x00010000;
 	}
 
 	if (stat & 0x00800000) {
-		nv_error(priv, "FB_FLUSH_TIMEOUT\n");
-		nv_wr32(priv, 0x002100, 0x00800000);
+		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
+		nvkm_wr32(device, 0x002100, 0x00800000);
 		stat &= ~0x00800000;
 	}
 
 	if (stat & 0x01000000) {
-		nv_error(priv, "LB_ERROR\n");
-		nv_wr32(priv, 0x002100, 0x01000000);
+		nvkm_error(subdev, "LB_ERROR\n");
+		nvkm_wr32(device, 0x002100, 0x01000000);
 		stat &= ~0x01000000;
 	}
 
 	if (stat & 0x08000000) {
-		gk104_fifo_intr_dropped_fault(priv);
-		nv_wr32(priv, 0x002100, 0x08000000);
+		gk104_fifo_intr_dropped_fault(fifo);
+		nvkm_wr32(device, 0x002100, 0x08000000);
 		stat &= ~0x08000000;
 	}
 
 	if (stat & 0x10000000) {
-		u32 mask = nv_rd32(priv, 0x00259c);
+		u32 mask = nvkm_rd32(device, 0x00259c);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gk104_fifo_intr_fault(priv, unit);
-			nv_wr32(priv, 0x00259c, (1 << unit));
+			gk104_fifo_intr_fault(fifo, unit);
+			nvkm_wr32(device, 0x00259c, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x10000000;
 	}
 
 	if (stat & 0x20000000) {
-		u32 mask = nv_rd32(priv, 0x0025a0);
+		u32 mask = nvkm_rd32(device, 0x0025a0);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gk104_fifo_intr_pbdma_0(priv, unit);
-			gk104_fifo_intr_pbdma_1(priv, unit);
-			nv_wr32(priv, 0x0025a0, (1 << unit));
+			gk104_fifo_intr_pbdma_0(fifo, unit);
+			gk104_fifo_intr_pbdma_1(fifo, unit);
+			nvkm_wr32(device, 0x0025a0, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x20000000;
 	}
 
 	if (stat & 0x40000000) {
-		gk104_fifo_intr_runlist(priv);
+		gk104_fifo_intr_runlist(fifo);
 		stat &= ~0x40000000;
 	}
 
 	if (stat & 0x80000000) {
-		nv_wr32(priv, 0x002100, 0x80000000);
-		gk104_fifo_intr_engine(priv);
+		nvkm_wr32(device, 0x002100, 0x80000000);
+		gk104_fifo_intr_engine(fifo);
 		stat &= ~0x80000000;
 	}
 
 	if (stat) {
-		nv_error(priv, "INTR 0x%08x\n", stat);
-		nv_mask(priv, 0x002140, stat, 0x00000000);
-		nv_wr32(priv, 0x002100, stat);
+		nvkm_error(subdev, "INTR %08x\n", stat);
+		nvkm_mask(device, 0x002140, stat, 0x00000000);
+		nvkm_wr32(device, 0x002100, stat);
 	}
 }
 
-static void
-gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+void
+gk104_fifo_fini(struct nvkm_fifo *base)
 {
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	flush_work(&fifo->fault);
+	/* allow mmu fault interrupts, even when we're not using fifo */
+	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
 }
 
-static void
-gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
+int
+gk104_fifo_oneinit(struct nvkm_fifo *base)
 {
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
-}
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int ret, i;
 
-static const struct nvkm_event_func
-gk104_fifo_uevent_func = {
-	.ctor = nvkm_fifo_uevent_ctor,
-	.init = gk104_fifo_uevent_init,
-	.fini = gk104_fifo_uevent_fini,
-};
+	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x8000, 0x1000, false,
+				      &fifo->engine[i].runlist[0]);
+		if (ret)
+			return ret;
 
-int
-gk104_fifo_fini(struct nvkm_object *object, bool suspend)
-{
-	struct gk104_fifo_priv *priv = (void *)object;
-	int ret;
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x8000, 0x1000, false,
+				      &fifo->engine[i].runlist[1]);
+		if (ret)
+			return ret;
+
+		init_waitqueue_head(&fifo->engine[i].wait);
+		INIT_LIST_HEAD(&fifo->engine[i].chan);
+	}
 
-	ret = nvkm_fifo_fini(&priv->base, suspend);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      fifo->base.nr * 0x200, 0x1000, true,
+			      &fifo->user.mem);
 	if (ret)
 		return ret;
 
-	/* allow mmu fault interrupts, even when we're not using fifo */
-	nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
+	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
+			    &fifo->user.bar);
+	if (ret)
+		return ret;
+
+	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
 	return 0;
 }
 
-int
-gk104_fifo_init(struct nvkm_object *object)
+void
+gk104_fifo_init(struct nvkm_fifo *base)
 {
-	struct gk104_fifo_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int i;
 
 	/* enable all available PBDMA units */
-	nv_wr32(priv, 0x000204, 0xffffffff);
-	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
-	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
+	nvkm_wr32(device, 0x000204, 0xffffffff);
+	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
+	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
 
 	/* PBDMA[n] */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+	for (i = 0; i < fifo->spoon_nr; i++) {
+		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
 	}
 
 	/* PBDMA[n].HCE */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
+	for (i = 0; i < fifo->spoon_nr; i++) {
+		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
+		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
 	}
 
-	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
 
-	nv_wr32(priv, 0x002100, 0xffffffff);
-	nv_wr32(priv, 0x002140, 0x7fffffff);
-	return 0;
+	nvkm_wr32(device, 0x002100, 0xffffffff);
+	nvkm_wr32(device, 0x002140, 0x7fffffff);
 }
 
-void
-gk104_fifo_dtor(struct nvkm_object *object)
+void *
+gk104_fifo_dtor(struct nvkm_fifo *base)
 {
-	struct gk104_fifo_priv *priv = (void *)object;
+	struct gk104_fifo *fifo = gk104_fifo(base);
 	int i;
 
-	nvkm_gpuobj_unmap(&priv->user.bar);
-	nvkm_gpuobj_ref(NULL, &priv->user.mem);
+	nvkm_vm_put(&fifo->user.bar);
+	nvkm_memory_del(&fifo->user.mem);
 
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
-		nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
+	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+		nvkm_memory_del(&fifo->engine[i].runlist[1]);
+		nvkm_memory_del(&fifo->engine[i].runlist[0]);
 	}
 
-	nvkm_fifo_destroy(&priv->base);
+	return fifo;
 }
 
 int
-gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+		int index, int nr, struct nvkm_fifo **pfifo)
 {
-	struct gk104_fifo_impl *impl = (void *)oclass;
-	struct gk104_fifo_priv *priv;
-	int ret, i;
+	struct gk104_fifo *fifo;
 
-	ret = nvkm_fifo_create(parent, engine, oclass, 0,
-			       impl->channels - 1, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	INIT_WORK(&priv->fault, gk104_fifo_recover_work);
-
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
-				      0, &priv->engine[i].runlist[0]);
-		if (ret)
-			return ret;
-
-		ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
-				      0, &priv->engine[i].runlist[1]);
-		if (ret)
-			return ret;
-
-		init_waitqueue_head(&priv->engine[i].wait);
-	}
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
-	if (ret)
-		return ret;
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
+	*pfifo = &fifo->base;
 
-	ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
-			      &priv->user.bar);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &priv->base.uevent);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = gk104_fifo_intr;
-	nv_engine(priv)->cclass = &gk104_fifo_cclass;
-	nv_engine(priv)->sclass = gk104_fifo_sclass;
-	return 0;
+	return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
 }
 
-struct nvkm_oclass *
-gk104_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = gk104_fifo_fini,
+static const struct nvkm_fifo_func
+gk104_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gk104_fifo_gpfifo_oclass,
+		NULL
 	},
-	.channels = 4096,
-}.base;
+};
+
+int
+gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 318d30d6ee1a..5afd9b5ec5d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -1,18 +1,77 @@
-#ifndef __NVKM_FIFO_NVE0_H__
-#define __NVKM_FIFO_NVE0_H__
-#include <engine/fifo.h>
-
-int  gk104_fifo_ctor(struct nvkm_object *, struct nvkm_object *,
-		    struct nvkm_oclass *, void *, u32,
-		    struct nvkm_object **);
-void gk104_fifo_dtor(struct nvkm_object *);
-int  gk104_fifo_init(struct nvkm_object *);
-int  gk104_fifo_fini(struct nvkm_object *, bool);
-
-struct gk104_fifo_impl {
-	struct nvkm_oclass base;
-	u32 channels;
+#ifndef __GK104_FIFO_H__
+#define __GK104_FIFO_H__
+#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+struct gk104_fifo_engn {
+	struct nvkm_memory *runlist[2];
+	int cur_runlist;
+	wait_queue_head_t wait;
+	struct list_head chan;
+};
+
+struct gk104_fifo {
+	struct nvkm_fifo base;
+
+	struct work_struct fault;
+	u64 mask;
+
+	struct gk104_fifo_engn engine[7];
+	struct {
+		struct nvkm_memory *mem;
+		struct nvkm_vma bar;
+	} user;
+	int spoon_nr;
 };
 
-extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs;
+int gk104_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+		    int index, int nr, struct nvkm_fifo **);
+void *gk104_fifo_dtor(struct nvkm_fifo *);
+int gk104_fifo_oneinit(struct nvkm_fifo *);
+void gk104_fifo_init(struct nvkm_fifo *);
+void gk104_fifo_fini(struct nvkm_fifo *);
+void gk104_fifo_intr(struct nvkm_fifo *);
+void gk104_fifo_uevent_init(struct nvkm_fifo *);
+void gk104_fifo_uevent_fini(struct nvkm_fifo *);
+void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine);
+
+static inline u64
+gk104_fifo_engine_subdev(int engine)
+{
+	switch (engine) {
+	case 0: return (1ULL << NVKM_ENGINE_GR) |
+		       (1ULL << NVKM_ENGINE_SW) |
+		       (1ULL << NVKM_ENGINE_CE2);
+	case 1: return (1ULL << NVKM_ENGINE_MSPDEC);
+	case 2: return (1ULL << NVKM_ENGINE_MSPPP);
+	case 3: return (1ULL << NVKM_ENGINE_MSVLD);
+	case 4: return (1ULL << NVKM_ENGINE_CE0);
+	case 5: return (1ULL << NVKM_ENGINE_CE1);
+	case 6: return (1ULL << NVKM_ENGINE_MSENC);
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static inline int
+gk104_fifo_subdev_engine(int subdev)
+{
+	switch (subdev) {
+	case NVKM_ENGINE_GR:
+	case NVKM_ENGINE_SW:
+	case NVKM_ENGINE_CE2   : return 0;
+	case NVKM_ENGINE_MSPDEC: return 1;
+	case NVKM_ENGINE_MSPPP : return 2;
+	case NVKM_ENGINE_MSVLD : return 3;
+	case NVKM_ENGINE_CE0   : return 4;
+	case NVKM_ENGINE_CE1   : return 5;
+	case NVKM_ENGINE_MSENC : return 6;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
 #endif
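
The two inline helpers above form a pair of inverse mappings between a runlist index and a bitmask of subdev indices, with runlist 0 aggregating GR, SW and CE2. A hedged, self-contained sketch of the same shape — the enum values are stand-ins, not the real nvkm indices:

#include <stdint.h>
#include <stdio.h>

enum { ENG_GR, ENG_SW, ENG_CE2, ENG_MSPDEC };  /* stand-in subdev indices */

static uint64_t engine_subdev(int runlist)     /* runlist -> subdev mask */
{
	switch (runlist) {
	case 0: return (1ULL << ENG_GR) |
		       (1ULL << ENG_SW) |
		       (1ULL << ENG_CE2);
	case 1: return (1ULL << ENG_MSPDEC);
	default: return 0;
	}
}

static int subdev_engine(int subdev)           /* subdev -> runlist */
{
	switch (subdev) {
	case ENG_GR:
	case ENG_SW:
	case ENG_CE2:    return 0;
	case ENG_MSPDEC: return 1;
	default:         return 0;
	}
}

int main(void)
{
	/* round trip: every subdev in a runlist's mask maps back to it */
	uint64_t mask = engine_subdev(0);
	while (mask) {
		int subdev = __builtin_ctzll(mask);
		printf("subdev %d -> runlist %d\n", subdev, subdev_engine(subdev));
		mask &= ~(1ULL << subdev);
	}
	return 0;
}
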
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 927092217a06..ce01c1a7d41c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -22,15 +22,25 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "changk104.h"
 
-struct nvkm_oclass *
-gk208_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+gk208_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gk104_fifo_gpfifo_oclass,
+		NULL
 	},
-	.channels = 1024,
-}.base;
+};
+
+int
+gk208_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gk208_fifo, device, index, 1024, pfifo);
+}
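
gk208 — and gk20a, gm204 and gm20b below — reuse the whole gk104 implementation through a const function table, varying only the channel class and the channel count passed to gk104_fifo_new_(). A hedged sketch of that vtable-plus-template shape, with illustrative names rather than the nvkm API:

#include <stdio.h>

struct fifo_func {                     /* per-chip table, all-const */
	const char *chan_class;
	void (*init)(void);
};

static void common_init(void)          /* shared across every variant */
{
	puts("shared gk104-style init");
}

static const struct fifo_func gk208_like = {
	.chan_class = "KEPLER_CHANNEL_GPFIFO_A",
	.init       = common_init,
};

static int fifo_new(const struct fifo_func *func, int nr)
{
	printf("%d channels, class %s\n", nr, func->chan_class);
	func->init();
	return 0;
}

int main(void)
{
	return fifo_new(&gk208_like, 1024);  /* gk208: 1024 channels */
}
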
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index b30dc87a1357..b47fe98f4181 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -20,15 +20,25 @@
  * DEALINGS IN THE SOFTWARE.
  */
 #include "gk104.h"
+#include "changk104.h"
 
-struct nvkm_oclass *
-gk20a_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = gk104_fifo_fini,
+static const struct nvkm_fifo_func
+gk20a_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gk104_fifo_gpfifo_oclass,
+		NULL
 	},
-	.channels = 128,
-}.base;
+};
+
+int
+gk20a_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gk20a_fifo, device, index, 128, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
index 749d525dd8e3..2db629f1bf7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
@@ -22,36 +22,25 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "changk104.h"
 
-#include <nvif/class.h>
-
-static struct nvkm_oclass
-gm204_fifo_sclass[] = {
-	{ MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
-	{}
+static const struct nvkm_fifo_func
+gm204_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gm204_fifo_gpfifo_oclass,
+		NULL
+	},
 };
 
-static int
-gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gm204_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject);
-	if (ret == 0) {
-		struct gk104_fifo_priv *priv = (void *)*pobject;
-		nv_engine(priv)->sclass = gm204_fifo_sclass;
-	}
-	return ret;
+	return gk104_fifo_new_(&gm204_fifo, device, index, 4096, pfifo);
 }
-
-struct nvkm_oclass *
-gm204_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-	.channels = 4096,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
new file mode 100644
index 000000000000..ae6375d9760f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct nvkm_fifo_func
+gm20b_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gm204_fifo_gpfifo_oclass,
+		NULL
+	},
+};
+
+int
+gm20b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gm20b_fifo, device, index, 512, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
new file mode 100644
index 000000000000..820132363f68
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_gpfifo_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	u64 ioffset, ilength;
+	int ret;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "pushbuf %llx ioffset %016llx "
+				   "ilength %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.ioffset, args->v0.ilength);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				 oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+	nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+	nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+g84_fifo_gpfifo_oclass = {
+	.base.oclass = G82_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = g84_fifo_gpfifo_new,
+};
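
The gpfifo constructors in this series all derive ilength as order_base_2(args->v0.ilength / 8): the ring size in bytes becomes a count of 8-byte GPFIFO entries, stored as a log2 in bits 31:16 alongside the upper half of the ring address. A hedged reimplementation of that packing (the values are examples):

#include <stdint.h>
#include <stdio.h>

static unsigned order_base_2(uint32_t n)  /* ceil(log2(n)), n >= 1 */
{
	unsigned o = 0;
	while ((1u << o) < n)
		o++;
	return o;
}

int main(void)
{
	uint64_t ioffset = 0x20100000ull;            /* example ring base */
	uint32_t bytes   = 0x10000;                  /* example ring size */
	uint32_t ilength = order_base_2(bytes / 8);  /* log2(entries)     */

	/* the high RAMFC word: upper address bits | log2 size in 31:16 */
	uint32_t word = (uint32_t)(ioffset >> 32) | (ilength << 16);
	printf("ilength=%u word=%08x\n", ilength, word);
	return 0;
}
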
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
new file mode 100644
index 000000000000..e7cbc139c1d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changf100.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static u32
+gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_SW    : return 0;
+	case NVKM_ENGINE_GR    : return 0x0210;
+	case NVKM_ENGINE_CE0   : return 0x0230;
+	case NVKM_ENGINE_CE1   : return 0x0240;
+	case NVKM_ENGINE_MSPDEC: return 0x0250;
+	case NVKM_ENGINE_MSPPP : return 0x0260;
+	case NVKM_ENGINE_MSVLD : return 0x0270;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static int
+gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine, bool suspend)
+{
+	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_gpuobj *inst = chan->base.inst;
+	int ret = 0;
+
+	nvkm_wr32(device, 0x002634, chan->base.chid);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
+			   chan->base.chid, chan->base.object.client->name);
+		ret = -EBUSY;
+		if (suspend)
+			return ret;
+	}
+
+	if (offset) {
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, 0x00000000);
+		nvkm_wo32(inst, offset + 0x04, 0x00000000);
+		nvkm_done(inst);
+	}
+
+	return ret;
+}
+
+static int
+gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+
+	if (offset) {
+		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+		nvkm_done(inst);
+	}
+
+	return 0;
+}
+
+static void
+gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+}
+
+static int
+gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine,
+			      struct nvkm_object *object)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	int engn = engine->subdev.index;
+	int ret;
+
+	if (!gf100_fifo_gpfifo_engine_addr(engine))
+		return 0;
+
+	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+	if (ret)
+		return ret;
+
+	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+}
+
+static void
+gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct gf100_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 coff = chan->base.chid * 8;
+
+	if (!list_empty(&chan->head) && !chan->killed) {
+		list_del_init(&chan->head);
+		nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
+		gf100_fifo_runlist_update(fifo);
+	}
+
+	gf100_fifo_intr_engine(fifo);
+
+	nvkm_wr32(device, 0x003000 + coff, 0x00000000);
+}
+
+static void
+gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct gf100_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 addr = chan->base.inst->addr >> 12;
+	u32 coff = chan->base.chid * 8;
+
+	nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);
+
+	if (list_empty(&chan->head) && !chan->killed) {
+		list_add_tail(&chan->head, &fifo->chan);
+		nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
+		gf100_fifo_runlist_update(fifo);
+	}
+}
+
+static void *
+gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_gpuobj_del(&chan->pgd);
+	return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+gf100_fifo_gpfifo_func = {
+	.dtor = gf100_fifo_gpfifo_dtor,
+	.init = gf100_fifo_gpfifo_init,
+	.fini = gf100_fifo_gpfifo_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
+	.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
+	.engine_init = gf100_fifo_gpfifo_engine_init,
+	.engine_fini = gf100_fifo_gpfifo_engine_fini,
+};
+
+static int
+gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
+{
+	union {
+		struct fermi_channel_gpfifo_v0 v0;
+	} *args = data;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_object *parent = oclass->parent;
+	struct gf100_fifo_chan *chan;
+	u64 usermem, ioffset, ilength;
+	int ret, i;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "ioffset %016llx ilength %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.ioffset,
+			   args->v0.ilength);
+	} else
+		return ret;
+
+	/* allocate channel */
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->fifo = fifo;
+	INIT_LIST_HEAD(&chan->head);
+
+	ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
+				  0x1000, 0x1000, true, args->v0.vm, 0,
+				  (1ULL << NVKM_ENGINE_CE0) |
+				  (1ULL << NVKM_ENGINE_CE1) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MSPDEC) |
+				  (1ULL << NVKM_ENGINE_MSPPP) |
+				  (1ULL << NVKM_ENGINE_MSVLD) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  1, fifo->user.bar.offset, 0x1000,
+				  oclass, &chan->base);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
+	/* page directory */
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+	if (ret)
+		return ret;
+
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+	nvkm_done(chan->base.inst);
+
+	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	if (ret)
+		return ret;
+
+	/* clear channel control registers */
+
+	usermem = chan->base.chid * 0x1000;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(fifo->user.mem);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+	nvkm_done(fifo->user.mem);
+	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+	/* RAMFC */
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+					 (ilength << 16));
+	nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
+	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+	nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
+	nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
+	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+	nvkm_done(chan->base.inst);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gf100_fifo_gpfifo_oclass = {
+	.base.oclass = FERMI_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gf100_fifo_gpfifo_new,
+};
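
gf100_fifo_gpfifo_engine_fini() kicks the channel off the PBDMA by writing its chid to 0x002634 and polling — via nvkm_msec, with a 2-second budget — until the register echoes it back. A hedged sketch of the idiom with the MMIO access mocked out and the timeout approximated by a bounded loop:

#include <stdint.h>
#include <stdio.h>

static uint32_t mock_reg;                       /* stands in for 0x002634 */
static void     wr32(uint32_t v) { mock_reg = v; }
static uint32_t rd32(void)       { return mock_reg; }

static int kick(uint32_t chid)
{
	wr32(chid);                                 /* request preemption */
	for (long spins = 0; spins < 1000000; spins++) {
		if (rd32() == chid)                 /* hardware ack'd     */
			return 0;
	}
	return -16;                                 /* -EBUSY on timeout  */
}

int main(void)
{
	printf("kick -> %d\n", kick(3));
	return 0;
}
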
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
new file mode 100644
index 000000000000..0b817540a9e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
+{
+	struct gk104_fifo *fifo = chan->fifo;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_client *client = chan->base.object.client;
+
+	nvkm_wr32(device, 0x002634, chan->base.chid);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
+			   chan->base.chid, client->name);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static u32
+gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_SW    :
+	case NVKM_ENGINE_CE0   :
+	case NVKM_ENGINE_CE1   :
+	case NVKM_ENGINE_CE2   : return 0x0000;
+	case NVKM_ENGINE_GR    : return 0x0210;
+	case NVKM_ENGINE_MSPDEC: return 0x0250;
+	case NVKM_ENGINE_MSPPP : return 0x0260;
+	case NVKM_ENGINE_MSVLD : return 0x0270;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static int
+gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine, bool suspend)
+{
+	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+	int ret;
+
+	ret = gk104_fifo_gpfifo_kick(chan);
+	if (ret && suspend)
+		return ret;
+
+	if (offset) {
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, 0x00000000);
+		nvkm_wo32(inst, offset + 0x04, 0x00000000);
+		nvkm_done(inst);
+	}
+
+	return ret;
+}
+
+static int
+gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+
+	if (offset) {
+		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+		nvkm_done(inst);
+	}
+
+	return 0;
+}
+
+static void
+gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+}
+
+static int
+gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine,
+			      struct nvkm_object *object)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	int engn = engine->subdev.index;
+	int ret;
+
+	if (!gk104_fifo_gpfifo_engine_addr(engine))
+		return 0;
+
+	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+	if (ret)
+		return ret;
+
+	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+}
+
+static void
+gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct gk104_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 coff = chan->base.chid * 8;
+
+	if (!list_empty(&chan->head)) {
+		list_del_init(&chan->head);
+		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
+		gk104_fifo_runlist_update(fifo, chan->engine);
+	}
+
+	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
+}
+
+static void
+gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct gk104_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 addr = chan->base.inst->addr >> 12;
+	u32 coff = chan->base.chid * 8;
+
+	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
+	nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
+
+	if (list_empty(&chan->head) && !chan->killed) {
+		list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
+		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
+		gk104_fifo_runlist_update(fifo, chan->engine);
+		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
+	}
+}
+
+static void *
+gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_gpuobj_del(&chan->pgd);
+	return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+gk104_fifo_gpfifo_func = {
+	.dtor = gk104_fifo_gpfifo_dtor,
+	.init = gk104_fifo_gpfifo_init,
+	.fini = gk104_fifo_gpfifo_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+	.engine_init = gk104_fifo_gpfifo_engine_init,
+	.engine_fini = gk104_fifo_gpfifo_engine_fini,
+};
+
+int
+gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
+{
+	union {
+		struct kepler_channel_gpfifo_a_v0 v0;
+	} *args = data;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_object *parent = oclass->parent;
+	struct gk104_fifo_chan *chan;
+	u64 usermem, ioffset, ilength;
+	u32 engines;
+	int ret, i;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "ioffset %016llx ilength %08x engine %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.ioffset,
+			   args->v0.ilength, args->v0.engine);
+	} else
+		return ret;
+
+	/* determine which downstream engines are present */
+	for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+		u64 subdevs = gk104_fifo_engine_subdev(i);
+		if (!nvkm_device_engine(device, __ffs64(subdevs)))
+			continue;
+		engines |= (1 << i);
+	}
+
+	/* if this is an engine mask query, we're done */
+	if (!args->v0.engine) {
+		args->v0.engine = engines;
+		return nvkm_object_new(oclass, NULL, 0, pobject);
+	}
+
+	/* check that we support a requested engine - note that the user
+	 * argument is a mask in order to allow the user to request (for
+	 * example) *any* copy engine, without caring which one is used.
+	 */
+	args->v0.engine &= engines;
+	if (!args->v0.engine) {
+		nvif_ioctl(parent, "no supported engine\n");
+		return -ENODEV;
+	}
+
+	/* allocate the channel */
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->fifo = fifo;
+	chan->engine = __ffs(args->v0.engine);
+	INIT_LIST_HEAD(&chan->head);
+
+	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
+				  0x1000, 0x1000, true, args->v0.vm, 0,
+				  gk104_fifo_engine_subdev(chan->engine),
+				  1, fifo->user.bar.offset, 0x200,
+				  oclass, &chan->base);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
+	/* page directory */
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+	if (ret)
+		return ret;
+
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+	nvkm_done(chan->base.inst);
+
+	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	if (ret)
+		return ret;
+
+	/* clear channel control registers */
+	usermem = chan->base.chid * 0x200;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(fifo->user.mem);
+	for (i = 0; i < 0x200; i += 4)
+		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+	nvkm_done(fifo->user.mem);
+	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+	/* RAMFC */
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+					 (ilength << 16));
+	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+	nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
+	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+	nvkm_done(chan->base.inst);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gk104_fifo_gpfifo_oclass = {
+	.base.oclass = KEPLER_CHANNEL_GPFIFO_A,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gk104_fifo_gpfifo_new,
+};
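
gk104_fifo_gpfifo_new() treats a zero engine field as a query — it fills in the supported mask and returns a null object — while a non-zero field is an "any of these" request that gets intersected with what exists, the lowest surviving bit picking the runlist. A hedged sketch of that two-phase handshake, with illustrative names:

#include <stdint.h>
#include <stdio.h>

static int chan_new(uint32_t *engine, uint32_t supported)
{
	if (!*engine) {          /* query: report what exists, no channel */
		*engine = supported;
		return 1;        /* caller sees "mask returned"           */
	}
	*engine &= supported;    /* honour "any of these" requests        */
	if (!*engine)
		return -19;      /* -ENODEV: nothing requested exists     */
	printf("using runlist %d\n", __builtin_ctz(*engine));
	return 0;
}

int main(void)
{
	uint32_t mask = 0;
	chan_new(&mask, 0x3f);   /* phase 1: query                   */
	chan_new(&mask, 0x3f);   /* phase 2: create on any supported */
	return 0;
}
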
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
new file mode 100644
index 000000000000..6511d6e21ecc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+const struct nvkm_fifo_chan_oclass
+gm204_fifo_gpfifo_oclass = {
+	.base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gk104_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
new file mode 100644
index 000000000000..a8c69f878221
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		     void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_gpfifo_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	u64 ioffset, ilength;
+	int ret;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "pushbuf %llx ioffset %016llx "
+				   "ilength %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.ioffset, args->v0.ilength);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				  oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+	nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_gpfifo_oclass = {
+	.base.oclass = NV50_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_fifo_gpfifo_new,
+};
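
The RAMFC+0x80 word written by both the g84 and nv50 gpfifo constructors packs three fields: log2(RAMHT size) minus 9 in bits 31:27, search mode 4 (SEARCH_FULL) in bits 26:24, and the hash table's offset in 16-byte units below that. A hedged helper reproducing the packing as read off the code above:

#include <stdint.h>
#include <stdio.h>

static uint32_t ramfc_0x80(unsigned ramht_bits, uint32_t ramht_offset)
{
	return ((ramht_bits - 9) << 27) |
	       (4 << 24)                |   /* SEARCH_FULL         */
	       (ramht_offset >> 4);         /* offset in 16B units */
}

int main(void)
{
	/* e.g. a 9-bit RAMHT at instance offset 0x10000 */
	printf("%08x\n", ramfc_0x80(9, 0x10000));
	return 0;
}
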
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 043e4296084c..ad707ff176cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -22,20 +22,17 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
-#include <core/handle.h>
 #include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
 #include <subdev/timer.h>
+#include <engine/sw.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv04_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv04_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
@@ -47,268 +44,19 @@ nv04_ramfc[] = {
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-int
-nv04_fifo_object_attach(struct nvkm_object *parent,
-			struct nvkm_object *object, u32 handle)
-{
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	u32 context, chid = chan->base.chid;
-	int ret;
-
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->addr >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW:
-		context |= 0x00000000;
-		break;
-	case NVDEV_ENGINE_GR:
-		context |= 0x00010000;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		context |= 0x00020000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	context |= 0x80000000; /* valid */
-	context |= chid << 24;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-	return ret;
-}
-
-void
-nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
-{
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	mutex_lock(&nv_subdev(priv)->mutex);
-	nvkm_ramht_remove(priv->ramht, cookie);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-}
-
-int
-nv04_fifo_context_attach(struct nvkm_object *parent,
-			 struct nvkm_object *object)
-{
-	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
-	return 0;
-}
-
-static int
-nv04_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
-	chan->ramfc = chan->base.chid * 32;
-
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x10,
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	return 0;
-}
-
-void
-nv04_fifo_chan_dtor(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object->engine;
-	struct nv04_fifo_chan *chan = (void *)object;
-	struct ramfc_desc *c = priv->ramfc_desc;
-
-	do {
-		nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
-	} while ((++c)->bits);
-
-	nvkm_fifo_channel_destroy(&chan->base);
-}
-
-int
-nv04_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object->engine;
-	struct nv04_fifo_chan *chan = (void *)object;
-	u32 mask = 1 << chan->base.chid;
-	unsigned long flags;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return 0;
-}
-
-int
-nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv04_fifo_priv *priv = (void *)object->engine;
-	struct nv04_fifo_chan *chan = (void *)object;
-	struct nvkm_gpuobj *fctx = priv->ramfc;
-	struct ramfc_desc *c;
-	unsigned long flags;
-	u32 data = chan->ramfc;
-	u32 chid;
-
-	/* prevent fifo context switches */
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0);
-
-	/* if this channel is active, replace it with a null context */
-	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
-	if (chid == chan->base.chid) {
-		nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
-		nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
-
-		c = priv->ramfc_desc;
-		do {
-			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
-			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
-			u32 rv = (nv_rd32(priv, c->regp) &  rm) >> c->regs;
-			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
-			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
-		} while ((++c)->bits);
-
-		c = priv->ramfc_desc;
-		do {
-			nv_wr32(priv, c->regp, 0x00000000);
-		} while ((++c)->bits);
-
-		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	}
-
-	/* restore normal operation, after disabling dma mode */
-	nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-nv04_fifo_ofuncs = {
-	.ctor = nv04_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv04_fifo_sclass[] = {
-	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-int
-nv04_fifo_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	struct nv04_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static struct nvkm_oclass
-nv04_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
 void
-nv04_fifo_pause(struct nvkm_fifo *pfifo, unsigned long *pflags)
-__acquires(priv->base.lock)
+nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
+__acquires(fifo->base.lock)
 {
-	struct nv04_fifo_priv *priv = (void *)pfifo;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
+	spin_lock_irqsave(&fifo->base.lock, flags);
 	*pflags = flags;
 
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
-	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
+	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
 
 	/* in some cases the puller may be left in an inconsistent state
 	 * if you try to stop it while it's busy translating handles.
@@ -319,28 +67,31 @@ __acquires(priv->base.lock)
 	 * to avoid this, we invalidate the most recently calculated
 	 * instance.
 	 */
-	if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
-			   NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
-		nv_warn(priv, "timeout idling puller\n");
+	nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
+		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
+			break;
+	);
 
-	if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
+	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
 			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
-		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
 
-	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
 }
 
 void
-nv04_fifo_start(struct nvkm_fifo *pfifo, unsigned long *pflags)
-__releases(priv->base.lock)
+nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
+__releases(fifo->base.lock)
 {
-	struct nv04_fifo_priv *priv = (void *)pfifo;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	unsigned long flags = *pflags;
 
-	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
+	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
 
-	spin_unlock_irqrestore(&priv->base.lock, flags);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
 static const char *
@@ -354,61 +105,40 @@ nv_dma_state_err(u32 state)
 }
 
 static bool
-nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
+nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
 {
-	struct nv04_fifo_chan *chan = NULL;
-	struct nvkm_handle *bind;
-	const int subc = (addr >> 13) & 0x7;
-	const int mthd = addr & 0x1ffc;
+	struct nvkm_sw *sw = device->sw;
+	const int subc = (addr & 0x0000e000) >> 13;
+	const int mthd = (addr & 0x00001ffc);
+	const u32 mask = 0x0000000f << (subc * 4);
+	u32 engine = nvkm_rd32(device, 0x003280);
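+	/* 0x003280 is NV04_PFIFO_CACHE1_ENGINE: one nibble per subchannel,
+	 * zero routing that subchannel's methods to the software engine
+	 */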
 	bool handled = false;
-	unsigned long flags;
-	u32 engine;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	if (likely(chid >= priv->base.min && chid <= priv->base.max))
-		chan = (void *)priv->base.channel[chid];
-	if (unlikely(!chan))
-		goto out;
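+	/* the case ranges below fall through on purpose: an object bind
+	 * (method 0x0000) first clears the subchannel's engine routing,
+	 * 0x0180-0x01fc swap the handle for the instance looked up in
+	 * NV04_PFIFO_CACHE1_HASH (0x003258), and anything still routed
+	 * to software is then handed to the sw engine
+	 */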
 
 	switch (mthd) {
-	case 0x0000:
-		bind = nvkm_namedb_get(nv_namedb(chan), data);
-		if (unlikely(!bind))
-			break;
-
-		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
-			engine = 0x0000000f << (subc * 4);
-			chan->subc[subc] = data;
-			handled = true;
-
-			nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
-		}
-
-		nvkm_namedb_put(bind);
+	case 0x0000 ... 0x0000: /* subchannel's engine -> software */
+		nvkm_wr32(device, 0x003280, (engine &= ~mask));
+	case 0x0180 ... 0x01fc: /* handle -> instance */
+		data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
+	case 0x0100 ... 0x017c:
+	case 0x0200 ... 0x1ffc: /* pass method down to sw */
+		if (!(engine & mask) && sw)
+			handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
 		break;
 	default:
-		engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
-		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
-			break;
-
-		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
-		if (likely(bind)) {
-			if (!nv_call(bind->object, mthd, data))
-				handled = true;
-			nvkm_namedb_put(bind);
-		}
 		break;
 	}
 
-out:
-	spin_unlock_irqrestore(&priv->base.lock, flags);
 	return handled;
 }
 
 static void
-nv04_fifo_cache_error(struct nvkm_device *device,
-		      struct nv04_fifo_priv *priv, u32 chid, u32 get)
+nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
 {
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	u32 pull0 = nvkm_rd32(device, 0x003250);
 	u32 mthd, data;
 	int ptr;
 
@@ -420,216 +150,214 @@ nv04_fifo_cache_error(struct nvkm_device *device,
 	ptr = (get & 0x7ff) >> 2;
 
 	if (device->card_type < NV_40) {
-		mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
-		data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
+		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
+		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
 	} else {
-		mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
-		data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
+		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
+		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
 	}
 
-	if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
-		const char *client_name =
-			nvkm_client_name_for_fifo_chid(&priv->base, chid);
-		nv_error(priv,
-			 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
-			 chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
-			 data);
+	if (!(pull0 & 0x00000100) ||
+	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "CACHE_ERROR - "
+			   "ch %d [%s] subc %d mthd %04x data %08x\n",
+			   chid, chan ? chan->object.client->name : "unknown",
+			   (mthd >> 13) & 7, mthd & 0x1ffc, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}
 
-	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
-	nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
 
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
-		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
-	nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
-		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);
 
-	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
-		nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
+		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
 }
 
 static void
-nv04_fifo_dma_pusher(struct nvkm_device *device,
-		     struct nv04_fifo_priv *priv, u32 chid)
+nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
 {
-	const char *client_name;
-	u32 dma_get = nv_rd32(priv, 0x003244);
-	u32 dma_put = nv_rd32(priv, 0x003240);
-	u32 push = nv_rd32(priv, 0x003220);
-	u32 state = nv_rd32(priv, 0x003228);
-
-	client_name = nvkm_client_name_for_fifo_chid(&priv->base, chid);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 dma_get = nvkm_rd32(device, 0x003244);
+	u32 dma_put = nvkm_rd32(device, 0x003240);
+	u32 push = nvkm_rd32(device, 0x003220);
+	u32 state = nvkm_rd32(device, 0x003228);
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	const char *name;
 
+	chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+	name = chan ? chan->object.client->name : "unknown";
 	if (device->card_type == NV_50) {
-		u32 ho_get = nv_rd32(priv, 0x003328);
-		u32 ho_put = nv_rd32(priv, 0x003320);
-		u32 ib_get = nv_rd32(priv, 0x003334);
-		u32 ib_put = nv_rd32(priv, 0x003330);
-
-		nv_error(priv,
-			 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
-			 chid, client_name, ho_get, dma_get, ho_put, dma_put,
-			 ib_get, ib_put, state, nv_dma_state_err(state), push);
+		u32 ho_get = nvkm_rd32(device, 0x003328);
+		u32 ho_put = nvkm_rd32(device, 0x003320);
+		u32 ib_get = nvkm_rd32(device, 0x003334);
+		u32 ib_put = nvkm_rd32(device, 0x003330);
+
+		nvkm_error(subdev, "DMA_PUSHER - "
+			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
+			   "ib_put %08x state %08x (err: %s) push %08x\n",
+			   chid, name, ho_get, dma_get, ho_put, dma_put,
+			   ib_get, ib_put, state, nv_dma_state_err(state),
+			   push);
 
 		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
-		nv_wr32(priv, 0x003364, 0x00000000);
+		nvkm_wr32(device, 0x003364, 0x00000000);
 		if (dma_get != dma_put || ho_get != ho_put) {
-			nv_wr32(priv, 0x003244, dma_put);
-			nv_wr32(priv, 0x003328, ho_put);
+			nvkm_wr32(device, 0x003244, dma_put);
+			nvkm_wr32(device, 0x003328, ho_put);
 		} else
 		if (ib_get != ib_put)
-			nv_wr32(priv, 0x003334, ib_put);
+			nvkm_wr32(device, 0x003334, ib_put);
 	} else {
-		nv_error(priv,
-			 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
-			 chid, client_name, dma_get, dma_put, state,
-			 nv_dma_state_err(state), push);
+		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
+				   "state %08x (err: %s) push %08x\n",
+			   chid, name, dma_get, dma_put, state,
+			   nv_dma_state_err(state), push);
 
 		if (dma_get != dma_put)
-			nv_wr32(priv, 0x003244, dma_put);
+			nvkm_wr32(device, 0x003244, dma_put);
 	}
+	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 
-	nv_wr32(priv, 0x003228, 0x00000000);
-	nv_wr32(priv, 0x003220, 0x00000001);
-	nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+	nvkm_wr32(device, 0x003228, 0x00000000);
+	nvkm_wr32(device, 0x003220, 0x00000001);
+	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
 }
 
 void
-nv04_fifo_intr(struct nvkm_subdev *subdev)
+nv04_fifo_intr(struct nvkm_fifo *base)
 {
-	struct nvkm_device *device = nv_device(subdev);
-	struct nv04_fifo_priv *priv = (void *)subdev;
-	u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
-	u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
+	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
 	u32 reassign, chid, get, sem;
 
-	reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
 
-	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
-	get  = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
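+	/* CACHE1_PUSH1 carries the active chid in its low bits; nr is a
+	 * power of two, so nr - 1 is the chid mask
+	 */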
+	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
+	get  = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);
 
 	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
-		nv04_fifo_cache_error(device, priv, chid, get);
+		nv04_fifo_cache_error(fifo, chid, get);
 		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
 	}
 
 	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
-		nv04_fifo_dma_pusher(device, priv, chid);
+		nv04_fifo_dma_pusher(fifo, chid);
 		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
 	}
 
 	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
 		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
-		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
+		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
 
-		sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
-		nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
+		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
 
-		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
-		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
 	}
 
 	if (device->card_type == NV_50) {
 		if (stat & 0x00000010) {
 			stat &= ~0x00000010;
-			nv_wr32(priv, 0x002100, 0x00000010);
+			nvkm_wr32(device, 0x002100, 0x00000010);
 		}
 
 		if (stat & 0x40000000) {
-			nv_wr32(priv, 0x002100, 0x40000000);
-			nvkm_fifo_uevent(&priv->base);
+			nvkm_wr32(device, 0x002100, 0x40000000);
+			nvkm_fifo_uevent(&fifo->base);
 			stat &= ~0x40000000;
 		}
 	}
 
 	if (stat) {
-		nv_warn(priv, "unknown intr 0x%08x\n", stat);
-		nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
-		nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
+		nvkm_warn(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
+		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
 	}
 
-	nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
-}
-
-static int
-nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv04_fifo_cclass;
-	nv_engine(priv)->sclass = nv04_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv04_ramfc;
-	return 0;
+	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
 }
 
 void
-nv04_fifo_dtor(struct nvkm_object *object)
+nv04_fifo_init(struct nvkm_fifo *base)
 {
-	struct nv04_fifo_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->ramfc);
-	nvkm_gpuobj_ref(NULL, &priv->ramro);
-	nvkm_ramht_ref(NULL, &priv->ramht);
-	nvkm_fifo_destroy(&priv->base);
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
+
+	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
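+	/* RAMHT: the size field is ramht->bits - 9 (bits being log2 of the
+	 * entry count, so a 4 KiB, 512-entry table encodes as 0), and the
+	 * base addresses are in 256-byte units
+	 */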
+	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+					    ((ramht->bits - 9) << 16) |
+					    (ramht->gpuobj->addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);
+
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
+
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
 }
 
 int
-nv04_fifo_init(struct nvkm_object *object)
+nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+	       int index, int nr, const struct nv04_fifo_ramfc *ramfc,
+	       struct nvkm_fifo **pfifo)
 {
-	struct nv04_fifo_priv *priv = (void *)object;
+	struct nv04_fifo *fifo;
 	int ret;
 
-	ret = nvkm_fifo_init(&priv->base);
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	fifo->ramfc = ramfc;
+	*pfifo = &fifo->base;
+
+	ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
-	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
-	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((priv->ramht->bits - 9) << 16) |
-				        (priv->ramht->gpuobj.addr >> 8));
-	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
-	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-
-	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	set_bit(nr - 1, fifo->base.mask); /* inactive channel */
 	return 0;
 }
 
-struct nvkm_oclass *
-nv04_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv04_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+nv04_fifo = {
+	.init = nv04_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv04_fifo_dma_oclass,
+		NULL
 	},
 };
+
+int
+nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return nv04_fifo_new_(&nv04_fifo, device, index, 16,
+			      nv04_fifo_ramfc, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index e0e0c47cb4ca..03f60004bf7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -1,137 +1,9 @@
 #ifndef __NV04_FIFO_H__
 #define __NV04_FIFO_H__
-#include <engine/fifo.h>
+#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
+#include "priv.h"
 
-#define NV04_PFIFO_DELAY_0                                 0x00002040
-#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
-#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
-#define NV03_PFIFO_INTR_0                                  0x00002100
-#define NV03_PFIFO_INTR_EN_0                               0x00002140
-#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
-#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
-#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
-#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
-#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
-#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
-#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
-#define NV03_PFIFO_RAMHT                                   0x00002210
-#define NV03_PFIFO_RAMFC                                   0x00002214
-#define NV03_PFIFO_RAMRO                                   0x00002218
-#define NV40_PFIFO_RAMFC                                   0x00002220
-#define NV03_PFIFO_CACHES                                  0x00002500
-#define NV04_PFIFO_MODE                                    0x00002504
-#define NV04_PFIFO_DMA                                     0x00002508
-#define NV04_PFIFO_SIZE                                    0x0000250c
-#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
-#define NV50_PFIFO_CTX_TABLE__SIZE                                128
-#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
-#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
-#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
-#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
-#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
-#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
-#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
-#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
-#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
-#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
-#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
-#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
-#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
-#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
-#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
-#define NV03_PFIFO_CACHE1_PUT                              0x00003210
-#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
-#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
-#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
-#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
-#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
-#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
-#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
-#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
-#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
-#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
-#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
-#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
-#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
-#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
-#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
-#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
-#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
-#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
-#define NV04_PFIFO_CACHE1_HASH                             0x00003258
-#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
-#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
-#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
-#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
-#define NV03_PFIFO_CACHE1_GET                              0x00003270
-#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
-#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
-#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
-#define NV40_PFIFO_UNK32E4                                 0x000032E4
-#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
-#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
-#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
-#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
-
-struct ramfc_desc {
+struct nv04_fifo_ramfc {
 	unsigned bits:6;
 	unsigned ctxs:5;
 	unsigned ctxp:8;
@@ -139,37 +11,13 @@ struct ramfc_desc {
 	unsigned regp;
 };
 
-struct nv04_fifo_priv {
+struct nv04_fifo {
 	struct nvkm_fifo base;
-	struct ramfc_desc *ramfc_desc;
-	struct nvkm_ramht  *ramht;
-	struct nvkm_gpuobj *ramro;
-	struct nvkm_gpuobj *ramfc;
-};
-
-struct nv04_fifo_base {
-	struct nvkm_fifo_base base;
-};
-
-struct nv04_fifo_chan {
-	struct nvkm_fifo_chan base;
-	u32 subc[8];
-	u32 ramfc;
+	const struct nv04_fifo_ramfc *ramfc;
 };
 
-int  nv04_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
-void nv04_fifo_object_detach(struct nvkm_object *, int);
-
-void nv04_fifo_chan_dtor(struct nvkm_object *);
-int  nv04_fifo_chan_init(struct nvkm_object *);
-int  nv04_fifo_chan_fini(struct nvkm_object *, bool suspend);
-
-int  nv04_fifo_context_ctor(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, void *, u32,
-			    struct nvkm_object **);
-
-void nv04_fifo_dtor(struct nvkm_object *);
-int  nv04_fifo_init(struct nvkm_object *);
-void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
-void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+		   int index, int nr, const struct nv04_fifo_ramfc *,
+		   struct nvkm_fifo **);
+void nv04_fifo_init(struct nvkm_fifo *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
index 48ce4af6f543..f9a87deb2b3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -22,17 +22,11 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv10_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv10_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -45,134 +39,21 @@ nv10_ramfc[] = {
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv10_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
-	chan->ramfc = chan->base.chid * 32;
-
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	return 0;
-}
-
-static struct nvkm_ofuncs
-nv10_fifo_ofuncs = {
-	.ctor = nv10_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv10_fifo_sclass[] = {
-	{ NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv10_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
+static const struct nvkm_fifo_func
+nv10_fifo = {
+	.init = nv04_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv10_fifo_dma_oclass,
+		NULL
 	},
 };
 
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv10_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv10_fifo_cclass;
-	nv_engine(priv)->sclass = nv10_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv10_ramfc;
-	return 0;
+	return nv04_fifo_new_(&nv10_fifo, device, index, 32,
+			      nv10_fifo_ramfc, pfifo);
 }
-
-struct nvkm_oclass *
-nv10_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv04_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
index 4a20a6fd3887..f6d383a21222 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -22,17 +22,14 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
-#include <core/client.h>
-#include <core/engctx.h>
 #include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv17_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv17_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -50,166 +47,51 @@ nv17_ramfc[] = {
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv17_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+static void
+nv17_fifo_init(struct nvkm_fifo *base)
 {
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
-				       &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
-	chan->ramfc = chan->base.chid * 64;
-
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	return 0;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
+
+	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+					    ((ramht->bits - 9) << 16) |
+					    (ramht->gpuobj->addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
+					    0x00010000);
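+	/* bit 16 presumably selects the larger RAMFC layout; nv17 channels
+	 * use 64-byte entries where nv10's used 32
+	 */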
+
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
+
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
 }
 
-static struct nvkm_ofuncs
-nv17_fifo_ofuncs = {
-	.ctor = nv17_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv17_fifo_sclass[] = {
-	{ NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv17_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x17),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
+static const struct nvkm_fifo_func
+nv17_fifo = {
+	.init = nv17_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv17_fifo_dma_oclass,
+		NULL
 	},
 };
 
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv17_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv17_fifo_cclass;
-	nv_engine(priv)->sclass = nv17_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv17_ramfc;
-	return 0;
-}
-
-static int
-nv17_fifo_init(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
-	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
-	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((priv->ramht->bits - 9) << 16) |
-				        (priv->ramht->gpuobj.addr >> 8));
-	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
-	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-
-	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
-	return 0;
+	return nv04_fifo_new_(&nv17_fifo, device, index, 32,
+			      nv17_fifo_ramfc, pfifo);
 }
-
-struct nvkm_oclass *
-nv17_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x17),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv17_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv17_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 5bfc96265f3b..8c7ba32763c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -22,19 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
-#include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
 #include <core/ramht.h>
 #include <subdev/fb.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv40_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv40_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -60,297 +56,72 @@ nv40_ramfc[] = {
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv40_fifo_object_attach(struct nvkm_object *parent,
-			struct nvkm_object *object, u32 handle)
-{
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	u32 context, chid = chan->base.chid;
-	int ret;
-
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->addr >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW:
-		context |= 0x00000000;
-		break;
-	case NVDEV_ENGINE_GR:
-		context |= 0x00100000;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		context |= 0x00200000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	context |= chid << 23;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-	return ret;
-}
-
-static int
-nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
-{
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	unsigned long flags;
-	u32 reg, ctx;
-
-	switch (nv_engidx(engctx->engine)) {
-	case NVDEV_ENGINE_SW:
-		return 0;
-	case NVDEV_ENGINE_GR:
-		reg = 0x32e0;
-		ctx = 0x38;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		reg = 0x330c;
-		ctx = 0x54;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
-
-	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
-		nv_wr32(priv, reg, nv_engctx(engctx)->addr);
-	nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
-
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return 0;
-}
-
-static int
-nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			 struct nvkm_object *engctx)
+static void
+nv40_fifo_init(struct nvkm_fifo *base)
 {
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	unsigned long flags;
-	u32 reg, ctx;
-
-	switch (nv_engidx(engctx->engine)) {
-	case NVDEV_ENGINE_SW:
-		return 0;
-	case NVDEV_ENGINE_GR:
-		reg = 0x32e0;
-		ctx = 0x38;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		reg = 0x330c;
-		ctx = 0x54;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
-
-	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
-		nv_wr32(priv, reg, 0x00000000);
-	nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
-
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return 0;
-}
-
-static int
-nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x1000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = nv40_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv40_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv40_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	chan->ramfc = chan->base.chid * 128;
-
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
-	return 0;
-}
-
-static struct nvkm_ofuncs
-nv40_fifo_ofuncs = {
-	.ctor = nv40_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv40_fifo_sclass[] = {
-	{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv40_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv40_fifo_cclass;
-	nv_engine(priv)->sclass = nv40_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv40_ramfc;
-	return 0;
-}
-
-static int
-nv40_fifo_init(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	int ret;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x002040, 0x000000ff);
-	nv_wr32(priv, 0x002044, 0x2101ffff);
-	nv_wr32(priv, 0x002058, 0x00000001);
-
-	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((priv->ramht->bits - 9) << 16) |
-				        (priv->ramht->gpuobj.addr >> 8));
-	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
-
-	switch (nv_device(priv)->chipset) {
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
+
+	nvkm_wr32(device, 0x002040, 0x000000ff);
+	nvkm_wr32(device, 0x002044, 0x2101ffff);
+	nvkm_wr32(device, 0x002058, 0x00000001);
+
+	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+					    ((ramht->bits - 9) << 16) |
+					    (ramht->gpuobj->addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-		nv_wr32(priv, 0x002230, 0x00000001);
+		nvkm_wr32(device, 0x002230, 0x00000001);
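+		/* deliberate fallthrough: these chipsets also want the
+		 * 0x002220 write below
+		 */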
 	case 0x40:
 	case 0x41:
 	case 0x42:
 	case 0x43:
 	case 0x45:
 	case 0x48:
-		nv_wr32(priv, 0x002220, 0x00030002);
+		nvkm_wr32(device, 0x002220, 0x00030002);
 		break;
 	default:
-		nv_wr32(priv, 0x002230, 0x00000000);
-		nv_wr32(priv, 0x002220, ((pfb->ram->size - 512 * 1024 +
-					 priv->ramfc->addr) >> 16) |
-					0x00030000);
+		nvkm_wr32(device, 0x002230, 0x00000000);
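+		/* RAMFC apparently sits in the top 512 KiB of VRAM on these
+		 * chipsets; the register takes its address in 64 KiB units
+		 */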
+		nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
+					      nvkm_memory_addr(ramfc)) >> 16) |
+					    0x00030000);
 		break;
 	}
 
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
 
-	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
 
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
-	return 0;
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
 }
 
-struct nvkm_oclass *
-nv40_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv40_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+nv40_fifo = {
+	.init = nv40_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv40_fifo_dma_oclass,
+		NULL
 	},
 };
+
+int
+nv40_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return nv04_fifo_new_(&nv40_fifo, device, index, 32,
+			      nv40_fifo_ramfc, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index f25f0fd0655d..66eb12c2b5ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -22,513 +22,126 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "nv04.h"
+#include "channv50.h"
 
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
+#include <core/gpuobj.h>
 
 static void
-nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
+nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_gpuobj *cur;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_memory *cur;
 	int i, p;
 
-	cur = priv->playlist[priv->cur_playlist];
-	priv->cur_playlist = !priv->cur_playlist;
-
-	for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
-		if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
-			nv_wo32(cur, p++ * 4, i);
-	}
-
-	bar->flush(bar);
-
-	nv_wr32(priv, 0x0032f4, cur->addr >> 12);
-	nv_wr32(priv, 0x0032ec, p);
-	nv_wr32(priv, 0x002500, 0x00000101);
-}
-
-void
-nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
-{
-	mutex_lock(&nv_subdev(priv)->mutex);
-	nv50_fifo_playlist_update_locked(priv);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-}
-
-static int
-nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nvkm_gpuobj *ectx = (void *)object;
-	u64 limit = ectx->addr + ectx->size - 1;
-	u64 start = ectx->addr;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW   : return 0;
-	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
-	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	nv_wo32(base->eng, addr + 0x00, 0x00190000);
-	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
-	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
-	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
-					upper_32_bits(start));
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			 struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_priv *priv = (void *)parent->engine;
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 addr, me;
-	int ret = 0;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW   : return 0;
-	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
-	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
-	default:
-		return -EINVAL;
-	}
-
-	/* HW bug workaround:
-	 *
-	 * PFIFO will hang forever if the connected engines don't report
-	 * that they've processed the context switch request.
-	 *
-	 * In order for the kickoff to work, we need to ensure all the
-	 * connected engines are in a state where they can answer.
-	 *
-	 * Newer chipsets don't seem to suffer from this issue, and well,
-	 * there's also a "ignore these engines" bitmask reg we can use
-	 * if we hit the issue there..
-	 */
-	me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
-
-	/* do the kickoff... */
-	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
-	if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
-		nv_error(priv, "channel %d [%s] unload timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			ret = -EBUSY;
-	}
-	nv_wr32(priv, 0x00b860, me);
-
-	if (ret == 0) {
-		nv_wo32(base->eng, addr + 0x00, 0x00000000);
-		nv_wo32(base->eng, addr + 0x04, 0x00000000);
-		nv_wo32(base->eng, addr + 0x08, 0x00000000);
-		nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-		nv_wo32(base->eng, addr + 0x10, 0x00000000);
-		nv_wo32(base->eng, addr + 0x14, 0x00000000);
-		bar->flush(bar);
-	}
-
-	return ret;
-}
-
-static int
-nv50_fifo_object_attach(struct nvkm_object *parent,
-			struct nvkm_object *object, u32 handle)
-{
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 context;
-
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->node->offset >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
+	cur = fifo->runlist[fifo->cur_runlist];
+	fifo->cur_runlist = !fifo->cur_runlist;
 
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
-	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
-	case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
-	default:
-		return -EINVAL;
+	nvkm_kmap(cur);
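+	/* append the chid of every enabled channel (bit 31 of its 0x002600
+	 * context-table entry set) to the inactive runlist buffer
+	 */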
+	for (i = 0, p = 0; i < fifo->base.nr; i++) {
+		if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
+			nvkm_wo32(cur, p++ * 4, i);
 	}
+	nvkm_done(cur);
 
-	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
+	nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
+	nvkm_wr32(device, 0x0032ec, p);
+	nvkm_wr32(device, 0x002500, 0x00000101);
 }
 
 void
-nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
+nv50_fifo_runlist_update(struct nv50_fifo *fifo)
 {
-	struct nv50_fifo_chan *chan = (void *)parent;
-	nvkm_ramht_remove(chan->ramht, cookie);
+	mutex_lock(&fifo->base.engine.subdev.mutex);
+	nv50_fifo_runlist_update_locked(fifo);
+	mutex_unlock(&fifo->base.engine.subdev.mutex);
 }
 
-static int
-nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
+int
+nv50_fifo_oneinit(struct nvkm_fifo *base)
 {
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int ret;
 
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
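+	/* two runlists (128 entries each), swapped on every update so the
+	 * hardware never fetches a list while it is being rebuilt
+	 */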
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+			      false, &fifo->runlist[0]);
 	if (ret)
 		return ret;
 
-	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	bar->flush(bar);
-	return 0;
+	return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+			       false, &fifo->runlist[1]);
 }
 
-static int
-nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
+void
+nv50_fifo_init(struct nvkm_fifo *base)
 {
-	union {
-		struct nv50_channel_gpfifo_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	u64 ioffset, ilength;
-	int ret;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int i;
 
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
+	nvkm_wr32(device, 0x002044, 0x01003fff);
 
-	args->v0.chid = chan->base.chid;
+	nvkm_wr32(device, 0x002100, 0xffffffff);
+	nvkm_wr32(device, 0x002140, 0xbfffffff);
 
-	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
+	for (i = 0; i < 128; i++)
+		nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
+	nv50_fifo_runlist_update_locked(fifo);
 
-	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
-	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	bar->flush(bar);
-	return 0;
+	nvkm_wr32(device, 0x003200, 0x00000001);
+	nvkm_wr32(device, 0x003250, 0x00000001);
+	nvkm_wr32(device, 0x002500, 0x00000001);
 }
 
-void
-nv50_fifo_chan_dtor(struct nvkm_object *object)
+void *
+nv50_fifo_dtor(struct nvkm_fifo *base)
 {
-	struct nv50_fifo_chan *chan = (void *)object;
-	nvkm_ramht_ref(NULL, &chan->ramht);
-	nvkm_fifo_channel_destroy(&chan->base);
-}
-
-static int
-nv50_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object->engine;
-	struct nv50_fifo_base *base = (void *)object->parent;
-	struct nv50_fifo_chan *chan = (void *)object;
-	struct nvkm_gpuobj *ramfc = base->ramfc;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
-	nv50_fifo_playlist_update(priv);
-	return 0;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	nvkm_memory_del(&fifo->runlist[1]);
+	nvkm_memory_del(&fifo->runlist[0]);
+	return fifo;
 }
 
 int
-nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_fifo_priv *priv = (void *)object->engine;
-	struct nv50_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-
-	/* remove channel from playlist, fifo will unload context */
-	nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
-	nv50_fifo_playlist_update(priv);
-	nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
-
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-nv50_fifo_ofuncs_dma = {
-	.ctor = nv50_fifo_chan_ctor_dma,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = nv50_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_ofuncs
-nv50_fifo_ofuncs_ind = {
-	.ctor = nv50_fifo_chan_ctor_ind,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = nv50_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv50_fifo_sclass[] = {
-	{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
-	{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static int
-nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_fifo **pfifo)
 {
-	struct nv50_fifo_base *base;
+	struct nv50_fifo *fifo;
 	int ret;
 
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
-				       0x1000, NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	*pfifo = &fifo->base;
 
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
-			      &base->pgd);
-	if (ret)
-		return ret;
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
+	ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base);
 	if (ret)
 		return ret;
 
+	set_bit(0, fifo->base.mask); /* PIO channel */
+	set_bit(127, fifo->base.mask); /* inactive channel */
 	return 0;
 }
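
nv50_fifo_new_() pre-reserves channel 0 (the PIO channel) and channel 127 (the inactive channel) in the allocation bitmap, so the generic channel allocator can never hand them out. A sketch of that reserve-then-allocate pattern, assuming the usual kernel bitmap helpers (the flat globals and CHAN_NR are simplifications for illustration):

/* Sketch of the reserve-then-allocate channel-id pattern, using the
 * real kernel bitmap helpers; CHAN_NR and the flat globals are
 * simplifications for illustration.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define CHAN_NR 128

static DECLARE_BITMAP(chan_mask, CHAN_NR);

static void chan_mask_init(void)
{
	set_bit(0, chan_mask);           /* PIO channel, never handed out */
	set_bit(CHAN_NR - 1, chan_mask); /* inactive channel */
}

static int chan_alloc(void)
{
	int chid = find_first_zero_bit(chan_mask, CHAN_NR);

	if (chid >= CHAN_NR)
		return -ENOSPC; /* every channel id is in use */
	set_bit(chid, chan_mask);
	return chid;
}
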
 
-void
-nv50_fifo_context_dtor(struct nvkm_object *object)
-{
-	struct nv50_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->eng);
-	nvkm_gpuobj_ref(NULL, &base->ramfc);
-	nvkm_gpuobj_ref(NULL, &base->cache);
-	nvkm_fifo_context_destroy(&base->base);
-}
-
-static struct nvkm_oclass
-nv50_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fifo_context_ctor,
-		.dtor = nv50_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
+static const struct nvkm_fifo_func
+nv50_fifo = {
+	.dtor = nv50_fifo_dtor,
+	.oneinit = nv50_fifo_oneinit,
+	.init = nv50_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv50_fifo_dma_oclass,
+		&nv50_fifo_gpfifo_oclass,
+		NULL
 	},
 };
 
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv50_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[0]);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[1]);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv50_fifo_cclass;
-	nv_engine(priv)->sclass = nv50_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	return 0;
-}
-
-void
-nv50_fifo_dtor(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object;
-
-	nvkm_gpuobj_ref(NULL, &priv->playlist[1]);
-	nvkm_gpuobj_ref(NULL, &priv->playlist[0]);
-
-	nvkm_fifo_destroy(&priv->base);
-}
-
 int
-nv50_fifo_init(struct nvkm_object *object)
+nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	struct nv50_fifo_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x00250c, 0x6f3cfc34);
-	nv_wr32(priv, 0x002044, 0x01003fff);
-
-	nv_wr32(priv, 0x002100, 0xffffffff);
-	nv_wr32(priv, 0x002140, 0xbfffffff);
-
-	for (i = 0; i < 128; i++)
-		nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
-	nv50_fifo_playlist_update_locked(priv);
-
-	nv_wr32(priv, 0x003200, 0x00000001);
-	nv_wr32(priv, 0x003250, 0x00000001);
-	nv_wr32(priv, 0x002500, 0x00000001);
-	return 0;
+	return nv50_fifo_new_(&nv50_fifo, device, index, pfifo);
 }
-
-struct nvkm_oclass *
-nv50_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fifo_ctor,
-		.dtor = nv50_fifo_dtor,
-		.init = nv50_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index 09ed93c66567..8ab53948cbb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -1,36 +1,19 @@
 #ifndef __NV50_FIFO_H__
 #define __NV50_FIFO_H__
-#include <engine/fifo.h>
+#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
+#include "priv.h"
 
-struct nv50_fifo_priv {
+struct nv50_fifo {
 	struct nvkm_fifo base;
-	struct nvkm_gpuobj *playlist[2];
-	int cur_playlist;
+	struct nvkm_memory *runlist[2];
+	int cur_runlist;
 };
 
-struct nv50_fifo_base {
-	struct nvkm_fifo_base base;
-	struct nvkm_gpuobj *ramfc;
-	struct nvkm_gpuobj *cache;
-	struct nvkm_gpuobj *eng;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
-
-struct nv50_fifo_chan {
-	struct nvkm_fifo_chan base;
-	u32 subc[8];
-	struct nvkm_ramht *ramht;
-};
-
-void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
-
-void nv50_fifo_object_detach(struct nvkm_object *, int);
-void nv50_fifo_chan_dtor(struct nvkm_object *);
-int  nv50_fifo_chan_fini(struct nvkm_object *, bool);
-
-void nv50_fifo_context_dtor(struct nvkm_object *);
+int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+		   int index, struct nvkm_fifo **);
 
-void nv50_fifo_dtor(struct nvkm_object *);
-int  nv50_fifo_init(struct nvkm_object *);
+void *nv50_fifo_dtor(struct nvkm_fifo *);
+int nv50_fifo_oneinit(struct nvkm_fifo *);
+void nv50_fifo_init(struct nvkm_fifo *);
+void nv50_fifo_runlist_update(struct nv50_fifo *);
 #endif
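
The new nv50_fifo(p) macro is a plain container_of() downcast: given a pointer to the embedded struct nvkm_fifo, it recovers the enclosing struct nv50_fifo. A freestanding illustration of the idiom (simplified macro, same offsetof arithmetic as the kernel's):

/* Freestanding illustration of the container_of() downcast behind
 * nv50_fifo(p); simplified macro, same offsetof arithmetic as the
 * kernel's.
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };

struct derived {
	struct base base; /* embedded base object */
	int extra;
};

static struct derived *to_derived(struct base *b)
{
	/* valid whenever b really is &some_derived.base */
	return container_of(b, struct derived, base);
}
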
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
new file mode 100644
index 000000000000..cb1432e9be08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_FIFO_PRIV_H__
+#define __NVKM_FIFO_PRIV_H__
+#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
+#include <engine/fifo.h>
+
+int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
+		   int index, int nr, struct nvkm_fifo *);
+void nvkm_fifo_uevent(struct nvkm_fifo *);
+
+struct nvkm_fifo_func {
+	void *(*dtor)(struct nvkm_fifo *);
+	int (*oneinit)(struct nvkm_fifo *);
+	void (*init)(struct nvkm_fifo *);
+	void (*fini)(struct nvkm_fifo *);
+	void (*intr)(struct nvkm_fifo *);
+	void (*pause)(struct nvkm_fifo *, unsigned long *);
+	void (*start)(struct nvkm_fifo *, unsigned long *);
+	void (*uevent_init)(struct nvkm_fifo *);
+	void (*uevent_fini)(struct nvkm_fifo *);
+	const struct nvkm_fifo_chan_oclass *chan[];
+};
+
+void nv04_fifo_intr(struct nvkm_fifo *);
+void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+#endif
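
priv.h replaces the old oclass/ofuncs machinery with a per-implementation function table; the core engine code calls through it, and a NULL hook means the operation is unsupported on that generation. A compressed sketch of the dispatch idiom (stand-in types, illustration only):

/* Compressed sketch of the optional-hook dispatch idiom the
 * nvkm_fifo_func table uses; stand-in types, illustration only.
 */
struct fifo;

struct fifo_func {
	void (*init)(struct fifo *); /* mandatory on every generation */
	void (*fini)(struct fifo *); /* optional: may be NULL */
};

struct fifo {
	const struct fifo_func *func;
};

static void fifo_init(struct fifo *fifo)
{
	fifo->func->init(fifo); /* every implementation provides this */
}

static void fifo_fini(struct fifo *fifo)
{
	if (fifo->func->fini) /* hook is per-generation optional */
		fifo->func->fini(fifo);
}
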
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
new file mode 100644
index 000000000000..92d56221197b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
@@ -0,0 +1,132 @@
+#ifndef __NV04_FIFO_REGS_H__
+#define __NV04_FIFO_REGS_H__
+
+#define NV04_PFIFO_DELAY_0                                 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
+#define NV03_PFIFO_INTR_0                                  0x00002100
+#define NV03_PFIFO_INTR_EN_0                               0x00002140
+#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
+#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
+#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
+#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
+#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
+#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
+#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
+#define NV03_PFIFO_RAMHT                                   0x00002210
+#define NV03_PFIFO_RAMFC                                   0x00002214
+#define NV03_PFIFO_RAMRO                                   0x00002218
+#define NV40_PFIFO_RAMFC                                   0x00002220
+#define NV03_PFIFO_CACHES                                  0x00002500
+#define NV04_PFIFO_MODE                                    0x00002504
+#define NV04_PFIFO_DMA                                     0x00002508
+#define NV04_PFIFO_SIZE                                    0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE                                128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
+#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
+#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
+#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
+#define NV03_PFIFO_CACHE1_PUT                              0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
+#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
+#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
+#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
+#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
+#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
+#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
+#define NV04_PFIFO_CACHE1_HASH                             0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
+#define NV03_PFIFO_CACHE1_GET                              0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
+#define NV40_PFIFO_UNK32E4                                 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
+#endif
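
The parameterised macros in regsnv04.h compute per-channel register addresses; NV50_PFIFO_CTX_TABLE(c), for instance, expands to 0x2600 + c*4, which is exactly the register nv50_fifo_init() and the runlist builder poke by raw offset. A hedged usage fragment (nvkm_rd32()/nvkm_mask() are the real accessors; the helper names are made up):

/* Usage fragment, assuming the nvkm device headers are in scope;
 * nvkm_rd32()/nvkm_mask() are the real accessors, chan_enabled()
 * and chan_disable() are made-up helper names.
 */
static bool
chan_enabled(struct nvkm_device *device, int chid)
{
	/* 0x2600 + chid * 4; bit 31 set means the channel is enabled */
	return nvkm_rd32(device, NV50_PFIFO_CTX_TABLE(chid)) &
	       NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED;
}

static void
chan_disable(struct nvkm_device *device, int chid)
{
	/* clear the enable bit; PFIFO will unload the context */
	nvkm_mask(device, NV50_PFIFO_CTX_TABLE(chid),
		  NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED, 0x00000000);
}
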
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 2e1b92f71d9e..9ad0d0e78a96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -1,21 +1,8 @@
-nvkm-y += nvkm/engine/gr/ctxnv40.o
-nvkm-y += nvkm/engine/gr/ctxnv50.o
-nvkm-y += nvkm/engine/gr/ctxgf100.o
-nvkm-y += nvkm/engine/gr/ctxgf108.o
-nvkm-y += nvkm/engine/gr/ctxgf104.o
-nvkm-y += nvkm/engine/gr/ctxgf110.o
-nvkm-y += nvkm/engine/gr/ctxgf117.o
-nvkm-y += nvkm/engine/gr/ctxgf119.o
-nvkm-y += nvkm/engine/gr/ctxgk104.o
-nvkm-y += nvkm/engine/gr/ctxgk20a.o
-nvkm-y += nvkm/engine/gr/ctxgk110.o
-nvkm-y += nvkm/engine/gr/ctxgk110b.o
-nvkm-y += nvkm/engine/gr/ctxgk208.o
-nvkm-y += nvkm/engine/gr/ctxgm107.o
-nvkm-y += nvkm/engine/gr/ctxgm204.o
-nvkm-y += nvkm/engine/gr/ctxgm206.o
+nvkm-y += nvkm/engine/gr/base.o
 nvkm-y += nvkm/engine/gr/nv04.o
 nvkm-y += nvkm/engine/gr/nv10.o
+nvkm-y += nvkm/engine/gr/nv15.o
+nvkm-y += nvkm/engine/gr/nv17.o
 nvkm-y += nvkm/engine/gr/nv20.o
 nvkm-y += nvkm/engine/gr/nv25.o
 nvkm-y += nvkm/engine/gr/nv2a.o
@@ -23,18 +10,43 @@ nvkm-y += nvkm/engine/gr/nv30.o
 nvkm-y += nvkm/engine/gr/nv34.o
 nvkm-y += nvkm/engine/gr/nv35.o
 nvkm-y += nvkm/engine/gr/nv40.o
+nvkm-y += nvkm/engine/gr/nv44.o
 nvkm-y += nvkm/engine/gr/nv50.o
+nvkm-y += nvkm/engine/gr/g84.o
+nvkm-y += nvkm/engine/gr/gt200.o
+nvkm-y += nvkm/engine/gr/mcp79.o
+nvkm-y += nvkm/engine/gr/gt215.o
+nvkm-y += nvkm/engine/gr/mcp89.o
 nvkm-y += nvkm/engine/gr/gf100.o
-nvkm-y += nvkm/engine/gr/gf108.o
 nvkm-y += nvkm/engine/gr/gf104.o
+nvkm-y += nvkm/engine/gr/gf108.o
 nvkm-y += nvkm/engine/gr/gf110.o
 nvkm-y += nvkm/engine/gr/gf117.o
 nvkm-y += nvkm/engine/gr/gf119.o
 nvkm-y += nvkm/engine/gr/gk104.o
-nvkm-y += nvkm/engine/gr/gk20a.o
 nvkm-y += nvkm/engine/gr/gk110.o
 nvkm-y += nvkm/engine/gr/gk110b.o
 nvkm-y += nvkm/engine/gr/gk208.o
+nvkm-y += nvkm/engine/gr/gk20a.o
 nvkm-y += nvkm/engine/gr/gm107.o
 nvkm-y += nvkm/engine/gr/gm204.o
 nvkm-y += nvkm/engine/gr/gm206.o
+nvkm-y += nvkm/engine/gr/gm20b.o
+
+nvkm-y += nvkm/engine/gr/ctxnv40.o
+nvkm-y += nvkm/engine/gr/ctxnv50.o
+nvkm-y += nvkm/engine/gr/ctxgf100.o
+nvkm-y += nvkm/engine/gr/ctxgf104.o
+nvkm-y += nvkm/engine/gr/ctxgf108.o
+nvkm-y += nvkm/engine/gr/ctxgf110.o
+nvkm-y += nvkm/engine/gr/ctxgf117.o
+nvkm-y += nvkm/engine/gr/ctxgf119.o
+nvkm-y += nvkm/engine/gr/ctxgk104.o
+nvkm-y += nvkm/engine/gr/ctxgk110.o
+nvkm-y += nvkm/engine/gr/ctxgk110b.o
+nvkm-y += nvkm/engine/gr/ctxgk208.o
+nvkm-y += nvkm/engine/gr/ctxgk20a.o
+nvkm-y += nvkm/engine/gr/ctxgm107.o
+nvkm-y += nvkm/engine/gr/ctxgm204.o
+nvkm-y += nvkm/engine/gr/ctxgm206.o
+nvkm-y += nvkm/engine/gr/ctxgm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
new file mode 100644
index 000000000000..090765ff070d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+#include <engine/fifo.h>
+
+static void
+nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->tile)
+		gr->func->tile(gr, region, tile);
+}
+
+u64
+nvkm_gr_units(struct nvkm_gr *gr)
+{
+	if (gr->func->units)
+		return gr->func->units(gr);
+	return 0;
+}
+
+int
+nvkm_gr_tlb_flush(struct nvkm_gr *gr)
+{
+	if (gr->func->tlb_flush)
+		return gr->func->tlb_flush(gr);
+	return -ENODEV;
+}
+
+static int
+nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
+{
+	struct nvkm_gr *gr = nvkm_gr(oclass->engine);
+	int c = 0;
+
+	if (gr->func->object_get) {
+		int ret = gr->func->object_get(gr, index, &oclass->base);
+		if (oclass->base.oclass)
+			return index;
+		return ret;
+	}
+
+	while (gr->func->sclass[c].oclass) {
+		if (c++ == index) {
+			oclass->base = gr->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
+}
+
+static int
+nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nvkm_gr *gr = nvkm_gr(oclass->engine);
+	if (gr->func->chan_new)
+		return gr->func->chan_new(gr, chan, oclass, pobject);
+	return 0;
+}
+
+static void
+nvkm_gr_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	gr->func->intr(gr);
+}
+
+static int
+nvkm_gr_oneinit(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->oneinit)
+		return gr->func->oneinit(gr);
+	return 0;
+}
+
+static int
+nvkm_gr_init(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	return gr->func->init(gr);
+}
+
+static void *
+nvkm_gr_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->dtor)
+		return gr->func->dtor(gr);
+	return gr;
+}
+
+static const struct nvkm_engine_func
+nvkm_gr = {
+	.dtor = nvkm_gr_dtor,
+	.oneinit = nvkm_gr_oneinit,
+	.init = nvkm_gr_init,
+	.intr = nvkm_gr_intr,
+	.tile = nvkm_gr_tile,
+	.fifo.cclass = nvkm_gr_cclass_new,
+	.fifo.sclass = nvkm_gr_oclass_get,
+};
+
+int
+nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, u32 pmc_enable, bool enable, struct nvkm_gr *gr)
+{
+	gr->func = func;
+	return nvkm_engine_ctor(&nvkm_gr, device, index, pmc_enable,
+				enable, &gr->engine);
+}
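
nvkm_gr_oclass_get() above follows a small enumeration convention: return `index` when the index-th class exists, or the total class count when it does not, which lets callers probe the list length by walking indices upward. A stand-alone sketch of that convention (simplified types):

/* Stand-alone sketch of the class-enumeration convention used by
 * nvkm_gr_oclass_get(); simplified types, illustration only.
 */
struct sclass { int oclass; }; /* a zero oclass terminates the list */

static int
oclass_get(const struct sclass *list, int index, const struct sclass **out)
{
	int c = 0;

	while (list[c].oclass) {
		if (c++ == index) {
			*out = &list[index]; /* the requested entry exists */
			return index;
		}
	}
	return c; /* out of range: report how many classes there are */
}
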
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 57e2c5b13123..56f392d3d4fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -23,7 +23,6 @@
  */
 #include "ctxgf100.h"
 
-#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/mc.h>
 #include <subdev/timer.h>
@@ -1005,6 +1004,7 @@ void
 gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
 		      int shift, int buffer)
 {
+	struct nvkm_device *device = info->gr->base.engine.subdev.device;
 	if (info->data) {
 		if (shift >= 0) {
 			info->mmio->addr = addr;
@@ -1021,29 +1021,29 @@ gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
 			return;
 	}
 
-	nv_wr32(info->priv, addr, data);
+	nvkm_wr32(device, addr, data);
 }
 
 void
 gf100_grctx_generate_bundle(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418808, 0x00000000, s, b);
-	mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
 }
 
 void
 gf100_grctx_generate_pagepool(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -1053,13 +1053,13 @@ gf100_grctx_generate_pagepool(struct gf100_grctx *info)
 void
 gf100_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
-	const u32 attrib = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32 attrib = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	int gpc, tpc;
 	u32 bo = 0;
 
@@ -1067,91 +1067,95 @@ gf100_grctx_generate_attrib(struct gf100_grctx *info)
 	mmio_refn(info, 0x419848, 0x10000000, s, b);
 	mmio_wr32(info, 0x405830, (attrib << 16));
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			const u32 o = TPC_UNIT(gpc, tpc, 0x0520);
 			mmio_skip(info, o, (attrib << 16) | ++bo);
 			mmio_wr32(info, o, (attrib << 16) | --bo);
-			bo += impl->attrib_nr_max;
+			bo += grctx->attrib_nr_max;
 		}
 	}
 }
 
 void
-gf100_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gf100_grctx_generate_unkn(struct gf100_gr *gr)
 {
 }
 
 void
-gf100_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+gf100_grctx_generate_tpcid(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, id;
 
 	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tpc < priv->tpc_nr[gpc]) {
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
-				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x4e8), id);
+				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
 				id++;
 			}
 
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
 		}
 	}
 }
 
 void
-gf100_grctx_generate_r406028(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r406028(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 tmp[GPC_MAX / 8] = {}, i = 0;
-	for (i = 0; i < priv->gpc_nr; i++)
-		tmp[i / 8] |= priv->tpc_nr[i] << ((i % 8) * 4);
+	for (i = 0; i < gr->gpc_nr; i++)
+		tmp[i / 8] |= gr->tpc_nr[i] << ((i % 8) * 4);
 	for (i = 0; i < 4; i++) {
-		nv_wr32(priv, 0x406028 + (i * 4), tmp[i]);
-		nv_wr32(priv, 0x405870 + (i * 4), tmp[i]);
+		nvkm_wr32(device, 0x406028 + (i * 4), tmp[i]);
+		nvkm_wr32(device, 0x405870 + (i * 4), tmp[i]);
 	}
 }
 
 void
-gf100_grctx_generate_r4060a8(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u8  tpcnr[GPC_MAX], data[TPC_MAX];
 	int gpc, tpc, i;
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 	memset(data, 0x1f, sizeof(data));
 
 	gpc = -1;
-	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
 		tpcnr[gpc]--;
 		data[tpc] = gpc;
 	}
 
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+		nvkm_wr32(device, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
 }
 
 void
-gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r418bb8(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 data[6] = {}, data2[2] = {};
 	u8  tpcnr[GPC_MAX];
 	u8  shift, ntpcv;
 	int gpc, tpc, i;
 
 	/* calculate first set of magics */
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 
 	gpc = -1;
-	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
 		tpcnr[gpc]--;
 
@@ -1163,7 +1167,7 @@ gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
 
 	/* and the second... */
 	shift = 0;
-	ntpcv = priv->tpc_total;
+	ntpcv = gr->tpc_total;
 	while (!(ntpcv & (1 << 4))) {
 		ntpcv <<= 1;
 		shift++;
@@ -1176,202 +1180,211 @@ gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
 		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
 
 	/* GPC_BROADCAST */
-	nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
 
 	/* GPC_BROADCAST.TP_BROADCAST */
-	nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr | data2[0]);
-	nv_wr32(priv, 0x419be4, data2[1]);
+	nvkm_wr32(device, 0x419bd0, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr | data2[0]);
+	nvkm_wr32(device, 0x419be4, data2[1]);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x419b00 + (i * 4), data[i]);
 
 	/* UNK78xx */
-	nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
 }
 
 void
-gf100_grctx_generate_r406800(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r406800(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u64 tpc_mask = 0, tpc_set = 0;
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc;
 	int i, a, b;
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++)
-		tpc_mask |= ((1ULL << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++)
+		tpc_mask |= ((1ULL << gr->tpc_nr[gpc]) - 1) << (gpc * 8);
 
 	for (i = 0, gpc = -1, b = -1; i < 32; i++) {
-		a = (i * (priv->tpc_total - 1)) / 32;
+		a = (i * (gr->tpc_total - 1)) / 32;
 		if (a != b) {
 			b = a;
 			do {
-				gpc = (gpc + 1) % priv->gpc_nr;
+				gpc = (gpc + 1) % gr->gpc_nr;
 			} while (!tpcnr[gpc]);
-			tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+			tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 			tpc_set |= 1ULL << ((gpc * 8) + tpc);
 		}
 
-		nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
-		nv_wr32(priv, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask));
-		if (priv->gpc_nr > 4) {
-			nv_wr32(priv, 0x406804 + (i * 0x20), upper_32_bits(tpc_set));
-			nv_wr32(priv, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
+		nvkm_wr32(device, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
+		nvkm_wr32(device, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask));
+		if (gr->gpc_nr > 4) {
+			nvkm_wr32(device, 0x406804 + (i * 0x20), upper_32_bits(tpc_set));
+			nvkm_wr32(device, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
 		}
 	}
 }
 
 void
-gf100_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
-
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
-
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
-
-	nv_wr32(priv, 0x404154, 0x00000000);
-
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
-
-	gf100_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gf100_grctx_generate_r4060a8(priv);
-	gf100_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
-
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+
+	nvkm_mc_unk260(device->mc, 0);
+
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
+
+	nvkm_wr32(device, 0x404154, 0x00000000);
+
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
+
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gf100_grctx_generate_r4060a8(gr);
+	gf100_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
+
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
+	nvkm_mc_unk260(device->mc, 1);
 }
 
 int
-gf100_grctx_generate(struct gf100_gr_priv *priv)
+gf100_grctx_generate(struct gf100_gr *gr)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_gpuobj *chan;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_memory *chan;
 	struct gf100_grctx info;
 	int ret, i;
+	u64 addr;
 
 	/* allocate memory for a "channel", which we'll use to generate
 	 * the default context values
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x80000 + priv->size,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x80000 + gr->size,
+			      0x1000, true, &chan);
 	if (ret) {
-		nv_error(priv, "failed to allocate channel memory, %d\n", ret);
+		nvkm_error(subdev, "failed to allocate chan memory, %d\n", ret);
 		return ret;
 	}
 
+	addr = nvkm_memory_addr(chan);
+
 	/* PGD pointer */
-	nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
-	nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
-	nv_wo32(chan, 0x0208, 0xffffffff);
-	nv_wo32(chan, 0x020c, 0x000000ff);
+	nvkm_kmap(chan);
+	nvkm_wo32(chan, 0x0200, lower_32_bits(addr + 0x1000));
+	nvkm_wo32(chan, 0x0204, upper_32_bits(addr + 0x1000));
+	nvkm_wo32(chan, 0x0208, 0xffffffff);
+	nvkm_wo32(chan, 0x020c, 0x000000ff);
 
 	/* PGT[0] pointer */
-	nv_wo32(chan, 0x1000, 0x00000000);
-	nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
+	nvkm_wo32(chan, 0x1000, 0x00000000);
+	nvkm_wo32(chan, 0x1004, 0x00000001 | (addr + 0x2000) >> 8);
 
 	/* identity-map the whole "channel" into its own vm */
-	for (i = 0; i < chan->size / 4096; i++) {
-		u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
-		nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
-		nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
+	for (i = 0; i < nvkm_memory_size(chan) / 4096; i++) {
+		u64 addr = ((nvkm_memory_addr(chan) + (i * 4096)) >> 8) | 1;
+		nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
+		nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
 	}
 
 	/* context pointer (virt) */
-	nv_wo32(chan, 0x0210, 0x00080004);
-	nv_wo32(chan, 0x0214, 0x00000000);
+	nvkm_wo32(chan, 0x0210, 0x00080004);
+	nvkm_wo32(chan, 0x0214, 0x00000000);
+	nvkm_done(chan);
 
-	bar->flush(bar);
-
-	nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
-	nv_wr32(priv, 0x100cbc, 0x80000001);
-	nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
+	nvkm_wr32(device, 0x100cb8, (addr + 0x1000) >> 8);
+	nvkm_wr32(device, 0x100cbc, 0x80000001);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+			break;
+	);
 
 	/* setup default state for mmio list construction */
-	info.priv = priv;
-	info.data = priv->mmio_data;
-	info.mmio = priv->mmio_list;
+	info.gr = gr;
+	info.data = gr->mmio_data;
+	info.mmio = gr->mmio_list;
 	info.addr = 0x2000 + (i * 8);
 	info.buffer_nr = 0;
 
 	/* make channel current */
-	if (priv->firmware) {
-		nv_wr32(priv, 0x409840, 0x00000030);
-		nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
-		nv_wr32(priv, 0x409504, 0x00000003);
-		if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
-			nv_error(priv, "load_ctx timeout\n");
-
-		nv_wo32(chan, 0x8001c, 1);
-		nv_wo32(chan, 0x80020, 0);
-		nv_wo32(chan, 0x80028, 0);
-		nv_wo32(chan, 0x8002c, 0);
-		bar->flush(bar);
+	if (gr->firmware) {
+		nvkm_wr32(device, 0x409840, 0x00000030);
+		nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+		nvkm_wr32(device, 0x409504, 0x00000003);
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x00000010)
+				break;
+		);
+
+		nvkm_kmap(chan);
+		nvkm_wo32(chan, 0x8001c, 1);
+		nvkm_wo32(chan, 0x80020, 0);
+		nvkm_wo32(chan, 0x80028, 0);
+		nvkm_wo32(chan, 0x8002c, 0);
+		nvkm_done(chan);
 	} else {
-		nv_wr32(priv, 0x409840, 0x80000000);
-		nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
-		nv_wr32(priv, 0x409504, 0x00000001);
-		if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000))
-			nv_error(priv, "HUB_SET_CHAN timeout\n");
+		nvkm_wr32(device, 0x409840, 0x80000000);
+		nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+		nvkm_wr32(device, 0x409504, 0x00000001);
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x80000000)
+				break;
+		);
 	}
 
-	oclass->main(priv, &info);
+	grctx->main(gr, &info);
 
 	/* trigger a context unload by unsetting the "next channel valid" bit
 	 * and faking a context switch interrupt
 	 */
-	nv_mask(priv, 0x409b04, 0x80000000, 0x00000000);
-	nv_wr32(priv, 0x409000, 0x00000100);
-	if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) {
-		nv_error(priv, "grctx template channel unload timeout\n");
+	nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
+	nvkm_wr32(device, 0x409000, 0x00000100);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x409b00) & 0x80000000))
+			break;
+	) < 0) {
 		ret = -EBUSY;
 		goto done;
 	}
 
-	priv->data = kmalloc(priv->size, GFP_KERNEL);
-	if (priv->data) {
-		for (i = 0; i < priv->size; i += 4)
-			priv->data[i / 4] = nv_ro32(chan, 0x80000 + i);
+	gr->data = kmalloc(gr->size, GFP_KERNEL);
+	if (gr->data) {
+		nvkm_kmap(chan);
+		for (i = 0; i < gr->size; i += 4)
+			gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
+		nvkm_done(chan);
 		ret = 0;
 	} else {
 		ret = -ENOMEM;
 	}
 
 done:
-	nvkm_gpuobj_ref(NULL, &chan);
+	nvkm_memory_del(&chan);
 	return ret;
 }
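
Throughout this conversion, nv_wait() calls become nvkm_msec() loops: poll a register until the condition in the body breaks out, with a negative result signalling that the budget (2000ms here) expired. A plain-C approximation of the pattern (read_reg() and now_ms() are hypothetical helpers):

/* Plain-C approximation of the nvkm_msec() poll-until-timeout
 * pattern; read_reg() and now_ms() are hypothetical helpers.
 */
#include <stdint.h>

extern uint32_t read_reg(uint32_t addr);
extern uint64_t now_ms(void);

/* Wait up to `budget` ms for any bit in `mask` to become set at
 * `addr`.  Returns the remaining budget (>= 0) on success and -1 on
 * timeout, matching the sign convention the callers above test.
 */
static int64_t
poll_mask(uint32_t addr, uint32_t mask, uint64_t budget)
{
	uint64_t start = now_ms();

	do {
		if (read_reg(addr) & mask)
			return (int64_t)(budget - (now_ms() - start));
	} while (now_ms() - start < budget);

	return -1; /* the condition never became true */
}
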
 
-struct nvkm_oclass *
-gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf100_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
@@ -1387,4 +1400,4 @@ gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 3676a3342bc5..3c64040ec5a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -3,7 +3,7 @@
 #include "gf100.h"
 
 struct gf100_grctx {
-	struct gf100_gr_priv *priv;
+	struct gf100_gr *gr;
 	struct gf100_gr_data *data;
 	struct gf100_gr_mmio *mmio;
 	int buffer_nr;
@@ -19,12 +19,11 @@ void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int)
 #define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
 #define mmio_wr32(a,b,c) mmio_refn((a), (b), (c),  0, -1)
 
-struct gf100_grctx_oclass {
-	struct nvkm_oclass base;
+struct gf100_grctx_func {
 	/* main context generation function */
-	void  (*main)(struct gf100_gr_priv *, struct gf100_grctx *);
+	void  (*main)(struct gf100_gr *, struct gf100_grctx *);
 	/* context-specific modify-on-first-load list generation function */
-	void  (*unkn)(struct gf100_gr_priv *);
+	void  (*unkn)(struct gf100_gr *);
 	/* mmio context data */
 	const struct gf100_gr_pack *hub;
 	const struct gf100_gr_pack *gpc;
@@ -50,60 +49,61 @@ struct gf100_grctx_oclass {
 	u32 alpha_nr;
 };
 
-static inline const struct gf100_grctx_oclass *
-gf100_grctx_impl(struct gf100_gr_priv *priv)
-{
-	return (void *)nv_engine(priv)->cclass;
-}
-
-extern struct nvkm_oclass *gf100_grctx_oclass;
-int  gf100_grctx_generate(struct gf100_gr_priv *);
-void gf100_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gf100_grctx;
+int  gf100_grctx_generate(struct gf100_gr *);
+void gf100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gf100_grctx_generate_bundle(struct gf100_grctx *);
 void gf100_grctx_generate_pagepool(struct gf100_grctx *);
 void gf100_grctx_generate_attrib(struct gf100_grctx *);
-void gf100_grctx_generate_unkn(struct gf100_gr_priv *);
-void gf100_grctx_generate_tpcid(struct gf100_gr_priv *);
-void gf100_grctx_generate_r406028(struct gf100_gr_priv *);
-void gf100_grctx_generate_r4060a8(struct gf100_gr_priv *);
-void gf100_grctx_generate_r418bb8(struct gf100_gr_priv *);
-void gf100_grctx_generate_r406800(struct gf100_gr_priv *);
-
-extern struct nvkm_oclass *gf108_grctx_oclass;
+void gf100_grctx_generate_unkn(struct gf100_gr *);
+void gf100_grctx_generate_tpcid(struct gf100_gr *);
+void gf100_grctx_generate_r406028(struct gf100_gr *);
+void gf100_grctx_generate_r4060a8(struct gf100_gr *);
+void gf100_grctx_generate_r418bb8(struct gf100_gr *);
+void gf100_grctx_generate_r406800(struct gf100_gr *);
+
+extern const struct gf100_grctx_func gf108_grctx;
 void gf108_grctx_generate_attrib(struct gf100_grctx *);
-void gf108_grctx_generate_unkn(struct gf100_gr_priv *);
+void gf108_grctx_generate_unkn(struct gf100_gr *);
 
-extern struct nvkm_oclass *gf104_grctx_oclass;
-extern struct nvkm_oclass *gf110_grctx_oclass;
+extern const struct gf100_grctx_func gf104_grctx;
+extern const struct gf100_grctx_func gf110_grctx;
 
-extern struct nvkm_oclass *gf117_grctx_oclass;
+extern const struct gf100_grctx_func gf117_grctx;
 void gf117_grctx_generate_attrib(struct gf100_grctx *);
 
-extern struct nvkm_oclass *gf119_grctx_oclass;
+extern const struct gf100_grctx_func gf119_grctx;
 
-extern struct nvkm_oclass *gk104_grctx_oclass;
-extern struct nvkm_oclass *gk20a_grctx_oclass;
-void gk104_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gk104_grctx;
+extern const struct gf100_grctx_func gk20a_grctx;
+void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gk104_grctx_generate_bundle(struct gf100_grctx *);
 void gk104_grctx_generate_pagepool(struct gf100_grctx *);
-void gk104_grctx_generate_unkn(struct gf100_gr_priv *);
-void gk104_grctx_generate_r418bb8(struct gf100_gr_priv *);
-void gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *);
+void gk104_grctx_generate_unkn(struct gf100_gr *);
+void gk104_grctx_generate_r418bb8(struct gf100_gr *);
+void gk104_grctx_generate_rop_active_fbps(struct gf100_gr *);
+
 
-extern struct nvkm_oclass *gk110_grctx_oclass;
-extern struct nvkm_oclass *gk110b_grctx_oclass;
-extern struct nvkm_oclass *gk208_grctx_oclass;
+extern const struct gf100_grctx_func gk110_grctx;
+extern const struct gf100_grctx_func gk110b_grctx;
+extern const struct gf100_grctx_func gk208_grctx;
 
-extern struct nvkm_oclass *gm107_grctx_oclass;
+extern const struct gf100_grctx_func gm107_grctx;
 void gm107_grctx_generate_bundle(struct gf100_grctx *);
 void gm107_grctx_generate_pagepool(struct gf100_grctx *);
 void gm107_grctx_generate_attrib(struct gf100_grctx *);
 
-extern struct nvkm_oclass *gm204_grctx_oclass;
-void gm204_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gm204_grctx;
+void gm204_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
+void gm204_grctx_generate_tpcid(struct gf100_gr *);
+void gm204_grctx_generate_405b60(struct gf100_gr *);
 
-extern struct nvkm_oclass *gm206_grctx_oclass;
+extern const struct gf100_grctx_func gm206_grctx;
+extern const struct gf100_grctx_func gm20b_grctx;
 
 /* context init value lists */
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index c5a8d55e2cac..54fd74e9cca0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -79,17 +79,8 @@ gf104_grctx_pack_tpc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf104_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
@@ -105,4 +96,4 @@ gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 87c844a5f34b..505cdcbfc085 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -730,18 +730,18 @@ gf108_grctx_pack_tpc[] = {
 void
 gf108_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
-	const u32  alpha = impl->alpha_nr;
-	const u32   beta = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32  alpha = grctx->alpha_nr;
+	const u32   beta = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	const int timeslice_mode = 1;
 	const int max_batches = 0xffff;
 	u32 bo = 0;
-	u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+	u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
 	int gpc, tpc;
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -749,43 +749,35 @@ gf108_grctx_generate_attrib(struct gf100_grctx *info)
 	mmio_wr32(info, 0x405830, (beta << 16) | alpha);
 	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			const u32 a = alpha;
 			const u32 b =  beta;
 			const u32 t = timeslice_mode;
 			const u32 o = TPC_UNIT(gpc, tpc, 0x500);
 			mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
 			mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
-			bo += impl->attrib_nr_max;
+			bo += grctx->attrib_nr_max;
 			mmio_wr32(info, o + 0x44, (a << 16) | ao);
-			ao += impl->alpha_nr_max;
+			ao += grctx->alpha_nr_max;
 		}
 	}
 }
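
gf108's attrib generation carves a single VRAM allocation into per-TPC beta (attrib) slices followed by per-TPC alpha slices: bo walks the beta region from 0, and ao starts at attrib_nr_max * tpc_total, immediately after it. A worked example with made-up counts (tpc_total = 4) and the 0x324 sizes from the gf100 context table:

/* Worked example of the beta/alpha offsets computed above, with
 * made-up counts (tpc_total = 4) and the 0x324 sizes from the gf100
 * context table; compile and run to see the layout.
 */
#include <stdio.h>

int main(void)
{
	const unsigned attrib_nr_max = 0x324, alpha_nr_max = 0x324;
	const unsigned tpc_total = 4;
	unsigned bo = 0;                         /* beta slices start at 0 */
	unsigned ao = attrib_nr_max * tpc_total; /* alpha region follows   */
	unsigned tpc;

	for (tpc = 0; tpc < tpc_total; tpc++) {
		printf("tpc%u: beta@0x%x alpha@0x%x\n", tpc, bo, ao);
		bo += attrib_nr_max; /* next TPC's beta slice  */
		ao += alpha_nr_max;  /* next TPC's alpha slice */
	}
	return 0;
}
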
 
 void
-gf108_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gf108_grctx_generate_unkn(struct gf100_gr *gr)
 {
-	nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001);
-	nv_mask(priv, 0x41980c, 0x00000010, 0x00000010);
-	nv_mask(priv, 0x419814, 0x00000004, 0x00000004);
-	nv_mask(priv, 0x4064c0, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x405800, 0x08000000, 0x08000000);
-	nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x418c6c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
+	nvkm_mask(device, 0x419814, 0x00000004, 0x00000004);
+	nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
 }
 
-struct nvkm_oclass *
-gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf108_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf108_grctx_generate_unkn,
 	.hub   = gf108_grctx_pack_hub,
@@ -803,4 +795,4 @@ gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x324,
 	.alpha_nr = 0x218,
-}.base;
+};
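
[annotation] gf108_grctx_generate_unkn() now fetches the nvkm_device once and calls nvkm_mask() on it instead of nv_mask(priv, ...). nvkm_mask() is a read-modify-write: clear the masked bits, OR in the new value, return the old word. A standalone sketch of that semantic, with a one-register array standing in for MMIO space:

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[1];	/* one-register stand-in for MMIO space */

static uint32_t rd32(uint32_t addr) { return regs[addr]; }
static void     wr32(uint32_t addr, uint32_t data) { regs[addr] = data; }

/* Same shape as nvkm_mask(device, addr, mask, value): clear the masked
 * bits, OR in the new value, hand back the old word. */
static uint32_t
mask32(uint32_t addr, uint32_t mask, uint32_t value)
{
	uint32_t temp = rd32(addr);
	wr32(addr, (temp & ~mask) | value);
	return temp;
}

int main(void)
{
	wr32(0, 0x12345678);
	mask32(0, 0x000000ff, 0x00000001);	/* e.g. setting bit 0, as at 0x418c6c */
	printf("%08x\n", (unsigned)rd32(0));	/* -> 12345601 */
	return 0;
}
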
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index b3acd931b978..7df398b53f8f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -330,17 +330,8 @@ gf110_grctx_pack_gpc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc8),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf110_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
@@ -356,4 +347,4 @@ gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 9bbe2c97552e..b5b875928aba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -182,18 +182,18 @@ gf117_grctx_pack_ppc[] = {
 void
 gf117_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
-	const u32  alpha = impl->alpha_nr;
-	const u32   beta = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32  alpha = grctx->alpha_nr;
+	const u32   beta = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	const int timeslice_mode = 1;
 	const int max_batches = 0xffff;
 	u32 bo = 0;
-	u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+	u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
 	int gpc, ppc;
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -201,68 +201,60 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
 	mmio_wr32(info, 0x405830, (beta << 16) | alpha);
 	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) {
-			const u32 a = alpha * priv->ppc_tpc_nr[gpc][ppc];
-			const u32 b =  beta * priv->ppc_tpc_nr[gpc][ppc];
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++) {
+			const u32 a = alpha * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 b =  beta * gr->ppc_tpc_nr[gpc][ppc];
 			const u32 t = timeslice_mode;
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
 			mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
 			mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
-			bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 			mmio_wr32(info, o + 0xe4, (a << 16) | ao);
-			ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 		}
 	}
 }
 
 void
-gf117_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	int i;
 
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+	nvkm_mc_unk260(device->mc, 0);
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gf100_grctx_generate_r4060a8(priv);
-	gk104_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gf100_grctx_generate_r4060a8(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
+	nvkm_mc_unk260(device->mc, 1);
 }
 
-struct nvkm_oclass *
-gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xd7),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf117_grctx = {
 	.main  = gf117_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gf117_grctx_pack_hub,
@@ -281,4 +273,4 @@ gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x324,
-}.base;
+};
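
[annotation] The bo/ao bookkeeping in gf117_grctx_generate_attrib() above carves one VRAM allocation into per-PPC beta (attrib) and alpha circular buffers: alpha space begins after attrib_nr_max slots for every TPC, and each PPC advances both offsets by its own TPC count. The same arithmetic standalone, using gf117's attrib_nr_max/alpha_nr_max from this file and an invented two-GPC topology:

#include <stdio.h>

int main(void)
{
	const unsigned attrib_nr_max = 0x7ff, alpha_nr_max = 0x324;
	const int gpc_nr = 2;
	const int ppc_nr[2] = { 1, 2 };
	const int ppc_tpc_nr[2][2] = { { 4, 0 }, { 2, 2 } };
	const int tpc_total = 8;			/* 4 + 2 + 2 */
	unsigned bo = 0;
	unsigned ao = bo + attrib_nr_max * tpc_total;	/* alpha after all beta */

	for (int gpc = 0; gpc < gpc_nr; gpc++) {
		for (int ppc = 0; ppc < ppc_nr[gpc]; ppc++) {
			printf("gpc%d/ppc%d: beta @ %#x, alpha @ %#x\n",
			       gpc, ppc, bo, ao);
			bo += attrib_nr_max * ppc_tpc_nr[gpc][ppc];
			ao += alpha_nr_max * ppc_tpc_nr[gpc][ppc];
		}
	}
	return 0;
}
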
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 8d8761443809..605185b078be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -498,17 +498,8 @@ gf119_grctx_pack_tpc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xd9),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf119_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf108_grctx_generate_unkn,
 	.hub   = gf119_grctx_pack_hub,
@@ -526,4 +517,4 @@ gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x324,
 	.alpha_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index b12f6a9fd926..a843e3689c3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -843,27 +843,27 @@ gk104_grctx_pack_ppc[] = {
 void
 gk104_grctx_generate_bundle(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
-	const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
-				    impl->bundle_size / 0x20);
-	const u32 token_limit = impl->bundle_token_limit;
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
+				    grctx->bundle_size / 0x20);
+	const u32 token_limit = grctx->bundle_token_limit;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418808, 0x00000000, s, b);
-	mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
 }
 
 void
 gk104_grctx_generate_pagepool(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -872,31 +872,33 @@ gk104_grctx_generate_pagepool(struct gf100_grctx *info)
 }
 
 void
-gk104_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gk104_grctx_generate_unkn(struct gf100_gr *gr)
 {
-	nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001);
-	nv_mask(priv, 0x41980c, 0x00000010, 0x00000010);
-	nv_mask(priv, 0x41be08, 0x00000004, 0x00000004);
-	nv_mask(priv, 0x4064c0, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x405800, 0x08000000, 0x08000000);
-	nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x418c6c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
+	nvkm_mask(device, 0x41be08, 0x00000004, 0x00000004);
+	nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
 }
 
 void
-gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
+gk104_grctx_generate_r418bb8(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 data[6] = {}, data2[2] = {};
 	u8  tpcnr[GPC_MAX];
 	u8  shift, ntpcv;
 	int gpc, tpc, i;
 
 	/* calculate first set of magics */
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 
 	gpc = -1;
-	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
 		tpcnr[gpc]--;
 
@@ -908,7 +910,7 @@ gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
 
 	/* and the second... */
 	shift = 0;
-	ntpcv = priv->tpc_total;
+	ntpcv = gr->tpc_total;
 	while (!(ntpcv & (1 << 4))) {
 		ntpcv <<= 1;
 		shift++;
@@ -921,86 +923,79 @@ gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
 		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
 
 	/* GPC_BROADCAST */
-	nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
 
 	/* GPC_BROADCAST.TP_BROADCAST */
-	nv_wr32(priv, 0x41bfd0, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr | data2[0]);
-	nv_wr32(priv, 0x41bfe4, data2[1]);
+	nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr | data2[0]);
+	nvkm_wr32(device, 0x41bfe4, data2[1]);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
 
 	/* UNK78xx */
-	nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
 }
 
 void
-gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+gk104_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
 {
-	const u32 fbp_count = nv_rd32(priv, 0x120074);
-	nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
-	nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 fbp_count = nvkm_rd32(device, 0x120074);
+	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
 void
-gk104_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	int i;
 
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+	nvkm_mc_unk260(device->mc, 0);
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gk104_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 
-	nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
-	gk104_grctx_generate_rop_active_fbps(priv);
-	nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+	gk104_grctx_generate_rop_active_fbps(gr);
+	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
+	nvkm_mc_unk260(device->mc, 1);
 
-	nv_mask(priv, 0x418800, 0x00200000, 0x00200000);
-	nv_mask(priv, 0x41be10, 0x00800000, 0x00800000);
+	nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
+	nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
 }
 
-struct nvkm_oclass *
-gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk104_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk104_grctx_pack_hub,
@@ -1021,4 +1016,4 @@ gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
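
[annotation] gk104_grctx_generate_r418bb8() deals TPCs out to GPCs round-robin, skipping any GPC whose quota is already spent. The hunk elides the packing of the result into data[]; to the best of my reading of the surrounding code it is six 5-bit GPC fields per 32-bit word, and the sketch below assumes that layout with invented counts:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	const int gpc_nr = 3;
	const uint8_t tpc_nr[3] = { 2, 1, 2 };
	uint8_t tpcnr[3];
	uint32_t data[6] = { 0 };
	const int tpc_total = 5;
	int gpc = -1;

	memcpy(tpcnr, tpc_nr, sizeof(tpc_nr));
	for (int tpc = 0; tpc < tpc_total; tpc++) {
		do {
			gpc = (gpc + 1) % gpc_nr;	/* round-robin walk */
		} while (!tpcnr[gpc]);			/* skip spent GPCs */
		tpcnr[gpc]--;
		/* assumed layout: six 5-bit GPC ids per 32-bit word */
		data[tpc / 6] |= (uint32_t)gpc << ((tpc % 6) * 5);
	}
	for (int i = 0; i < 6; i++)
		printf("data[%d] = %08x\n", i, (unsigned)data[i]);
	return 0;
}
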
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index b3f58be04e9c..7b95ec2fe453 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -808,17 +808,8 @@ gk110_grctx_pack_ppc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xf0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk110_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk110_grctx_pack_hub,
@@ -839,4 +830,4 @@ gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index b11c26794fde..048b1152da44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -69,17 +69,8 @@ gk110b_grctx_pack_tpc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xf1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk110b_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk110_grctx_pack_hub,
@@ -100,4 +91,4 @@ gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 6e8ce9fc311a..67b7a1b43617 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -530,17 +530,8 @@ gk208_grctx_pack_ppc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk208_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk208_grctx_pack_hub,
@@ -561,4 +552,4 @@ gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 2f241f6f0f0a..ddaa16a71c84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,34 +20,60 @@
  * DEALINGS IN THE SOFTWARE.
  */
 #include "ctxgf100.h"
+#include "gf100.h"
 
-static const struct gf100_gr_pack
-gk20a_grctx_pack_mthd[] = {
-	{ gk104_grctx_init_a097_0, 0xa297 },
-	{ gf100_grctx_init_902d_0, 0x902d },
-	{}
-};
+#include <subdev/mc.h>
+
+static void
+gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	int idle_timeout_save;
+	int i;
+
+	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+	gf100_gr_wait_idle(gr);
+
+	idle_timeout_save = nvkm_rd32(device, 0x404154);
+	nvkm_wr32(device, 0x404154, 0x00000000);
+
+	grctx->attrib(info);
+
+	grctx->unkn(gr);
+
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
+
+	for (i = 0; i < 8; i++)
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 
-struct nvkm_oclass *
-gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-	.main  = gk104_grctx_generate_main,
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+	gk104_grctx_generate_rop_active_fbps(gr);
+
+	nvkm_mask(device, 0x5044b0, 0x8000000, 0x8000000);
+
+	gf100_gr_wait_idle(gr);
+
+	nvkm_wr32(device, 0x404154, idle_timeout_save);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_mthd(gr, gr->fuc_method);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_icmd(gr, gr->fuc_bundle);
+	grctx->pagepool(info);
+	grctx->bundle(info);
+}
+
+const struct gf100_grctx_func
+gk20a_grctx = {
+	.main  = gk20a_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
-	.hub   = gk104_grctx_pack_hub,
-	.gpc   = gk104_grctx_pack_gpc,
-	.zcull = gf100_grctx_pack_zcull,
-	.tpc   = gk104_grctx_pack_tpc,
-	.ppc   = gk104_grctx_pack_ppc,
-	.icmd  = gk104_grctx_pack_icmd,
-	.mthd  = gk20a_grctx_pack_mthd,
 	.bundle = gk104_grctx_generate_bundle,
 	.bundle_size = 0x1800,
 	.bundle_min_gpm_fifo_depth = 0x62,
@@ -59,4 +85,4 @@ gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0x240,
 	.alpha_nr_max = 0x648 + (0x648 / 2),
 	.alpha_nr = 0x648,
-}.base;
+};
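
[annotation] The new gk20a main differs from gk104's in two ways visible above: it drives generation from firmware-supplied lists (gr->fuc_sw_ctx, gr->fuc_method, gr->fuc_bundle) rather than the static packs, and it brackets the work by saving, zeroing and restoring 0x404154, which the code treats as an idle timeout. The save/zero/restore shape in miniature; apart from that register's role, everything here is illustrative:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg_404154 = 0x00000400;	/* pretend MMIO register */

static uint32_t rd32(void) { return reg_404154; }
static void wr32(uint32_t v) { reg_404154 = v; }

int main(void)
{
	uint32_t idle_timeout_save = rd32();	/* save current setting */

	wr32(0x00000000);			/* quiesce while generating */
	/* ... golden context generation, with wait-for-idle steps ... */
	wr32(idle_timeout_save);		/* put it back afterwards */

	printf("restored %08x\n", (unsigned)rd32());
	return 0;
}
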
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index fbeaae3ae6ce..95f59e3169f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -863,27 +863,27 @@ gm107_grctx_pack_ppc[] = {
 void
 gm107_grctx_generate_bundle(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
-	const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
-				    impl->bundle_size / 0x20);
-	const u32 token_limit = impl->bundle_token_limit;
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
+				    grctx->bundle_size / 0x20);
+	const u32 token_limit = grctx->bundle_token_limit;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418e24, 0x00000000, s, b);
-	mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x418e28, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
 }
 
 void
 gm107_grctx_generate_pagepool(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -895,17 +895,17 @@ gm107_grctx_generate_pagepool(struct gf100_grctx *info)
 void
 gm107_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = (void *)gf100_grctx_impl(priv);
-	const u32  alpha = impl->alpha_nr;
-	const u32 attrib = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32  alpha = grctx->alpha_nr;
+	const u32 attrib = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	const int max_batches = 0xffff;
 	u32 bo = 0;
-	u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+	u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
 	int gpc, ppc, n = 0;
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -914,97 +914,90 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
 	mmio_wr32(info, 0x405830, (attrib << 16) | alpha);
 	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++, n++) {
-			const u32 as =  alpha * priv->ppc_tpc_nr[gpc][ppc];
-			const u32 bs = attrib * priv->ppc_tpc_nr[gpc][ppc];
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+			const u32 as =  alpha * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
 			const u32 u = 0x418ea0 + (n * 0x04);
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
 			mmio_wr32(info, o + 0xc0, bs);
 			mmio_wr32(info, o + 0xf4, bo);
-			bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 			mmio_wr32(info, o + 0xe4, as);
 			mmio_wr32(info, o + 0xf8, ao);
-			ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 			mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
 		}
 	}
 }
 
-static void
-gm107_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+void
+gm107_grctx_generate_tpcid(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, id;
 
 	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tpc < priv->tpc_nr[gpc]) {
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
-				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
 				id++;
 			}
 
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
 		}
 	}
 }
 
 static void
-gm107_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	int i;
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gm107_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gk104_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gm107_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
-	nv_wr32(priv, 0x4064d0, 0x00000001);
+	nvkm_wr32(device, 0x4064d0, 0x00000001);
 	for (i = 1; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
-	nv_wr32(priv, 0x406500, 0x00000001);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+	nvkm_wr32(device, 0x406500, 0x00000001);
 
-	nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
-	gk104_grctx_generate_rop_active_fbps(priv);
+	gk104_grctx_generate_rop_active_fbps(gr);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
 
-	nv_mask(priv, 0x419e00, 0x00808080, 0x00808080);
-	nv_mask(priv, 0x419ccc, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x419f80, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x419f88, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
+	nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419f80, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
 }
 
-struct nvkm_oclass *
-gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gm107_grctx = {
 	.main  = gm107_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm107_grctx_pack_hub,
@@ -1025,4 +1018,4 @@ gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0xaa0,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
-}.base;
+};
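
[annotation] gm107's bundle setup uses the same clamp as gk104's earlier in this patch: state_limit = min(bundle_min_gpm_fifo_depth, bundle_size / 0x20), with the pair written as one word to 0x4064c8. The arithmetic with stand-in values, since this hunk does not show gm107's actual tuning:

#include <stdio.h>
#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	/* stand-in tuning values, not gm107's actual ones */
	const uint32_t bundle_size = 0x3000;
	const uint32_t bundle_min_gpm_fifo_depth = 0x180;
	const uint32_t bundle_token_limit = 0x2c0;
	uint32_t state_limit = min_u32(bundle_min_gpm_fifo_depth,
				       bundle_size / 0x20);

	/* the word written to 0x4064c8 */
	printf("0x4064c8 <- %08x\n",
	       (unsigned)((state_limit << 16) | bundle_token_limit));
	return 0;
}
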
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
index ea8e66151aa8..170cbfdbe1ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
@@ -918,17 +918,18 @@ gm204_grctx_pack_ppc[] = {
  * PGRAPH context implementation
  ******************************************************************************/
 
-static void
-gm204_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+void
+gm204_grctx_generate_tpcid(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, id;
 
 	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tpc < priv->tpc_nr[gpc]) {
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
-				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
 				id++;
 			}
 		}
@@ -936,101 +937,95 @@ gm204_grctx_generate_tpcid(struct gf100_gr_priv *priv)
 }
 
 static void
-gm204_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+gm204_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
 {
-	const u32 fbp_count = nv_rd32(priv, 0x12006c);
-	nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
-	nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
+	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
-static void
-gm204_grctx_generate_405b60(struct gf100_gr_priv *priv)
+void
+gm204_grctx_generate_405b60(struct gf100_gr *gr)
 {
-	const u32 dist_nr = DIV_ROUND_UP(priv->tpc_total, 4);
-	u32 dist[TPC_MAX] = {};
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+	u32 dist[TPC_MAX / 4] = {};
 	u32 gpcs[GPC_MAX] = {};
 	u8  tpcnr[GPC_MAX];
 	int tpc, gpc, i;
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 
 	/* won't result in the same distribution as the binary driver where
 	 * some of the gpcs have more tpcs than others, but this shall do
 	 * for the moment.  the code for earlier gpus has this issue too.
 	 */
-	for (gpc = -1, i = 0; i < priv->tpc_total; i++) {
+	for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while(!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
 		gpcs[gpc] |= i << (tpc * 8);
 	}
 
 	for (i = 0; i < dist_nr; i++)
-		nv_wr32(priv, 0x405b60 + (i * 4), dist[i]);
-	for (i = 0; i < priv->gpc_nr; i++)
-		nv_wr32(priv, 0x405ba0 + (i * 4), gpcs[i]);
+		nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+	for (i = 0; i < gr->gpc_nr; i++)
+		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
 }
 
 void
-gm204_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	u32 tmp;
 	int i;
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gm204_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gk104_grctx_generate_r418bb8(priv);
+	gm204_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
 
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
-	nv_wr32(priv, 0x406500, 0x00000000);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+	nvkm_wr32(device, 0x406500, 0x00000000);
 
-	nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
-	gm204_grctx_generate_rop_active_fbps(priv);
+	gm204_grctx_generate_rop_active_fbps(gr);
 
-	for (tmp = 0, i = 0; i < priv->gpc_nr; i++)
-		tmp |= ((1 << priv->tpc_nr[i]) - 1) << (i * 4);
-	nv_wr32(priv, 0x4041c4, tmp);
+	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
+	nvkm_wr32(device, 0x4041c4, tmp);
 
-	gm204_grctx_generate_405b60(priv);
+	gm204_grctx_generate_405b60(gr);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000800);
-	gf100_gr_mthd(priv, oclass->mthd);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000800);
+	gf100_gr_mthd(gr, grctx->mthd);
 
-	nv_mask(priv, 0x418e94, 0xffffffff, 0xc4230000);
-	nv_mask(priv, 0x418e4c, 0xffffffff, 0x70000000);
+	nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
+	nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
 }
 
-struct nvkm_oclass *
-gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gm204_grctx = {
 	.main  = gm204_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm204_grctx_pack_hub,
@@ -1051,4 +1046,4 @@ gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0x400,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
-}.base;
+};
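
[annotation] gm204_grctx_generate_405b60() packs four (gpc << 4 | tpc) bytes into each 0x405b60 word, which is why the patch can shrink dist[] from TPC_MAX to TPC_MAX / 4 entries, and it mirrors the assignment into a per-GPC word for 0x405ba0. A standalone rerun of the loop with made-up counts; GPC_MAX/TPC_MAX values are illustrative:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define GPC_MAX 4
#define TPC_MAX (GPC_MAX * 8)

int main(void)
{
	const int gpc_nr = 2;
	const uint8_t tpc_nr[GPC_MAX] = { 3, 2 };
	uint32_t dist[TPC_MAX / 4] = { 0 }, gpcs[GPC_MAX] = { 0 };
	uint8_t tpcnr[GPC_MAX];
	const int tpc_total = 5;
	const int dist_nr = (tpc_total + 3) / 4;	/* DIV_ROUND_UP */
	int gpc = -1, tpc;

	memcpy(tpcnr, tpc_nr, sizeof(tpc_nr));
	for (int i = 0; i < tpc_total; i++) {
		do {
			gpc = (gpc + 1) % gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = tpc_nr[gpc] - tpcnr[gpc]--;

		dist[i / 4] |= (uint32_t)((gpc << 4) | tpc) << ((i % 4) * 8);
		gpcs[gpc] |= (uint32_t)i << (tpc * 8);
	}

	for (int i = 0; i < dist_nr; i++)
		printf("0x405b60+%02x = %08x\n", i * 4, (unsigned)dist[i]);
	for (int i = 0; i < gpc_nr; i++)
		printf("0x405ba0+%02x = %08x\n", i * 4, (unsigned)gpcs[i]);
	return 0;
}
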
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
index 91ec41617943..d6be6034c2c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
@@ -49,17 +49,8 @@ gm206_grctx_pack_gpc[] = {
 	{}
 };
 
-struct nvkm_oclass *
-gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x26),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gm206_grctx = {
 	.main  = gm204_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm204_grctx_pack_hub,
@@ -80,4 +71,4 @@ gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
 	.attrib_nr = 0x400,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
new file mode 100644
index 000000000000..670260402538
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "ctxgf100.h"
+
+static void
+gm20b_grctx_generate_r406028(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 tpc_per_gpc = 0;
+	int i;
+
+	for (i = 0; i < gr->gpc_nr; i++)
+		tpc_per_gpc |= gr->tpc_nr[i] << (4 * i);
+
+	nvkm_wr32(device, 0x406028, tpc_per_gpc);
+	nvkm_wr32(device, 0x405870, tpc_per_gpc);
+}
+
+static void
+gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	int idle_timeout_save;
+	int i, tmp;
+
+	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+	gf100_gr_wait_idle(gr);
+
+	idle_timeout_save = nvkm_rd32(device, 0x404154);
+	nvkm_wr32(device, 0x404154, 0x00000000);
+
+	grctx->attrib(info);
+
+	grctx->unkn(gr);
+
+	gm204_grctx_generate_tpcid(gr);
+	gm20b_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+
+	for (i = 0; i < 8; i++)
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+	gk104_grctx_generate_rop_active_fbps(gr);
+	nvkm_wr32(device, 0x408908, nvkm_rd32(device, 0x410108) | 0x80000000);
+
+	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
+	nvkm_wr32(device, 0x4041c4, tmp);
+
+	gm204_grctx_generate_405b60(gr);
+
+	gf100_gr_wait_idle(gr);
+
+	nvkm_wr32(device, 0x404154, idle_timeout_save);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_mthd(gr, gr->fuc_method);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_icmd(gr, gr->fuc_bundle);
+	grctx->pagepool(info);
+	grctx->bundle(info);
+}
+
+const struct gf100_grctx_func
+gm20b_grctx = {
+	.main  = gm20b_grctx_generate_main,
+	.unkn  = gk104_grctx_generate_unkn,
+	.bundle = gm107_grctx_generate_bundle,
+	.bundle_size = 0x1800,
+	.bundle_min_gpm_fifo_depth = 0x182,
+	.bundle_token_limit = 0x1c0,
+	.pagepool = gm107_grctx_generate_pagepool,
+	.pagepool_size = 0x8000,
+	.attrib = gm107_grctx_generate_attrib,
+	.attrib_nr_max = 0x600,
+	.attrib_nr = 0x400,
+	.alpha_nr_max = 0xc00,
+	.alpha_nr = 0x800,
+};
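
[annotation] gm20b's private r406028 variant packs each GPC's TPC count into a 4-bit nibble of one word, GPC0 in the lowest bits, then writes it to both 0x406028 and 0x405870. A quick standalone check of the packing, with invented counts:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int gpc_nr = 3;
	const uint8_t tpc_nr[3] = { 2, 1, 2 };
	uint32_t tpc_per_gpc = 0;

	for (int i = 0; i < gpc_nr; i++)
		tpc_per_gpc |= (uint32_t)tpc_nr[i] << (4 * i);

	/* written to both 0x406028 and 0x405870 */
	printf("tpc_per_gpc = %08x\n", (unsigned)tpc_per_gpc);	/* -> 00000212 */
	return 0;
}
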
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
index dc31462afe65..80a6b017af64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
@@ -111,7 +111,6 @@
 
 #include "ctxnv40.h"
 #include "nv40.h"
-#include <core/device.h>
 
 /* TODO:
  *  - get vs count from 0x1540
@@ -583,13 +582,13 @@ nv40_gr_construct_shader(struct nvkm_grctx *ctx)
 
 	offset += 0x0280/4;
 	for (i = 0; i < 16; i++, offset += 2)
-		nv_wo32(obj, offset * 4, 0x3f800000);
+		nvkm_wo32(obj, offset * 4, 0x3f800000);
 
 	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
 		for (i = 0; i < vs_nr_b0 * 6; i += 6)
-			nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
+			nvkm_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
 		for (i = 0; i < vs_nr_b1 * 4; i += 4)
-			nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
+			nvkm_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
 	}
 }
 
@@ -675,7 +674,7 @@ nv40_grctx_init(struct nvkm_device *device, u32 *size)
 	struct nvkm_grctx ctx = {
 		.device = device,
 		.mode = NVKM_GRCTX_PROG,
-		.data = ctxprog,
+		.ucode = ctxprog,
 		.ctxprog_max = 256,
 	};
 
@@ -684,9 +683,9 @@ nv40_grctx_init(struct nvkm_device *device, u32 *size)
 
 	nv40_grctx_generate(&ctx);
 
-	nv_wr32(device, 0x400324, 0);
+	nvkm_wr32(device, 0x400324, 0);
 	for (i = 0; i < ctx.ctxprog_len; i++)
-		nv_wr32(device, 0x400328, ctxprog[i]);
+		nvkm_wr32(device, 0x400328, ctxprog[i]);
 	*size = ctx.ctxvals_pos * 4;
 
 	kfree(ctxprog);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
index 8a89961956af..50e808e9f926 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
@@ -9,7 +9,8 @@ struct nvkm_grctx {
 		NVKM_GRCTX_PROG,
 		NVKM_GRCTX_VALS
 	} mode;
-	void *data;
+	u32 *ucode;
+	struct nvkm_gpuobj *data;
 
 	u32 ctxprog_max;
 	u32 ctxprog_len;
@@ -22,7 +23,7 @@ struct nvkm_grctx {
 static inline void
 cp_out(struct nvkm_grctx *ctx, u32 inst)
 {
-	u32 *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->ucode;
 
 	if (ctx->mode != NVKM_GRCTX_PROG)
 		return;
@@ -56,7 +57,7 @@ cp_ctx(struct nvkm_grctx *ctx, u32 reg, u32 length)
 static inline void
 cp_name(struct nvkm_grctx *ctx, int name)
 {
-	u32 *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->ucode;
 	int i;
 
 	if (ctx->mode != NVKM_GRCTX_PROG)
@@ -124,6 +125,6 @@ gr_def(struct nvkm_grctx *ctx, u32 reg, u32 val)
 	reg = (reg - 0x00400000) / 4;
 	reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
 
-	nv_wo32(ctx->data, reg * 4, val);
+	nvkm_wo32(ctx->data, reg * 4, val);
 }
 #endif
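
[annotation] The header change above splits the old untyped data pointer in two: ucode, the u32 ctxprog instruction stream written in PROG mode, and data, now a typed nvkm_gpuobj holding context values written in VALS mode, so cp_out()/cp_name() and gr_def() can no longer be pointed at the wrong storage. A userspace model of the two-mode emitter, with plain arrays standing in for the gpuobj:

#include <stdio.h>
#include <stdint.h>

enum grctx_mode { GRCTX_PROG, GRCTX_VALS };

struct grctx {
	enum grctx_mode mode;
	uint32_t *ucode;	/* ctxprog stream, PROG mode only */
	uint32_t *vals;		/* context image, VALS mode only */
	uint32_t ctxprog_len;
};

static void
cp_out(struct grctx *ctx, uint32_t inst)
{
	if (ctx->mode != GRCTX_PROG)
		return;
	ctx->ucode[ctx->ctxprog_len++] = inst;
}

static void
gr_def(struct grctx *ctx, uint32_t reg, uint32_t val)
{
	if (ctx->mode != GRCTX_VALS)
		return;
	ctx->vals[reg] = val;
}

int main(void)
{
	uint32_t ucode[4] = { 0 }, vals[4] = { 0 };
	struct grctx ctx = { GRCTX_PROG, ucode, vals, 0 };

	cp_out(&ctx, 0xdeadbeef);	/* lands in ucode[] */
	gr_def(&ctx, 1, 0x3f800000);	/* ignored: wrong mode */
	printf("ucode[0]=%08x vals[1]=%08x\n",
	       (unsigned)ucode[0], (unsigned)vals[1]);
	return 0;
}
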
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index 9c9528d2cd90..1e13278cf306 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -107,7 +107,6 @@
 
 #include "ctxnv40.h"
 
-#include <core/device.h>
 #include <subdev/fb.h>
 
 #define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
@@ -269,7 +268,7 @@ nv50_grctx_init(struct nvkm_device *device, u32 *size)
 	struct nvkm_grctx ctx = {
 		.device = device,
 		.mode = NVKM_GRCTX_PROG,
-		.data = ctxprog,
+		.ucode = ctxprog,
 		.ctxprog_max = 512,
 	};
 
@@ -277,9 +276,9 @@ nv50_grctx_init(struct nvkm_device *device, u32 *size)
 		return -ENOMEM;
 	nv50_grctx_generate(&ctx);
 
-	nv_wr32(device, 0x400324, 0);
+	nvkm_wr32(device, 0x400324, 0);
 	for (i = 0; i < ctx.ctxprog_len; i++)
-		nv_wr32(device, 0x400328, ctxprog[i]);
+		nvkm_wr32(device, 0x400328, ctxprog[i]);
 	*size = ctx.ctxvals_pos * 4;
 	kfree(ctxprog);
 	return 0;
@@ -299,7 +298,7 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
 	struct nvkm_device *device = ctx->device;
 	int i, j;
 	int offset, base;
-	u32 units = nv_rd32 (ctx->device, 0x1540);
+	u32 units = nvkm_rd32(device, 0x1540);
 
 	/* 0800: DISPATCH */
 	cp_ctx(ctx, 0x400808, 7);
@@ -570,7 +569,7 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
 		else if (device->chipset < 0xa0)
 			gr_def(ctx, 0x407d08, 0x00390040);
 		else {
-			if (nvkm_fb(device)->ram->type != NV_MEM_TYPE_GDDR5)
+			if (device->fb->ram->type != NVKM_RAM_TYPE_GDDR5)
 				gr_def(ctx, 0x407d08, 0x003d0040);
 			else
 				gr_def(ctx, 0x407d08, 0x003c0040);
@@ -784,9 +783,10 @@ nv50_gr_construct_mmio(struct nvkm_grctx *ctx)
 static void
 dd_emit(struct nvkm_grctx *ctx, int num, u32 val) {
 	int i;
-	if (val && ctx->mode == NVKM_GRCTX_VALS)
+	if (val && ctx->mode == NVKM_GRCTX_VALS) {
 		for (i = 0; i < num; i++)
-			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+			nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+	}
 	ctx->ctxvals_pos += num;
 }
 
@@ -1156,9 +1156,10 @@ nv50_gr_construct_mmio_ddata(struct nvkm_grctx *ctx)
 static void
 xf_emit(struct nvkm_grctx *ctx, int num, u32 val) {
 	int i;
-	if (val && ctx->mode == NVKM_GRCTX_VALS)
+	if (val && ctx->mode == NVKM_GRCTX_VALS) {
 		for (i = 0; i < num; i++)
-			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+			nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+	}
 	ctx->ctxvals_pos += num << 3;
 }
 
@@ -1190,7 +1191,7 @@ nv50_gr_construct_xfer1(struct nvkm_grctx *ctx)
 	int i;
 	int offset;
 	int size = 0;
-	u32 units = nv_rd32 (ctx->device, 0x1540);
+	u32 units = nvkm_rd32(device, 0x1540);
 
 	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
 	ctx->ctxvals_base = offset;
@@ -3273,7 +3274,7 @@ nv50_gr_construct_xfer2(struct nvkm_grctx *ctx)
 	struct nvkm_device *device = ctx->device;
 	int i;
 	u32 offset;
-	u32 units = nv_rd32 (ctx->device, 0x1540);
+	u32 units = nvkm_rd32(device, 0x1540);
 	int size = 0;
 
 	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
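
[annotation] dd_emit() and xf_emit() only gain braces and the nvkm_wo32() rename here, but the difference between them is worth seeing run: dd_emit() fills the context image densely, xf_emit() writes at a stride of eight words per element (i << 3), and both advance the cursor even when val is zero. In miniature, with an array standing in for ctx->data:

#include <stdio.h>
#include <stdint.h>

static uint32_t image[64];	/* stand-in for the context-values gpuobj */

static void dd_emit(int *pos, int num, uint32_t val)
{
	if (val) {
		for (int i = 0; i < num; i++)
			image[*pos + i] = val;		/* densely packed */
	}
	*pos += num;
}

static void xf_emit(int *pos, int num, uint32_t val)
{
	if (val) {
		for (int i = 0; i < num; i++)
			image[*pos + (i << 3)] = val;	/* stride of 8 words */
	}
	*pos += num << 3;
}

int main(void)
{
	int pos = 0;

	dd_emit(&pos, 2, 0x11111111);	/* fills image[0..1] */
	xf_emit(&pos, 2, 0x22222222);	/* fills image[2] and image[10] */
	printf("pos=%d image[2]=%08x image[10]=%08x\n",
	       pos, (unsigned)image[2], (unsigned)image[10]);
	return 0;
}
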
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
new file mode 100644
index 000000000000..ce913300539f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+#include <subdev/timer.h>
+
+static const struct nvkm_bitfield nv50_gr_status[] = {
+	{ 0x00000001, "BUSY" }, /* set when any bit is set */
+	{ 0x00000002, "DISPATCH" },
+	{ 0x00000004, "UNK2" },
+	{ 0x00000008, "UNK3" },
+	{ 0x00000010, "UNK4" },
+	{ 0x00000020, "UNK5" },
+	{ 0x00000040, "M2MF" },
+	{ 0x00000080, "UNK7" },
+	{ 0x00000100, "CTXPROG" },
+	{ 0x00000200, "VFETCH" },
+	{ 0x00000400, "CCACHE_PREGEOM" },
+	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
+	{ 0x00001000, "VCLIP" },
+	{ 0x00002000, "RATTR_APLANE" },
+	{ 0x00004000, "TRAST" },
+	{ 0x00008000, "CLIPID" },
+	{ 0x00010000, "ZCULL" },
+	{ 0x00020000, "ENG2D" },
+	{ 0x00040000, "RMASK" },
+	{ 0x00080000, "TPC_RAST" },
+	{ 0x00100000, "TPC_PROP" },
+	{ 0x00200000, "TPC_TEX" },
+	{ 0x00400000, "TPC_GEOM" },
+	{ 0x00800000, "TPC_MP" },
+	{ 0x01000000, "ROP" },
+	{}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_0[] = {
+	{ 0x01, "VFETCH" },
+	{ 0x02, "CCACHE" },
+	{ 0x04, "PREGEOM" },
+	{ 0x08, "POSTGEOM" },
+	{ 0x10, "VATTR" },
+	{ 0x20, "STRMOUT" },
+	{ 0x40, "VCLIP" },
+	{}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_1[] = {
+	{ 0x01, "TPC_RAST" },
+	{ 0x02, "TPC_PROP" },
+	{ 0x04, "TPC_TEX" },
+	{ 0x08, "TPC_GEOM" },
+	{ 0x10, "TPC_MP" },
+	{}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_2[] = {
+	{ 0x01, "RATTR" },
+	{ 0x02, "APLANE" },
+	{ 0x04, "TRAST" },
+	{ 0x08, "CLIPID" },
+	{ 0x10, "ZCULL" },
+	{ 0x20, "ENG2D" },
+	{ 0x40, "RMASK" },
+	{ 0x80, "ROP" },
+	{}
+};
+
+static void
+nvkm_gr_vstatus_print(struct nv50_gr *gr, int r,
+		      const struct nvkm_bitfield *units, u32 status)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	u32 stat = status;
+	u8  mask = 0x00;
+	char msg[64];
+	int i;
+
+	for (i = 0; units[i].name && status; i++) {
+		if ((status & 7) == 1)
+			mask |= (1 << i);
+		status >>= 3;
+	}
+
+	nvkm_snprintbf(msg, sizeof(msg), units, mask);
+	nvkm_error(subdev, "PGRAPH_VSTATUS%d: %08x [%s]\n", r, stat, msg);
+}
+
+int
+g84_gr_tlb_flush(struct nvkm_gr *base)
+{
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_timer *tmr = device->timer;
+	bool idle, timeout = false;
+	unsigned long flags;
+	char status[128];
+	u64 start;
+	u32 tmp;
+
+	spin_lock_irqsave(&gr->lock, flags);
+	nvkm_mask(device, 0x400500, 0x00000001, 0x00000000);
+
+	start = nvkm_timer_read(tmr);
+	do {
+		idle = true;
+
+		for (tmp = nvkm_rd32(device, 0x400380); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nvkm_rd32(device, 0x400384); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nvkm_rd32(device, 0x400388); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+	} while (!idle &&
+		 !(timeout = nvkm_timer_read(tmr) - start > 2000000000));
+
+	if (timeout) {
+		nvkm_error(subdev, "PGRAPH TLB flush idle timeout fail\n");
+
+		tmp = nvkm_rd32(device, 0x400700);
+		nvkm_snprintbf(status, sizeof(status), nv50_gr_status, tmp);
+		nvkm_error(subdev, "PGRAPH_STATUS %08x [%s]\n", tmp, status);
+
+		nvkm_gr_vstatus_print(gr, 0, nv50_gr_vstatus_0,
+				       nvkm_rd32(device, 0x400380));
+		nvkm_gr_vstatus_print(gr, 1, nv50_gr_vstatus_1,
+				       nvkm_rd32(device, 0x400384));
+		nvkm_gr_vstatus_print(gr, 2, nv50_gr_vstatus_2,
+				       nvkm_rd32(device, 0x400388));
+	}
+
+
+	nvkm_wr32(device, 0x100c80, 0x00000001);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+			break;
+	);
+	nvkm_mask(device, 0x400500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return timeout ? -EBUSY : 0;
+}
+
+static const struct nvkm_gr_func
+g84_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8297, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+g84_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&g84_gr, device, index, pgr);
+}
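
[annotation] The flush path in the new g84.c treats 0x400380/0x400384/0x400388 as packed 3-bit unit states and only proceeds once no field reads 1, the value the vstatus tables above decode as a busy unit. That scan, extracted and standalone; as in the driver, the meaning of values other than 1 is left unknown:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool units_idle(uint32_t status)
{
	/* walk 3-bit fields from the bottom; 1 marks a busy unit */
	for (uint32_t tmp = status; tmp; tmp >>= 3) {
		if ((tmp & 7) == 1)
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d %d\n",
	       units_idle(0x00000000),	/* all idle -> 1 */
	       units_idle(0x00000008));	/* second field busy -> 0 */
	return 0;
}
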
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index ca11ddb6ed46..f1358a564e3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -26,13 +26,12 @@
 #include "fuc/os.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
 #include <core/option.h>
-#include <engine/fifo.h>
 #include <subdev/fb.h>
 #include <subdev/mc.h>
+#include <subdev/pmu.h>
 #include <subdev/timer.h>
+#include <engine/fifo.h>
 
 #include <nvif/class.h>
 #include <nvif/unpack.h>
@@ -42,35 +41,36 @@
  ******************************************************************************/
 
 static void
-gf100_gr_zbc_clear_color(struct gf100_gr_priv *priv, int zbc)
+gf100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
 {
-	if (priv->zbc_color[zbc].format) {
-		nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]);
-		nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]);
-		nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]);
-		nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]);
-	}
-	nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format);
-	nv_wr32(priv, 0x405820, zbc);
-	nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	if (gr->zbc_color[zbc].format) {
+		nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]);
+		nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]);
+		nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]);
+		nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]);
+	}
+	nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format);
+	nvkm_wr32(device, 0x405820, zbc);
+	nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
 }
 
 static int
-gf100_gr_zbc_color_get(struct gf100_gr_priv *priv, int format,
+gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
 		       const u32 ds[4], const u32 l2[4])
 {
-	struct nvkm_ltc *ltc = nvkm_ltc(priv);
+	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int zbc = -ENOSPC, i;
 
 	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
-		if (priv->zbc_color[i].format) {
-			if (priv->zbc_color[i].format != format)
+		if (gr->zbc_color[i].format) {
+			if (gr->zbc_color[i].format != format)
 				continue;
-			if (memcmp(priv->zbc_color[i].ds, ds, sizeof(
-				   priv->zbc_color[i].ds)))
+			if (memcmp(gr->zbc_color[i].ds, ds, sizeof(
+				   gr->zbc_color[i].ds)))
 				continue;
-			if (memcmp(priv->zbc_color[i].l2, l2, sizeof(
-				   priv->zbc_color[i].l2))) {
+			if (memcmp(gr->zbc_color[i].l2, l2, sizeof(
+				   gr->zbc_color[i].l2))) {
 				WARN_ON(1);
 				return -EINVAL;
 			}
@@ -83,38 +83,39 @@ gf100_gr_zbc_color_get(struct gf100_gr_priv *priv, int format,
 	if (zbc < 0)
 		return zbc;
 
-	memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds));
-	memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2));
-	priv->zbc_color[zbc].format = format;
-	ltc->zbc_color_get(ltc, zbc, l2);
-	gf100_gr_zbc_clear_color(priv, zbc);
+	memcpy(gr->zbc_color[zbc].ds, ds, sizeof(gr->zbc_color[zbc].ds));
+	memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2));
+	gr->zbc_color[zbc].format = format;
+	nvkm_ltc_zbc_color_get(ltc, zbc, l2);
+	gf100_gr_zbc_clear_color(gr, zbc);
 	return zbc;
 }
 
 static void
-gf100_gr_zbc_clear_depth(struct gf100_gr_priv *priv, int zbc)
+gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
 {
-	if (priv->zbc_depth[zbc].format)
-		nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds);
-	nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format);
-	nv_wr32(priv, 0x405820, zbc);
-	nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	if (gr->zbc_depth[zbc].format)
+		nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds);
+	nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format);
+	nvkm_wr32(device, 0x405820, zbc);
+	nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
 }
 
 static int
-gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
+gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
 		       const u32 ds, const u32 l2)
 {
-	struct nvkm_ltc *ltc = nvkm_ltc(priv);
+	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int zbc = -ENOSPC, i;
 
 	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
-		if (priv->zbc_depth[i].format) {
-			if (priv->zbc_depth[i].format != format)
+		if (gr->zbc_depth[i].format) {
+			if (gr->zbc_depth[i].format != format)
 				continue;
-			if (priv->zbc_depth[i].ds != ds)
+			if (gr->zbc_depth[i].ds != ds)
 				continue;
-			if (priv->zbc_depth[i].l2 != l2) {
+			if (gr->zbc_depth[i].l2 != l2) {
 				WARN_ON(1);
 				return -EINVAL;
 			}
@@ -127,11 +128,11 @@ gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
 	if (zbc < 0)
 		return zbc;
 
-	priv->zbc_depth[zbc].format = format;
-	priv->zbc_depth[zbc].ds = ds;
-	priv->zbc_depth[zbc].l2 = l2;
-	ltc->zbc_depth_get(ltc, zbc, l2);
-	gf100_gr_zbc_clear_depth(priv, zbc);
+	gr->zbc_depth[zbc].format = format;
+	gr->zbc_depth[zbc].ds = ds;
+	gr->zbc_depth[zbc].l2 = l2;
+	nvkm_ltc_zbc_depth_get(ltc, zbc, l2);
+	gf100_gr_zbc_clear_depth(gr, zbc);
 	return zbc;
 }
 
@@ -142,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
 static int
 gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 {
-	struct gf100_gr_priv *priv = (void *)object->engine;
+	struct gf100_gr *gr = (void *)object->engine;
 	union {
 		struct fermi_a_zbc_color_v0 v0;
 	} *args = data;
@@ -169,7 +170,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 		case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
 		case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
 		case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
-			ret = gf100_gr_zbc_color_get(priv, args->v0.format,
+			ret = gf100_gr_zbc_color_get(gr, args->v0.format,
 							   args->v0.ds,
 							   args->v0.l2);
 			if (ret >= 0) {
@@ -188,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 static int
 gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
 {
-	struct gf100_gr_priv *priv = (void *)object->engine;
+	struct gf100_gr *gr = (void *)object->engine;
 	union {
 		struct fermi_a_zbc_depth_v0 v0;
 	} *args = data;
@@ -197,7 +198,7 @@ gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
 	if (nvif_unpack(args->v0, 0, 0, false)) {
 		switch (args->v0.format) {
 		case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
-			ret = gf100_gr_zbc_depth_get(priv, args->v0.format,
+			ret = gf100_gr_zbc_depth_get(gr, args->v0.format,
 							   args->v0.ds,
 							   args->v0.l2);
 			return (ret >= 0) ? 0 : -ENOSPC;
@@ -223,106 +224,176 @@ gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 	return -EINVAL;
 }
 
-struct nvkm_ofuncs
-gf100_fermi_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
+const struct nvkm_object_func
+gf100_fermi = {
 	.mthd = gf100_fermi_mthd,
 };
 
-static int
-gf100_gr_set_shader_exceptions(struct nvkm_object *object, u32 mthd,
-			       void *pdata, u32 size)
+static void
+gf100_gr_mthd_set_shader_exceptions(struct nvkm_device *device, u32 data)
 {
-	struct gf100_gr_priv *priv = (void *)object->engine;
-	if (size >= sizeof(u32)) {
-		u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
-		nv_wr32(priv, 0x419e44, data);
-		nv_wr32(priv, 0x419e4c, data);
-		return 0;
+	nvkm_wr32(device, 0x419e44, data ? 0xffffffff : 0x00000000);
+	nvkm_wr32(device, 0x419e4c, data ? 0xffffffff : 0x00000000);
+}
+
+static bool
+gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data)
+{
+	switch (class & 0x00ff) {
+	case 0x97:
+	case 0xc0:
+		switch (mthd) {
+		case 0x1528:
+			gf100_gr_mthd_set_shader_exceptions(device, data);
+			return true;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
 	}
-	return -EINVAL;
+	return false;
 }
 
-struct nvkm_omthds
-gf100_gr_9097_omthds[] = {
-	{ 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
-	{}
-};
+static int
+gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	int c = 0;
 
-struct nvkm_omthds
-gf100_gr_90c0_omthds[] = {
-	{ 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
-	{}
-};
+	while (gr->func->sclass[c].oclass) {
+		if (c++ == index) {
+			*sclass = gr->func->sclass[index];
+			return index;
+		}
+	}
 
-struct nvkm_oclass
-gf100_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
-	{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
+	return c;
+}
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
-int
-gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *args, u32 size,
-		      struct nvkm_object **pobject)
+static int
+gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		   int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_vm *vm = nvkm_client(parent)->vm;
-	struct gf100_gr_priv *priv = (void *)engine;
-	struct gf100_gr_data *data = priv->mmio_data;
-	struct gf100_gr_mmio *mmio = priv->mmio_list;
-	struct gf100_gr_chan *chan;
+	struct gf100_gr_chan *chan = gf100_gr_chan(object);
+	struct gf100_gr *gr = chan->gr;
 	int ret, i;
 
-	/* allocate memory for context, and fill with default values */
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL,
-				     priv->size, 0x100,
-				     NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+			      align, false, parent, pgpuobj);
 	if (ret)
 		return ret;
 
+	nvkm_kmap(*pgpuobj);
+	for (i = 0; i < gr->size; i += 4)
+		nvkm_wo32(*pgpuobj, i, gr->data[i / 4]);
+
+	if (!gr->firmware) {
+		nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
+		nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
+	} else {
+		nvkm_wo32(*pgpuobj, 0xf4, 0);
+		nvkm_wo32(*pgpuobj, 0xf8, 0);
+		nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
+		nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
+		nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
+		nvkm_wo32(*pgpuobj, 0x1c, 1);
+		nvkm_wo32(*pgpuobj, 0x20, 0);
+		nvkm_wo32(*pgpuobj, 0x28, 0);
+		nvkm_wo32(*pgpuobj, 0x2c, 0);
+	}
+	nvkm_done(*pgpuobj);
+	return 0;
+}
+
+static void *
+gf100_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct gf100_gr_chan *chan = gf100_gr_chan(object);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
+		if (chan->data[i].vma.node) {
+			nvkm_vm_unmap(&chan->data[i].vma);
+			nvkm_vm_put(&chan->data[i].vma);
+		}
+		nvkm_memory_del(&chan->data[i].mem);
+	}
+
+	if (chan->mmio_vma.node) {
+		nvkm_vm_unmap(&chan->mmio_vma);
+		nvkm_vm_put(&chan->mmio_vma);
+	}
+	nvkm_memory_del(&chan->mmio);
+	return chan;
+}
+
+static const struct nvkm_object_func
+gf100_gr_chan = {
+	.dtor = gf100_gr_chan_dtor,
+	.bind = gf100_gr_chan_bind,
+};
+
+static int
+gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		  const struct nvkm_oclass *oclass,
+		  struct nvkm_object **pobject)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	struct gf100_gr_data *data = gr->mmio_data;
+	struct gf100_gr_mmio *mmio = gr->mmio_list;
+	struct gf100_gr_chan *chan;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int ret, i;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	*pobject = &chan->object;
+
 	/* allocate memory for a "mmio list" buffer that's used by the HUB
 	 * fuc to modify some per-context register settings on first load
 	 * of the context.
 	 */
-	ret = nvkm_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
-			      &chan->mmio);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
+			      false, &chan->mmio);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
-				 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
-				 &chan->mmio_vma);
+	ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
+			  NV_MEM_ACCESS_SYS, &chan->mmio_vma);
 	if (ret)
 		return ret;
 
+	nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+
 	/* allocate buffers referenced by mmio list */
-	for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
-		ret = nvkm_gpuobj_new(nv_object(chan), NULL, data->size,
-				      data->align, 0, &chan->data[i].mem);
+	for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      data->size, data->align, false,
+				      &chan->data[i].mem);
 		if (ret)
 			return ret;
 
-		ret = nvkm_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
-					 &chan->data[i].vma);
+		ret = nvkm_vm_get(fifoch->vm,
+				  nvkm_memory_size(chan->data[i].mem), 12,
+				  data->access, &chan->data[i].vma);
 		if (ret)
 			return ret;
 
+		nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
 		data++;
 	}
 
 	/* finally, fill in the mmio list and point the context at it */
-	for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
+	nvkm_kmap(chan->mmio);
+	for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
 		u32 addr = mmio->addr;
 		u32 data = mmio->data;
 
@@ -331,49 +402,14 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 			data |= info >> mmio->shift;
 		}
 
-		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
-		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
+		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
+		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
 		mmio++;
 	}
-
-	for (i = 0; i < priv->size; i += 4)
-		nv_wo32(chan, i, priv->data[i / 4]);
-
-	if (!priv->firmware) {
-		nv_wo32(chan, 0x00, chan->mmio_nr / 2);
-		nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
-	} else {
-		nv_wo32(chan, 0xf4, 0);
-		nv_wo32(chan, 0xf8, 0);
-		nv_wo32(chan, 0x10, chan->mmio_nr / 2);
-		nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
-		nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
-		nv_wo32(chan, 0x1c, 1);
-		nv_wo32(chan, 0x20, 0);
-		nv_wo32(chan, 0x28, 0);
-		nv_wo32(chan, 0x2c, 0);
-	}
-
+	nvkm_done(chan->mmio);
 	return 0;
 }
 
-void
-gf100_gr_context_dtor(struct nvkm_object *object)
-{
-	struct gf100_gr_chan *chan = (void *)object;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
-		nvkm_gpuobj_unmap(&chan->data[i].vma);
-		nvkm_gpuobj_ref(NULL, &chan->data[i].mem);
-	}
-
-	nvkm_gpuobj_unmap(&chan->mmio_vma);
-	nvkm_gpuobj_ref(NULL, &chan->mmio);
-
-	nvkm_gr_context_destroy(&chan->base);
-}
-
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -635,7 +671,7 @@ gf100_gr_pack_mmio[] = {
  ******************************************************************************/
 
 void
-gf100_gr_zbc_init(struct gf100_gr_priv *priv)
+gf100_gr_zbc_init(struct gf100_gr *gr)
 {
 	const u32  zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
 			      0x00000000, 0x00000000, 0x00000000, 0x00000000 };
@@ -645,22 +681,22 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
 			      0x00000000, 0x00000000, 0x00000000, 0x00000000 };
 	const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
 			      0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
-	struct nvkm_ltc *ltc = nvkm_ltc(priv);
+	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int index;
 
-	if (!priv->zbc_color[0].format) {
-		gf100_gr_zbc_color_get(priv, 1,  & zero[0],   &zero[4]);
-		gf100_gr_zbc_color_get(priv, 2,  &  one[0],    &one[4]);
-		gf100_gr_zbc_color_get(priv, 4,  &f32_0[0],  &f32_0[4]);
-		gf100_gr_zbc_color_get(priv, 4,  &f32_1[0],  &f32_1[4]);
-		gf100_gr_zbc_depth_get(priv, 1, 0x00000000, 0x00000000);
-		gf100_gr_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000);
+	if (!gr->zbc_color[0].format) {
+		gf100_gr_zbc_color_get(gr, 1,  & zero[0],   &zero[4]);
+		gf100_gr_zbc_color_get(gr, 2,  &  one[0],    &one[4]);
+		gf100_gr_zbc_color_get(gr, 4,  &f32_0[0],  &f32_0[4]);
+		gf100_gr_zbc_color_get(gr, 4,  &f32_1[0],  &f32_1[4]);
+		gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000);
+		gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000);
 	}
 
 	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
-		gf100_gr_zbc_clear_color(priv, index);
+		gf100_gr_zbc_clear_color(gr, index);
 	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
-		gf100_gr_zbc_clear_depth(priv, index);
+		gf100_gr_zbc_clear_depth(gr, index);
 }
 
 /**
@@ -669,8 +705,10 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
  * progress.
  */
 int
-gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+gf100_gr_wait_idle(struct gf100_gr *gr)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
 	bool gr_enabled, ctxsw_active, gr_busy;
 
@@ -679,24 +717,26 @@ gf100_gr_wait_idle(struct gf100_gr_priv *priv)
 		 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
 		 * up-to-date
 		 */
-		nv_rd32(priv, 0x400700);
+		nvkm_rd32(device, 0x400700);
 
-		gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
-		ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
-		gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+		gr_enabled = nvkm_rd32(device, 0x200) & 0x1000;
+		ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000;
+		gr_busy = nvkm_rd32(device, 0x40060c) & 0x1;
 
 		if (!gr_enabled || (!gr_busy && !ctxsw_active))
 			return 0;
 	} while (time_before(jiffies, end_jiffies));
 
-	nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
-		 gr_enabled, ctxsw_active, gr_busy);
+	nvkm_error(subdev,
+		   "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+		   gr_enabled, ctxsw_active, gr_busy);
 	return -EAGAIN;
 }
 
 void
-gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *pack;
 	const struct gf100_gr_init *init;
 
@@ -704,49 +744,54 @@ gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 		u32 next = init->addr + init->count * init->pitch;
 		u32 addr = init->addr;
 		while (addr < next) {
-			nv_wr32(priv, addr, init->data);
+			nvkm_wr32(device, addr, init->data);
 			addr += init->pitch;
 		}
 	}
 }
 
 void
-gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *pack;
 	const struct gf100_gr_init *init;
 	u32 data = 0;
 
-	nv_wr32(priv, 0x400208, 0x80000000);
+	nvkm_wr32(device, 0x400208, 0x80000000);
 
 	pack_for_each_init(init, pack, p) {
 		u32 next = init->addr + init->count * init->pitch;
 		u32 addr = init->addr;
 
 		if ((pack == p && init == p->init) || data != init->data) {
-			nv_wr32(priv, 0x400204, init->data);
+			nvkm_wr32(device, 0x400204, init->data);
 			data = init->data;
 		}
 
 		while (addr < next) {
-			nv_wr32(priv, 0x400200, addr);
+			nvkm_wr32(device, 0x400200, addr);
 			/**
 			 * Wait for GR to go idle after submitting a
 			 * GO_IDLE bundle
 			 */
 			if ((addr & 0xffff) == 0xe100)
-				gf100_gr_wait_idle(priv);
-			nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
+				gf100_gr_wait_idle(gr);
+			nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, 0x400700) & 0x00000004))
+					break;
+			);
 			addr += init->pitch;
 		}
 	}
 
-	nv_wr32(priv, 0x400208, 0x00000000);
+	nvkm_wr32(device, 0x400208, 0x00000000);
 }
 
 void
-gf100_gr_mthd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *pack;
 	const struct gf100_gr_init *init;
 	u32 data = 0;
@@ -757,79 +802,75 @@ gf100_gr_mthd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 		u32 addr = init->addr;
 
 		if ((pack == p && init == p->init) || data != init->data) {
-			nv_wr32(priv, 0x40448c, init->data);
+			nvkm_wr32(device, 0x40448c, init->data);
 			data = init->data;
 		}
 
 		while (addr < next) {
-			nv_wr32(priv, 0x404488, ctrl | (addr << 14));
+			nvkm_wr32(device, 0x404488, ctrl | (addr << 14));
 			addr += init->pitch;
 		}
 	}
 }
 
 u64
-gf100_gr_units(struct nvkm_gr *gr)
+gf100_gr_units(struct nvkm_gr *base)
 {
-	struct gf100_gr_priv *priv = (void *)gr;
+	struct gf100_gr *gr = gf100_gr(base);
 	u64 cfg;
 
-	cfg  = (u32)priv->gpc_nr;
-	cfg |= (u32)priv->tpc_total << 8;
-	cfg |= (u64)priv->rop_nr << 32;
+	cfg  = (u32)gr->gpc_nr;
+	cfg |= (u32)gr->tpc_total << 8;
+	cfg |= (u64)gr->rop_nr << 32;
 
 	return cfg;
 }
 
-static const struct nvkm_enum gk104_sked_error[] = {
-	{ 7, "CONSTANT_BUFFER_SIZE" },
-	{ 9, "LOCAL_MEMORY_SIZE_POS" },
-	{ 10, "LOCAL_MEMORY_SIZE_NEG" },
-	{ 11, "WARP_CSTACK_SIZE" },
-	{ 12, "TOTAL_TEMP_SIZE" },
-	{ 13, "REGISTER_COUNT" },
-	{ 18, "TOTAL_THREADS" },
-	{ 20, "PROGRAM_OFFSET" },
-	{ 21, "SHARED_MEMORY_SIZE" },
-	{ 25, "SHARED_CONFIG_TOO_SMALL" },
-	{ 26, "TOTAL_REGISTER_COUNT" },
+static const struct nvkm_bitfield gk104_sked_error[] = {
+	{ 0x00000080, "CONSTANT_BUFFER_SIZE" },
+	{ 0x00000200, "LOCAL_MEMORY_SIZE_POS" },
+	{ 0x00000400, "LOCAL_MEMORY_SIZE_NEG" },
+	{ 0x00000800, "WARP_CSTACK_SIZE" },
+	{ 0x00001000, "TOTAL_TEMP_SIZE" },
+	{ 0x00002000, "REGISTER_COUNT" },
+	{ 0x00040000, "TOTAL_THREADS" },
+	{ 0x00100000, "PROGRAM_OFFSET" },
+	{ 0x00200000, "SHARED_MEMORY_SIZE" },
+	{ 0x02000000, "SHARED_CONFIG_TOO_SMALL" },
+	{ 0x04000000, "TOTAL_REGISTER_COUNT" },
 	{}
 };
 
-static const struct nvkm_enum gf100_gpc_rop_error[] = {
-	{ 1, "RT_PITCH_OVERRUN" },
-	{ 4, "RT_WIDTH_OVERRUN" },
-	{ 5, "RT_HEIGHT_OVERRUN" },
-	{ 7, "ZETA_STORAGE_TYPE_MISMATCH" },
-	{ 8, "RT_STORAGE_TYPE_MISMATCH" },
-	{ 10, "RT_LINEAR_MISMATCH" },
+static const struct nvkm_bitfield gf100_gpc_rop_error[] = {
+	{ 0x00000002, "RT_PITCH_OVERRUN" },
+	{ 0x00000010, "RT_WIDTH_OVERRUN" },
+	{ 0x00000020, "RT_HEIGHT_OVERRUN" },
+	{ 0x00000080, "ZETA_STORAGE_TYPE_MISMATCH" },
+	{ 0x00000100, "RT_STORAGE_TYPE_MISMATCH" },
+	{ 0x00000400, "RT_LINEAR_MISMATCH" },
 	{}
 };
 
 static void
-gf100_gr_trap_gpc_rop(struct gf100_gr_priv *priv, int gpc)
+gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	char error[128];
 	u32 trap[4];
-	int i;
 
-	trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
-	trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434));
-	trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438));
-	trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c));
+	trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff;
+	trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434));
+	trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438));
+	trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c));
 
-	nv_error(priv, "GPC%d/PROP trap:", gpc);
-	for (i = 0; i <= 29; ++i) {
-		if (!(trap[0] & (1 << i)))
-			continue;
-		pr_cont(" ");
-		nvkm_enum_print(gf100_gpc_rop_error, i);
-	}
-	pr_cont("\n");
+	nvkm_snprintbf(error, sizeof(error), gf100_gpc_rop_error, trap[0]);
 
-	nv_error(priv, "x = %u, y = %u, format = %x, storage type = %x\n",
-		 trap[1] & 0xffff, trap[1] >> 16, (trap[2] >> 8) & 0x3f,
-		 trap[3] & 0xff);
-	nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+	nvkm_error(subdev, "GPC%d/PROP trap: %08x [%s] x = %u, y = %u, "
+			   "format = %x, storage type = %x\n",
+		   gpc, trap[0], error, trap[1] & 0xffff, trap[1] >> 16,
+		   (trap[2] >> 8) & 0x3f, trap[3] & 0xff);
+	nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
 }
 
 static const struct nvkm_enum gf100_mp_warp_error[] = {
@@ -852,401 +893,418 @@ static const struct nvkm_bitfield gf100_mp_global_error[] = {
 };
 
 static void
-gf100_gr_trap_mp(struct gf100_gr_priv *priv, int gpc, int tpc)
+gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
 {
-	u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x648));
-	u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x650));
-
-	nv_error(priv, "GPC%i/TPC%i/MP trap:", gpc, tpc);
-	nvkm_bitfield_print(gf100_mp_global_error, gerr);
-	if (werr) {
-		pr_cont(" ");
-		nvkm_enum_print(gf100_mp_warp_error, werr & 0xffff);
-	}
-	pr_cont("\n");
-
-	nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
-	nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x650), gerr);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648));
+	u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650));
+	const struct nvkm_enum *warp;
+	char glob[128];
+
+	nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
+	warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
+
+	nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
+			   "global %08x [%s] warp %04x [%s]\n",
+		   gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
+
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr);
 }
 
 static void
-gf100_gr_trap_tpc(struct gf100_gr_priv *priv, int gpc, int tpc)
+gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
 {
-	u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508));
 
 	if (stat & 0x00000001) {
-		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224));
-		nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap);
-		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
+		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224));
+		nvkm_error(subdev, "GPC%d/TPC%d/TEX: %08x\n", gpc, tpc, trap);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000002) {
-		gf100_gr_trap_mp(priv, gpc, tpc);
+		gf100_gr_trap_mp(gr, gpc, tpc);
 		stat &= ~0x00000002;
 	}
 
 	if (stat & 0x00000004) {
-		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084));
-		nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap);
-		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
+		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084));
+		nvkm_error(subdev, "GPC%d/TPC%d/POLY: %08x\n", gpc, tpc, trap);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
 		stat &= ~0x00000004;
 	}
 
 	if (stat & 0x00000008) {
-		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c));
-		nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap);
-		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
+		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c));
+		nvkm_error(subdev, "GPC%d/TPC%d/L1C: %08x\n", gpc, tpc, trap);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
 		stat &= ~0x00000008;
 	}
 
 	if (stat) {
-		nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat);
+		nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat);
 	}
 }
 
 static void
-gf100_gr_trap_gpc(struct gf100_gr_priv *priv, int gpc)
+gf100_gr_trap_gpc(struct gf100_gr *gr, int gpc)
 {
-	u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90));
 	int tpc;
 
 	if (stat & 0x00000001) {
-		gf100_gr_trap_gpc_rop(priv, gpc);
+		gf100_gr_trap_gpc_rop(gr, gpc);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000002) {
-		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
-		nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900));
+		nvkm_error(subdev, "GPC%d/ZCULL: %08x\n", gpc, trap);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
 		stat &= ~0x00000002;
 	}
 
 	if (stat & 0x00000004) {
-		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
-		nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028));
+		nvkm_error(subdev, "GPC%d/CCACHE: %08x\n", gpc, trap);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
 		stat &= ~0x00000004;
 	}
 
 	if (stat & 0x00000008) {
-		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
-		nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824));
+		nvkm_error(subdev, "GPC%d/ESETUP: %08x\n", gpc, trap);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
 		stat &= ~0x00000009;
 	}
 
-	for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 		u32 mask = 0x00010000 << tpc;
 		if (stat & mask) {
-			gf100_gr_trap_tpc(priv, gpc, tpc);
-			nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask);
+			gf100_gr_trap_tpc(gr, gpc, tpc);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask);
 			stat &= ~mask;
 		}
 	}
 
 	if (stat) {
-		nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat);
+		nvkm_error(subdev, "GPC%d/%08x: unknown\n", gpc, stat);
 	}
 }
 
 static void
-gf100_gr_trap_intr(struct gf100_gr_priv *priv)
+gf100_gr_trap_intr(struct gf100_gr *gr)
 {
-	u32 trap = nv_rd32(priv, 0x400108);
-	int rop, gpc, i;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 trap = nvkm_rd32(device, 0x400108);
+	int rop, gpc;
 
 	if (trap & 0x00000001) {
-		u32 stat = nv_rd32(priv, 0x404000);
-		nv_error(priv, "DISPATCH 0x%08x\n", stat);
-		nv_wr32(priv, 0x404000, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000001);
+		u32 stat = nvkm_rd32(device, 0x404000);
+		nvkm_error(subdev, "DISPATCH %08x\n", stat);
+		nvkm_wr32(device, 0x404000, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000001);
 		trap &= ~0x00000001;
 	}
 
 	if (trap & 0x00000002) {
-		u32 stat = nv_rd32(priv, 0x404600);
-		nv_error(priv, "M2MF 0x%08x\n", stat);
-		nv_wr32(priv, 0x404600, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000002);
+		u32 stat = nvkm_rd32(device, 0x404600);
+		nvkm_error(subdev, "M2MF %08x\n", stat);
+		nvkm_wr32(device, 0x404600, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000002);
 		trap &= ~0x00000002;
 	}
 
 	if (trap & 0x00000008) {
-		u32 stat = nv_rd32(priv, 0x408030);
-		nv_error(priv, "CCACHE 0x%08x\n", stat);
-		nv_wr32(priv, 0x408030, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000008);
+		u32 stat = nvkm_rd32(device, 0x408030);
+		nvkm_error(subdev, "CCACHE %08x\n", stat);
+		nvkm_wr32(device, 0x408030, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000008);
 		trap &= ~0x00000008;
 	}
 
 	if (trap & 0x00000010) {
-		u32 stat = nv_rd32(priv, 0x405840);
-		nv_error(priv, "SHADER 0x%08x\n", stat);
-		nv_wr32(priv, 0x405840, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000010);
+		u32 stat = nvkm_rd32(device, 0x405840);
+		nvkm_error(subdev, "SHADER %08x\n", stat);
+		nvkm_wr32(device, 0x405840, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000010);
 		trap &= ~0x00000010;
 	}
 
 	if (trap & 0x00000040) {
-		u32 stat = nv_rd32(priv, 0x40601c);
-		nv_error(priv, "UNK6 0x%08x\n", stat);
-		nv_wr32(priv, 0x40601c, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000040);
+		u32 stat = nvkm_rd32(device, 0x40601c);
+		nvkm_error(subdev, "UNK6 %08x\n", stat);
+		nvkm_wr32(device, 0x40601c, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000040);
 		trap &= ~0x00000040;
 	}
 
 	if (trap & 0x00000080) {
-		u32 stat = nv_rd32(priv, 0x404490);
-		nv_error(priv, "MACRO 0x%08x\n", stat);
-		nv_wr32(priv, 0x404490, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000080);
+		u32 stat = nvkm_rd32(device, 0x404490);
+		nvkm_error(subdev, "MACRO %08x\n", stat);
+		nvkm_wr32(device, 0x404490, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000080);
 		trap &= ~0x00000080;
 	}
 
 	if (trap & 0x00000100) {
-		u32 stat = nv_rd32(priv, 0x407020);
+		u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff;
+		char sked[128];
 
-		nv_error(priv, "SKED:");
-		for (i = 0; i <= 29; ++i) {
-			if (!(stat & (1 << i)))
-				continue;
-			pr_cont(" ");
-			nvkm_enum_print(gk104_sked_error, i);
-		}
-		pr_cont("\n");
+		nvkm_snprintbf(sked, sizeof(sked), gk104_sked_error, stat);
+		nvkm_error(subdev, "SKED: %08x [%s]\n", stat, sked);
 
-		if (stat & 0x3fffffff)
-			nv_wr32(priv, 0x407020, 0x40000000);
-		nv_wr32(priv, 0x400108, 0x00000100);
+		if (stat)
+			nvkm_wr32(device, 0x407020, 0x40000000);
+		nvkm_wr32(device, 0x400108, 0x00000100);
 		trap &= ~0x00000100;
 	}
 
 	if (trap & 0x01000000) {
-		u32 stat = nv_rd32(priv, 0x400118);
-		for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) {
+		u32 stat = nvkm_rd32(device, 0x400118);
+		for (gpc = 0; stat && gpc < gr->gpc_nr; gpc++) {
 			u32 mask = 0x00000001 << gpc;
 			if (stat & mask) {
-				gf100_gr_trap_gpc(priv, gpc);
-				nv_wr32(priv, 0x400118, mask);
+				gf100_gr_trap_gpc(gr, gpc);
+				nvkm_wr32(device, 0x400118, mask);
 				stat &= ~mask;
 			}
 		}
-		nv_wr32(priv, 0x400108, 0x01000000);
+		nvkm_wr32(device, 0x400108, 0x01000000);
 		trap &= ~0x01000000;
 	}
 
 	if (trap & 0x02000000) {
-		for (rop = 0; rop < priv->rop_nr; rop++) {
-			u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
-			u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
-			nv_error(priv, "ROP%d 0x%08x 0x%08x\n",
+		for (rop = 0; rop < gr->rop_nr; rop++) {
+			u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070));
+			u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144));
+			nvkm_error(subdev, "ROP%d %08x %08x\n",
 				 rop, statz, statc);
-			nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
-			nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+			nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+			nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
 		}
-		nv_wr32(priv, 0x400108, 0x02000000);
+		nvkm_wr32(device, 0x400108, 0x02000000);
 		trap &= ~0x02000000;
 	}
 
 	if (trap) {
-		nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap);
-		nv_wr32(priv, 0x400108, trap);
+		nvkm_error(subdev, "TRAP UNHANDLED %08x\n", trap);
+		nvkm_wr32(device, 0x400108, trap);
 	}
 }
 
 static void
-gf100_gr_ctxctl_debug_unit(struct gf100_gr_priv *priv, u32 base)
+gf100_gr_ctxctl_debug_unit(struct gf100_gr *gr, u32 base)
 {
-	nv_error(priv, "%06x - done 0x%08x\n", base,
-		 nv_rd32(priv, base + 0x400));
-	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
-		 nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
-		 nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
-	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
-		 nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
-		 nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	nvkm_error(subdev, "%06x - done %08x\n", base,
+		   nvkm_rd32(device, base + 0x400));
+	nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
+		   nvkm_rd32(device, base + 0x800),
+		   nvkm_rd32(device, base + 0x804),
+		   nvkm_rd32(device, base + 0x808),
+		   nvkm_rd32(device, base + 0x80c));
+	nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
+		   nvkm_rd32(device, base + 0x810),
+		   nvkm_rd32(device, base + 0x814),
+		   nvkm_rd32(device, base + 0x818),
+		   nvkm_rd32(device, base + 0x81c));
 }
 
 void
-gf100_gr_ctxctl_debug(struct gf100_gr_priv *priv)
+gf100_gr_ctxctl_debug(struct gf100_gr *gr)
 {
-	u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff;
 	u32 gpc;
 
-	gf100_gr_ctxctl_debug_unit(priv, 0x409000);
+	gf100_gr_ctxctl_debug_unit(gr, 0x409000);
 	for (gpc = 0; gpc < gpcnr; gpc++)
-		gf100_gr_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
+		gf100_gr_ctxctl_debug_unit(gr, 0x502000 + (gpc * 0x8000));
 }
 
 static void
-gf100_gr_ctxctl_isr(struct gf100_gr_priv *priv)
+gf100_gr_ctxctl_isr(struct gf100_gr *gr)
 {
-	u32 stat = nv_rd32(priv, 0x409c18);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x409c18);
 
 	if (stat & 0x00000001) {
-		u32 code = nv_rd32(priv, 0x409814);
+		u32 code = nvkm_rd32(device, 0x409814);
 		if (code == E_BAD_FWMTHD) {
-			u32 class = nv_rd32(priv, 0x409808);
-			u32  addr = nv_rd32(priv, 0x40980c);
+			u32 class = nvkm_rd32(device, 0x409808);
+			u32  addr = nvkm_rd32(device, 0x40980c);
 			u32  subc = (addr & 0x00070000) >> 16;
 			u32  mthd = (addr & 0x00003ffc);
-			u32  data = nv_rd32(priv, 0x409810);
+			u32  data = nvkm_rd32(device, 0x409810);
 
-			nv_error(priv, "FECS MTHD subc %d class 0x%04x "
-				       "mthd 0x%04x data 0x%08x\n",
-				 subc, class, mthd, data);
+			nvkm_error(subdev, "FECS MTHD subc %d class %04x "
+					   "mthd %04x data %08x\n",
+				   subc, class, mthd, data);
 
-			nv_wr32(priv, 0x409c20, 0x00000001);
+			nvkm_wr32(device, 0x409c20, 0x00000001);
 			stat &= ~0x00000001;
 		} else {
-			nv_error(priv, "FECS ucode error %d\n", code);
+			nvkm_error(subdev, "FECS ucode error %d\n", code);
 		}
 	}
 
 	if (stat & 0x00080000) {
-		nv_error(priv, "FECS watchdog timeout\n");
-		gf100_gr_ctxctl_debug(priv);
-		nv_wr32(priv, 0x409c20, 0x00080000);
+		nvkm_error(subdev, "FECS watchdog timeout\n");
+		gf100_gr_ctxctl_debug(gr);
+		nvkm_wr32(device, 0x409c20, 0x00080000);
 		stat &= ~0x00080000;
 	}
 
 	if (stat) {
-		nv_error(priv, "FECS 0x%08x\n", stat);
-		gf100_gr_ctxctl_debug(priv);
-		nv_wr32(priv, 0x409c20, stat);
+		nvkm_error(subdev, "FECS %08x\n", stat);
+		gf100_gr_ctxctl_debug(gr);
+		nvkm_wr32(device, 0x409c20, stat);
 	}
 }
 
 static void
-gf100_gr_intr(struct nvkm_subdev *subdev)
+gf100_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle;
-	struct gf100_gr_priv *priv = (void *)subdev;
-	u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
-	u32 stat = nv_rd32(priv, 0x400100);
-	u32 addr = nv_rd32(priv, 0x400704);
+	struct gf100_gr *gr = gf100_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff;
+	u32 stat = nvkm_rd32(device, 0x400100);
+	u32 addr = nvkm_rd32(device, 0x400704);
 	u32 mthd = (addr & 0x00003ffc);
 	u32 subc = (addr & 0x00070000) >> 16;
-	u32 data = nv_rd32(priv, 0x400708);
-	u32 code = nv_rd32(priv, 0x400110);
+	u32 data = nvkm_rd32(device, 0x400708);
+	u32 code = nvkm_rd32(device, 0x400110);
 	u32 class;
-	int chid;
+	const char *name = "unknown";
+	int chid = -1;
+
+	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+	if (chan) {
+		name = chan->object.client->name;
+		chid = chan->chid;
+	}
 
-	if (nv_device(priv)->card_type < NV_E0 || subc < 4)
-		class = nv_rd32(priv, 0x404200 + (subc * 4));
+	if (device->card_type < NV_E0 || subc < 4)
+		class = nvkm_rd32(device, 0x404200 + (subc * 4));
 	else
 		class = 0x0000;
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
 	if (stat & 0x00000001) {
 		/*
 		 * notifier interrupt, only needed for cyclestats
 		 * can be safely ignored
 		 */
-		nv_wr32(priv, 0x400100, 0x00000001);
+		nvkm_wr32(device, 0x400100, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000010) {
-		handle = nvkm_handle_get_class(engctx, class);
-		if (!handle || nv_call(handle->object, mthd, data)) {
-			nv_error(priv,
-				 "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-				 chid, inst << 12, nvkm_client_name(engctx),
-				 subc, class, mthd, data);
+		if (!gf100_gr_mthd_sw(device, class, mthd, data)) {
+			nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] "
+				   "subc %d class %04x mthd %04x data %08x\n",
+				   chid, inst << 12, name, subc,
+				   class, mthd, data);
 		}
-		nvkm_handle_put(handle);
-		nv_wr32(priv, 0x400100, 0x00000010);
+		nvkm_wr32(device, 0x400100, 0x00000010);
 		stat &= ~0x00000010;
 	}
 
 	if (stat & 0x00000020) {
-		nv_error(priv,
-			 "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, inst << 12, nvkm_client_name(engctx), subc,
-			 class, mthd, data);
-		nv_wr32(priv, 0x400100, 0x00000020);
+		nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] "
+			   "subc %d class %04x mthd %04x data %08x\n",
+			   chid, inst << 12, name, subc, class, mthd, data);
+		nvkm_wr32(device, 0x400100, 0x00000020);
 		stat &= ~0x00000020;
 	}
 
 	if (stat & 0x00100000) {
-		nv_error(priv, "DATA_ERROR [");
-		nvkm_enum_print(nv50_data_error_names, code);
-		pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			chid, inst << 12, nvkm_client_name(engctx), subc,
-			class, mthd, data);
-		nv_wr32(priv, 0x400100, 0x00100000);
+		const struct nvkm_enum *en =
+			nvkm_enum_find(nv50_data_error_names, code);
+		nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] "
+				   "subc %d class %04x mthd %04x data %08x\n",
+			   code, en ? en->name : "", chid, inst << 12,
+			   name, subc, class, mthd, data);
+		nvkm_wr32(device, 0x400100, 0x00100000);
 		stat &= ~0x00100000;
 	}
 
 	if (stat & 0x00200000) {
-		nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
-			 nvkm_client_name(engctx));
-		gf100_gr_trap_intr(priv);
-		nv_wr32(priv, 0x400100, 0x00200000);
+		nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n",
+			   chid, inst << 12, name);
+		gf100_gr_trap_intr(gr);
+		nvkm_wr32(device, 0x400100, 0x00200000);
 		stat &= ~0x00200000;
 	}
 
 	if (stat & 0x00080000) {
-		gf100_gr_ctxctl_isr(priv);
-		nv_wr32(priv, 0x400100, 0x00080000);
+		gf100_gr_ctxctl_isr(gr);
+		nvkm_wr32(device, 0x400100, 0x00080000);
 		stat &= ~0x00080000;
 	}
 
 	if (stat) {
-		nv_error(priv, "unknown stat 0x%08x\n", stat);
-		nv_wr32(priv, 0x400100, stat);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_wr32(device, 0x400100, stat);
 	}
 
-	nv_wr32(priv, 0x400500, 0x00010001);
-	nvkm_engctx_put(engctx);
+	nvkm_wr32(device, 0x400500, 0x00010001);
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
 void
-gf100_gr_init_fw(struct gf100_gr_priv *priv, u32 fuc_base,
+gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
 		 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int i;
 
-	nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
+	nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000);
 	for (i = 0; i < data->size / 4; i++)
-		nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);
+		nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]);
 
-	nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
+	nvkm_wr32(device, fuc_base + 0x0180, 0x01000000);
 	for (i = 0; i < code->size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(priv, fuc_base + 0x0188, i >> 6);
-		nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
+			nvkm_wr32(device, fuc_base + 0x0188, i >> 6);
+		nvkm_wr32(device, fuc_base + 0x0184, code->data[i]);
 	}
 
 	/* code must be padded to 0x40 words */
 	for (; i & 0x3f; i++)
-		nv_wr32(priv, fuc_base + 0x0184, 0);
+		nvkm_wr32(device, fuc_base + 0x0184, 0);
 }
 
 static void
-gf100_gr_init_csdata(struct gf100_gr_priv *priv,
+gf100_gr_init_csdata(struct gf100_gr *gr,
 		     const struct gf100_gr_pack *pack,
 		     u32 falcon, u32 starstar, u32 base)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *iter;
 	const struct gf100_gr_init *init;
 	u32 addr = ~0, prev = ~0, xfer = 0;
 	u32 star, temp;
 
-	nv_wr32(priv, falcon + 0x01c0, 0x02000000 + starstar);
-	star = nv_rd32(priv, falcon + 0x01c4);
-	temp = nv_rd32(priv, falcon + 0x01c4);
+	nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar);
+	star = nvkm_rd32(device, falcon + 0x01c4);
+	temp = nvkm_rd32(device, falcon + 0x01c4);
 	if (temp > star)
 		star = temp;
-	nv_wr32(priv, falcon + 0x01c0, 0x01000000 + star);
+	nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star);
 
 	pack_for_each_init(init, iter, pack) {
 		u32 head = init->addr - base;
@@ -1255,7 +1313,7 @@ gf100_gr_init_csdata(struct gf100_gr_priv *priv,
 			if (head != prev + 4 || xfer >= 32) {
 				if (xfer) {
 					u32 data = ((--xfer << 26) | addr);
-					nv_wr32(priv, falcon + 0x01c4, data);
+					nvkm_wr32(device, falcon + 0x01c4, data);
 					star += 4;
 				}
 				addr = head;
@@ -1267,157 +1325,166 @@ gf100_gr_init_csdata(struct gf100_gr_priv *priv,
 		}
 	}
 
-	nv_wr32(priv, falcon + 0x01c4, (--xfer << 26) | addr);
-	nv_wr32(priv, falcon + 0x01c0, 0x01000004 + starstar);
-	nv_wr32(priv, falcon + 0x01c4, star + 4);
+	nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr);
+	nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar);
+	nvkm_wr32(device, falcon + 0x01c4, star + 4);
 }
 
 int
-gf100_gr_init_ctxctl(struct gf100_gr_priv *priv)
+gf100_gr_init_ctxctl(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)nv_object(priv)->oclass;
-	struct gf100_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int i;
 
-	if (priv->firmware) {
+	if (gr->firmware) {
 		/* load fuc microcode */
-		nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
-		gf100_gr_init_fw(priv, 0x409000, &priv->fuc409c,
-						 &priv->fuc409d);
-		gf100_gr_init_fw(priv, 0x41a000, &priv->fuc41ac,
-						 &priv->fuc41ad);
-		nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+		nvkm_mc_unk260(device->mc, 0);
+		gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d);
+		gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad);
+		nvkm_mc_unk260(device->mc, 1);
 
 		/* start both of them running */
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x41a10c, 0x00000000);
-		nv_wr32(priv, 0x40910c, 0x00000000);
-		nv_wr32(priv, 0x41a100, 0x00000002);
-		nv_wr32(priv, 0x409100, 0x00000002);
-		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
-			nv_warn(priv, "0x409800 wait failed\n");
-
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x7fffffff);
-		nv_wr32(priv, 0x409504, 0x00000021);
-
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x00000000);
-		nv_wr32(priv, 0x409504, 0x00000010);
-		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(priv, "fuc09 req 0x10 timeout\n");
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x41a10c, 0x00000000);
+		nvkm_wr32(device, 0x40910c, 0x00000000);
+		nvkm_wr32(device, 0x41a100, 0x00000002);
+		nvkm_wr32(device, 0x409100, 0x00000002);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x00000001)
+				break;
+		) < 0)
 			return -EBUSY;
-		}
-		priv->size = nv_rd32(priv, 0x409800);
 
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x00000000);
-		nv_wr32(priv, 0x409504, 0x00000016);
-		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(priv, "fuc09 req 0x16 timeout\n");
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x7fffffff);
+		nvkm_wr32(device, 0x409504, 0x00000021);
+
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x00000000);
+		nvkm_wr32(device, 0x409504, 0x00000010);
+		if (nvkm_msec(device, 2000,
+			if ((gr->size = nvkm_rd32(device, 0x409800)))
+				break;
+		) < 0)
 			return -EBUSY;
-		}
 
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x00000000);
-		nv_wr32(priv, 0x409504, 0x00000025);
-		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(priv, "fuc09 req 0x25 timeout\n");
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x00000000);
+		nvkm_wr32(device, 0x409504, 0x00000016);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
+			return -EBUSY;
+
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x00000000);
+		nvkm_wr32(device, 0x409504, 0x00000025);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
 			return -EBUSY;
-		}
 
-		if (nv_device(priv)->chipset >= 0xe0) {
-			nv_wr32(priv, 0x409800, 0x00000000);
-			nv_wr32(priv, 0x409500, 0x00000001);
-			nv_wr32(priv, 0x409504, 0x00000030);
-			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(priv, "fuc09 req 0x30 timeout\n");
+		if (device->chipset >= 0xe0) {
+			nvkm_wr32(device, 0x409800, 0x00000000);
+			nvkm_wr32(device, 0x409500, 0x00000001);
+			nvkm_wr32(device, 0x409504, 0x00000030);
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
 				return -EBUSY;
-			}
 
-			nv_wr32(priv, 0x409810, 0xb00095c8);
-			nv_wr32(priv, 0x409800, 0x00000000);
-			nv_wr32(priv, 0x409500, 0x00000001);
-			nv_wr32(priv, 0x409504, 0x00000031);
-			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(priv, "fuc09 req 0x31 timeout\n");
+			nvkm_wr32(device, 0x409810, 0xb00095c8);
+			nvkm_wr32(device, 0x409800, 0x00000000);
+			nvkm_wr32(device, 0x409500, 0x00000001);
+			nvkm_wr32(device, 0x409504, 0x00000031);
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
 				return -EBUSY;
-			}
 
-			nv_wr32(priv, 0x409810, 0x00080420);
-			nv_wr32(priv, 0x409800, 0x00000000);
-			nv_wr32(priv, 0x409500, 0x00000001);
-			nv_wr32(priv, 0x409504, 0x00000032);
-			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(priv, "fuc09 req 0x32 timeout\n");
+			nvkm_wr32(device, 0x409810, 0x00080420);
+			nvkm_wr32(device, 0x409800, 0x00000000);
+			nvkm_wr32(device, 0x409500, 0x00000001);
+			nvkm_wr32(device, 0x409504, 0x00000032);
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
 				return -EBUSY;
-			}
 
-			nv_wr32(priv, 0x409614, 0x00000070);
-			nv_wr32(priv, 0x409614, 0x00000770);
-			nv_wr32(priv, 0x40802c, 0x00000001);
+			nvkm_wr32(device, 0x409614, 0x00000070);
+			nvkm_wr32(device, 0x409614, 0x00000770);
+			nvkm_wr32(device, 0x40802c, 0x00000001);
 		}
 
-		if (priv->data == NULL) {
-			int ret = gf100_grctx_generate(priv);
+		if (gr->data == NULL) {
+			int ret = gf100_grctx_generate(gr);
 			if (ret) {
-				nv_error(priv, "failed to construct context\n");
+				nvkm_error(subdev, "failed to construct context\n");
 				return ret;
 			}
 		}
 
 		return 0;
 	} else
-	if (!oclass->fecs.ucode) {
+	if (!gr->func->fecs.ucode) {
 		return -ENOSYS;
 	}
 
 	/* load HUB microcode */
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
-	nv_wr32(priv, 0x4091c0, 0x01000000);
-	for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++)
-		nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]);
+	nvkm_mc_unk260(device->mc, 0);
+	nvkm_wr32(device, 0x4091c0, 0x01000000);
+	for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
+		nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
 
-	nv_wr32(priv, 0x409180, 0x01000000);
-	for (i = 0; i < oclass->fecs.ucode->code.size / 4; i++) {
+	nvkm_wr32(device, 0x409180, 0x01000000);
+	for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(priv, 0x409188, i >> 6);
-		nv_wr32(priv, 0x409184, oclass->fecs.ucode->code.data[i]);
+			nvkm_wr32(device, 0x409188, i >> 6);
+		nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]);
 	}
 
 	/* load GPC microcode */
-	nv_wr32(priv, 0x41a1c0, 0x01000000);
-	for (i = 0; i < oclass->gpccs.ucode->data.size / 4; i++)
-		nv_wr32(priv, 0x41a1c4, oclass->gpccs.ucode->data.data[i]);
+	nvkm_wr32(device, 0x41a1c0, 0x01000000);
+	for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++)
+		nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]);
 
-	nv_wr32(priv, 0x41a180, 0x01000000);
-	for (i = 0; i < oclass->gpccs.ucode->code.size / 4; i++) {
+	nvkm_wr32(device, 0x41a180, 0x01000000);
+	for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(priv, 0x41a188, i >> 6);
-		nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]);
+			nvkm_wr32(device, 0x41a188, i >> 6);
+		nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
 	}
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	nvkm_mc_unk260(device->mc, 1);
 
 	/* load register lists */
-	gf100_gr_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
-	gf100_gr_init_csdata(priv, cclass->gpc, 0x41a000, 0x000, 0x418000);
-	gf100_gr_init_csdata(priv, cclass->tpc, 0x41a000, 0x004, 0x419800);
-	gf100_gr_init_csdata(priv, cclass->ppc, 0x41a000, 0x008, 0x41be00);
+	gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
+	gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000);
+	gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
+	gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);
 
 	/* start HUB ucode running, it'll init the GPCs */
-	nv_wr32(priv, 0x40910c, 0x00000000);
-	nv_wr32(priv, 0x409100, 0x00000002);
-	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
-		nv_error(priv, "HUB_INIT timed out\n");
-		gf100_gr_ctxctl_debug(priv);
+	nvkm_wr32(device, 0x40910c, 0x00000000);
+	nvkm_wr32(device, 0x409100, 0x00000002);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) & 0x80000000)
+			break;
+	) < 0) {
+		gf100_gr_ctxctl_debug(gr);
 		return -EBUSY;
 	}
 
-	priv->size = nv_rd32(priv, 0x409804);
-	if (priv->data == NULL) {
-		int ret = gf100_grctx_generate(priv);
+	gr->size = nvkm_rd32(device, 0x409804);
+	if (gr->data == NULL) {
+		int ret = gf100_grctx_generate(gr);
 		if (ret) {
-			nv_error(priv, "failed to construct context\n");
+			nvkm_error(subdev, "failed to construct context\n");
 			return ret;
 		}
 	}
@@ -1425,143 +1492,160 @@ gf100_gr_init_ctxctl(struct gf100_gr_priv *priv)
 	return 0;
 }
 
-int
-gf100_gr_init(struct nvkm_object *object)
+static int
+gf100_gr_oneinit(struct nvkm_gr *base)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
-	int gpc, tpc, rop;
-	int ret, i;
+	struct gf100_gr *gr = gf100_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int ret, i, j;
 
-	ret = nvkm_gr_init(&priv->base);
+	nvkm_pmu_pgob(device->pmu, false);
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
+			      &gr->unk4188b4);
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
-
-	gf100_gr_mmio(priv, oclass->mmio);
-
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
-
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
+			      &gr->unk4188b8);
+	if (ret)
+		return ret;
 
-	if (nv_device(priv)->chipset != 0xd7)
-		nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
-	else
-		nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-
-	nv_wr32(priv, 0x400500, 0x00010001);
-
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-
-	nv_wr32(priv, 0x409c24, 0x000f0000);
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x40601c, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
-	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+	nvkm_kmap(gr->unk4188b4);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(gr->unk4188b4, i, 0x00000010);
+	nvkm_done(gr->unk4188b4);
+
+	nvkm_kmap(gr->unk4188b8);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(gr->unk4188b8, i, 0x00000010);
+	nvkm_done(gr->unk4188b8);
+
+	gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
+	gr->gpc_nr =  nvkm_rd32(device, 0x409604) & 0x0000001f;
+	for (i = 0; i < gr->gpc_nr; i++) {
+		gr->tpc_nr[i]  = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
+		gr->tpc_total += gr->tpc_nr[i];
+		gr->ppc_nr[i]  = gr->func->ppc_nr;
+		for (j = 0; j < gr->ppc_nr[i]; j++) {
+			u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
+			gr->ppc_tpc_nr[i][j] = hweight8(mask);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	/*XXX: these need figuring out... though it might not even matter */
+	switch (device->chipset) {
+	case 0xc0:
+		if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
+			gr->magic_not_rop_nr = 0x07;
+		} else
+		if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
+			gr->magic_not_rop_nr = 0x05;
+		} else
+		if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
+			gr->magic_not_rop_nr = 0x06;
+		}
+		break;
+	case 0xc3: /* 450, 4/0/0/0, 2 */
+		gr->magic_not_rop_nr = 0x03;
+		break;
+	case 0xc4: /* 460, 3/4/0/0, 4 */
+		gr->magic_not_rop_nr = 0x01;
+		break;
+	case 0xc1: /* 2/0/0/0, 1 */
+		gr->magic_not_rop_nr = 0x01;
+		break;
+	case 0xc8: /* 4/4/3/4, 5 */
+		gr->magic_not_rop_nr = 0x06;
+		break;
+	case 0xce: /* 4/4/0/0, 4 */
+		gr->magic_not_rop_nr = 0x03;
+		break;
+	case 0xcf: /* 4/0/0/0, 3 */
+		gr->magic_not_rop_nr = 0x03;
+		break;
+	case 0xd7:
+	case 0xd9: /* 1/0/0/0, 1 */
+	case 0xea: /* gk20a */
+	case 0x12b: /* gm20b */
+		gr->magic_not_rop_nr = 0x01;
+		break;
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
-
-	nv_wr32(priv, 0x400054, 0x34ce3464);
-
-	gf100_gr_zbc_init(priv);
+	return 0;
+}
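
The oneinit path above derives the whole unit topology from a single register: bits 16..20 of 0x409604 hold the ROP count, bits 0..4 the GPC count, and each PPC's TPC membership is read as a bitmask whose population count is taken with hweight8(). A minimal user-space sketch of the same unpacking (the register and mask values below are assumed samples, not dumps from real hardware):

	#include <stdint.h>
	#include <stdio.h>

	/* popcount of an 8-bit mask, standing in for the kernel's hweight8() */
	static int hweight8(uint8_t m)
	{
		int n = 0;
		for (; m; m >>= 1)
			n += m & 1;
		return n;
	}

	int main(void)
	{
		uint32_t r409604 = 0x00040004;	/* assumed sample readout */
		unsigned rop_nr = (r409604 & 0x001f0000) >> 16;
		unsigned gpc_nr =  r409604 & 0x0000001f;
		uint8_t ppc_tpc_mask = 0x0b;	/* assumed GPC_UNIT(i, 0x0c30) value */

		printf("rops=%u gpcs=%u tpcs-in-ppc=%d\n",
		       rop_nr, gpc_nr, hweight8(ppc_tpc_mask));
		return 0;
	}
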
 
-	return gf100_gr_init_ctxctl(priv);
+int
+gf100_gr_init_(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
+	return gr->func->init(gr);
 }
 
-static void
+void
 gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
 {
 	kfree(fuc->data);
 	fuc->data = NULL;
 }
 
+void *
+gf100_gr_dtor(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+
+	if (gr->func->dtor)
+		gr->func->dtor(gr);
+	kfree(gr->data);
+
+	gf100_gr_dtor_fw(&gr->fuc409c);
+	gf100_gr_dtor_fw(&gr->fuc409d);
+	gf100_gr_dtor_fw(&gr->fuc41ac);
+	gf100_gr_dtor_fw(&gr->fuc41ad);
+
+	nvkm_memory_del(&gr->unk4188b8);
+	nvkm_memory_del(&gr->unk4188b4);
+	return gr;
+}
+
+static const struct nvkm_gr_func
+gf100_gr_ = {
+	.dtor = gf100_gr_dtor,
+	.oneinit = gf100_gr_oneinit,
+	.init = gf100_gr_init_,
+	.intr = gf100_gr_intr,
+	.units = gf100_gr_units,
+	.chan_new = gf100_gr_chan_new,
+	.object_get = gf100_gr_object_get,
+};
+
 int
-gf100_gr_ctor_fw(struct gf100_gr_priv *priv, const char *fwname,
+gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
 		 struct gf100_gr_fuc *fuc)
 {
-	struct nvkm_device *device = nv_device(priv);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	const struct firmware *fw;
-	char f[32];
+	char f[64];
+	char cname[16];
 	int ret;
+	int i;
+
+	/* Convert device name to lowercase */
+	strncpy(cname, device->chip->name, sizeof(cname));
+	cname[sizeof(cname) - 1] = '\0';
+	i = strlen(cname);
+	while (i) {
+		--i;
+		cname[i] = tolower(cname[i]);
+	}
 
-	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
-	ret = request_firmware(&fw, f, nv_device_base(device));
+	snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+	ret = request_firmware(&fw, f, device->dev);
 	if (ret) {
-		snprintf(f, sizeof(f), "nouveau/%s", fwname);
-		ret = request_firmware(&fw, f, nv_device_base(device));
-		if (ret) {
-			nv_error(priv, "failed to load %s\n", fwname);
-			return ret;
-		}
+		nvkm_error(subdev, "failed to load %s\n", fwname);
+		return ret;
 	}
 
 	fuc->size = fw->size;
@@ -1570,126 +1654,150 @@ gf100_gr_ctor_fw(struct gf100_gr_priv *priv, const char *fwname,
 	return (fuc->data != NULL) ? 0 : -ENOMEM;
 }
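
gf100_gr_ctor_fw() now requests images under the external nvidia/<chip>/<name>.bin layout instead of the old nouveau/nvXX_<name> names, lowercasing the chip name first, and no longer falls back to a second path on failure. A standalone sketch of just the path construction, with "GK20A" as an assumed device->chip->name:

	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *chip = "GK20A";	/* assumed device->chip->name */
		char cname[16], f[64];
		int i;

		strncpy(cname, chip, sizeof(cname));
		cname[sizeof(cname) - 1] = '\0';
		for (i = strlen(cname); i--; )
			cname[i] = tolower((unsigned char)cname[i]);

		snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, "fecs_inst");
		printf("%s\n", f);	/* nvidia/gk20a/fecs_inst.bin */
		return 0;
	}
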
 
-void
-gf100_gr_dtor(struct nvkm_object *object)
+int
+gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
+	      int index, struct gf100_gr *gr)
 {
-	struct gf100_gr_priv *priv = (void *)object;
+	int ret;
 
-	kfree(priv->data);
+	gr->func = func;
+	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
+				    func->fecs.ucode == NULL);
 
-	gf100_gr_dtor_fw(&priv->fuc409c);
-	gf100_gr_dtor_fw(&priv->fuc409d);
-	gf100_gr_dtor_fw(&priv->fuc41ac);
-	gf100_gr_dtor_fw(&priv->fuc41ad);
+	ret = nvkm_gr_ctor(&gf100_gr_, device, index, 0x08001000,
+			   gr->firmware || func->fecs.ucode != NULL,
+			   &gr->base);
+	if (ret)
+		return ret;
 
-	nvkm_gpuobj_ref(NULL, &priv->unk4188b8);
-	nvkm_gpuobj_ref(NULL, &priv->unk4188b4);
+	if (gr->firmware) {
+		nvkm_info(&gr->base.engine.subdev, "using external firmware\n");
+		if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
+		    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
+		    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
+		    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
+			return -ENODEV;
+	}
 
-	nvkm_gr_destroy(&priv->base);
+	return 0;
 }
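
The constructor decides up front whether to run from external firmware: nvkm_boolopt() looks NvGrUseFW up in the device's config string and, when the option is absent, falls back to the supplied default, so chipsets without built-in FECS ucode default to external firmware while the rest need an explicit NvGrUseFW=1. A rough illustration of that default-fallback behaviour (boolopt() below is a simplified stand-in, not the real nvkm parser):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* simplified stand-in: only recognises exact "name=0" / "name=1" */
	static bool boolopt(const char *cfg, const char *name, bool def)
	{
		char key[64];
		const char *p;

		snprintf(key, sizeof(key), "%s=", name);
		p = cfg ? strstr(cfg, key) : NULL;
		return p ? p[strlen(key)] == '1' : def;
	}

	int main(void)
	{
		bool has_builtin_ucode = false;	/* assumed: no built-in image */

		/* option absent: default applies */
		printf("%d\n", boolopt(NULL, "NvGrUseFW", !has_builtin_ucode));
		/* explicit override wins */
		printf("%d\n", boolopt("NvGrUseFW=0", "NvGrUseFW", true));
		return 0;
	}
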
 
 int
-gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *bclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_gr **pgr)
 {
-	struct gf100_gr_oclass *oclass = (void *)bclass;
-	struct nvkm_device *device = nv_device(parent);
-	struct gf100_gr_priv *priv;
-	bool use_ext_fw, enable;
-	int ret, i, j;
+	struct gf100_gr *gr;
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+	return gf100_gr_ctor(func, device, index, gr);
+}
 
-	use_ext_fw = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
-				  oclass->fecs.ucode == NULL);
-	enable = use_ext_fw || oclass->fecs.ucode != NULL;
+int
+gf100_gr_init(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {};
+	u8  tpcnr[GPC_MAX];
+	int gpc, tpc, rop;
+	int i;
 
-	ret = nvkm_gr_create(parent, engine, bclass, enable, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
 
-	nv_subdev(priv)->unit = 0x08001000;
-	nv_subdev(priv)->intr = gf100_gr_intr;
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	priv->base.units = gf100_gr_units;
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
-	if (use_ext_fw) {
-		nv_info(priv, "using external firmware\n");
-		if (gf100_gr_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
-		    gf100_gr_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
-		    gf100_gr_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
-		    gf100_gr_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
-			return -ENODEV;
-		priv->firmware = true;
+		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
-			      &priv->unk4188b4);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
-			      &priv->unk4188b8);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
 
-	for (i = 0; i < 0x1000; i += 4) {
-		nv_wo32(priv->unk4188b4, i, 0x00000010);
-		nv_wo32(priv->unk4188b8, i, 0x00000010);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
-	priv->gpc_nr =  nv_rd32(priv, 0x409604) & 0x0000001f;
-	for (i = 0; i < priv->gpc_nr; i++) {
-		priv->tpc_nr[i]  = nv_rd32(priv, GPC_UNIT(i, 0x2608));
-		priv->tpc_total += priv->tpc_nr[i];
-		priv->ppc_nr[i]  = oclass->ppc_nr;
-		for (j = 0; j < priv->ppc_nr[i]; j++) {
-			u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4)));
-			priv->ppc_tpc_nr[i][j] = hweight8(mask);
+	if (device->chipset != 0xd7)
+		nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
+	else
+		nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+
+	nvkm_wr32(device, 0x400500, 0x00010001);
+
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+
+	nvkm_wr32(device, 0x409c24, 0x000f0000);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x40601c, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
 		}
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	/*XXX: these need figuring out... though it might not even matter */
-	switch (nv_device(priv)->chipset) {
-	case 0xc0:
-		if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
-			priv->magic_not_rop_nr = 0x07;
-		} else
-		if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
-			priv->magic_not_rop_nr = 0x05;
-		} else
-		if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
-			priv->magic_not_rop_nr = 0x06;
-		}
-		break;
-	case 0xc3: /* 450, 4/0/0/0, 2 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xc4: /* 460, 3/4/0/0, 4 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
-	case 0xc1: /* 2/0/0/0, 1 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
-	case 0xc8: /* 4/4/3/4, 5 */
-		priv->magic_not_rop_nr = 0x06;
-		break;
-	case 0xce: /* 4/4/0/0, 4 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xcf: /* 4/0/0/0, 3 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xd7:
-	case 0xd9: /* 1/0/0/0, 1 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_engine(priv)->cclass = *oclass->cclass;
-	nv_engine(priv)->sclass =  oclass->sclass;
-	return 0;
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
+
+	nvkm_wr32(device, 0x400054, 0x34ce3464);
+
+	gf100_gr_zbc_init(gr);
+
+	return gf100_gr_init_ctxctl(gr);
 }
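
The zcull setup in gf100_gr_init() walks the GPCs round-robin, assigning each TPC a slot and packing the TPC index into a 4-bit field, eight per 32-bit GPC_BCAST word; magicgpc918 is just DIV_ROUND_UP(0x00800000, tpc_total), e.g. 15 TPCs give 0x00088889. A user-space sketch of the packing loop under an assumed 4-GPC, 3/4/4/4-TPC layout:

	#include <stdint.h>
	#include <stdio.h>

	#define GPC_NR 4

	int main(void)
	{
		uint8_t tpc_nr[GPC_NR] = { 3, 4, 4, 4 };	/* assumed layout */
		uint8_t tpcnr[GPC_NR];
		uint32_t data[4] = { 0 };
		int i, gpc = -1, tpc, total = 0;

		for (i = 0; i < GPC_NR; i++) {
			tpcnr[i] = tpc_nr[i];
			total += tpc_nr[i];
		}

		/* same round-robin walk as the kernel loop */
		for (i = 0; i < total; i++) {
			do {
				gpc = (gpc + 1) % GPC_NR;
			} while (!tpcnr[gpc]);
			tpc = tpc_nr[gpc] - tpcnr[gpc]--;
			data[i / 8] |= (uint32_t)tpc << ((i % 8) * 4);
		}

		for (i = 0; i < 4; i++)
			printf("GPC_BCAST(0x098%x) <- %08x\n",
			       i * 4, (unsigned)data[i]);
		printf("magicgpc918 = %08x\n",
		       (0x00800000 + total - 1) / total);
		return 0;
	}
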
 
 #include "fuc/hubgf100.fuc3.h"
@@ -1712,18 +1820,24 @@ gf100_gr_gpccs_ucode = {
 	.data.size = sizeof(gf100_grgpc_data),
 };
 
-struct nvkm_oclass *
-gf100_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf100_grctx_oclass,
-	.sclass =  gf100_gr_sclass,
+static const struct gf100_gr_func
+gf100_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf100_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
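
Each entry in the new per-chipset sclass table reads as (minver, maxver, class[, object funcs]), with -1 on both bounds meaning the class is exposed for any version. A toy matcher over the same triple shape; the struct layout and class numbers here are simplified assumptions for illustration, not the real struct nvkm_sclass:

	#include <stdio.h>

	struct sclass { int minver, maxver; unsigned oclass; };

	static const struct sclass sclasses[] = {
		{ -1, -1, 0x902d },	/* e.g. FERMI_TWOD_A */
		{ -1, -1, 0x9097 },	/* e.g. FERMI_A */
		{ 0 }
	};

	static int supported(unsigned oclass, int ver)
	{
		const struct sclass *s;

		for (s = sclasses; s->oclass; s++)
			if (s->oclass == oclass &&
			    (s->minver < 0 || ver >= s->minver) &&
			    (s->maxver < 0 || ver <= s->maxver))
				return 1;
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", supported(0x9097, 0), supported(0xa297, 0));
		return 0;
	}
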
+
+int
+gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf100_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index c9533fdac4fc..4611961b1187 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -21,11 +21,14 @@
  *
  * Authors: Ben Skeggs
  */
-#ifndef __NVC0_GR_H__
-#define __NVC0_GR_H__
-#include <engine/gr.h>
+#ifndef __GF100_GR_H__
+#define __GF100_GR_H__
+#define gf100_gr(p) container_of((p), struct gf100_gr, base)
+#include "priv.h"
 
+#include <core/gpuobj.h>
 #include <subdev/ltc.h>
+#include <subdev/mmu.h>
 
 #define GPC_MAX 32
 #define TPC_MAX (GPC_MAX * 8)
@@ -67,7 +70,8 @@ struct gf100_gr_zbc_depth {
 	u32 l2;
 };
 
-struct gf100_gr_priv {
+struct gf100_gr {
+	const struct gf100_gr_func *func;
 	struct nvkm_gr base;
 
 	struct gf100_gr_fuc fuc409c;
@@ -76,6 +80,15 @@ struct gf100_gr_priv {
 	struct gf100_gr_fuc fuc41ad;
 	bool firmware;
 
+	/*
+	 * Used if the register packs are loaded from NVIDIA fw instead of
+	 * using hardcoded arrays.
+	 */
+	struct gf100_gr_pack *fuc_sw_nonctx;
+	struct gf100_gr_pack *fuc_sw_ctx;
+	struct gf100_gr_pack *fuc_bundle;
+	struct gf100_gr_pack *fuc_method;
+
 	struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
 	struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
 
@@ -86,8 +99,8 @@ struct gf100_gr_priv {
 	u8 ppc_nr[GPC_MAX];
 	u8 ppc_tpc_nr[GPC_MAX][4];
 
-	struct nvkm_gpuobj *unk4188b4;
-	struct nvkm_gpuobj *unk4188b8;
+	struct nvkm_memory *unk4188b4;
+	struct nvkm_memory *unk4188b8;
 
 	struct gf100_gr_data mmio_data[4];
 	struct gf100_gr_mmio mmio_list[4096/8];
@@ -97,48 +110,65 @@ struct gf100_gr_priv {
 	u8 magic_not_rop_nr;
 };
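
With struct nvkm_gr now embedded as the base member, every former (void *)object cast becomes a container_of() downcast via the gf100_gr() macro above. A freestanding sketch of that embedding pattern with stand-in types:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct nvkm_gr { int dummy; };		/* stand-in for the real base */

	struct gf100_gr {
		const void *func;
		struct nvkm_gr base;		/* embedded, not pointed to */
	};

	#define gf100_gr(p) container_of((p), struct gf100_gr, base)

	int main(void)
	{
		struct gf100_gr gr = { 0 };
		struct nvkm_gr *base = &gr.base;	/* what the core hands out */

		/* the downcast recovers the containing object */
		printf("%d\n", gf100_gr(base) == &gr);
		return 0;
	}
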
 
+int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
+		  int, struct gf100_gr *);
+int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
+		  int, struct nvkm_gr **);
+void *gf100_gr_dtor(struct nvkm_gr *);
+
+struct gf100_gr_func {
+	void (*dtor)(struct gf100_gr *);
+	int (*init)(struct gf100_gr *);
+	void (*init_gpc_mmu)(struct gf100_gr *);
+	void (*set_hww_esr_report_mask)(struct gf100_gr *);
+	const struct gf100_gr_pack *mmio;
+	struct {
+		struct gf100_gr_ucode *ucode;
+	} fecs;
+	struct {
+		struct gf100_gr_ucode *ucode;
+	} gpccs;
+	int ppc_nr;
+	const struct gf100_grctx_func *grctx;
+	struct nvkm_sclass sclass[];
+};
+
+int gf100_gr_init(struct gf100_gr *);
+
+int gk104_gr_init(struct gf100_gr *);
+
+int gk20a_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
+		  int, struct nvkm_gr **);
+void gk20a_gr_dtor(struct gf100_gr *);
+int gk20a_gr_init(struct gf100_gr *);
+
+int gm204_gr_init(struct gf100_gr *);
+
+#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
+
 struct gf100_gr_chan {
-	struct nvkm_gr_chan base;
+	struct nvkm_object object;
+	struct gf100_gr *gr;
 
-	struct nvkm_gpuobj *mmio;
+	struct nvkm_memory *mmio;
 	struct nvkm_vma mmio_vma;
 	int mmio_nr;
+
 	struct {
-		struct nvkm_gpuobj *mem;
+		struct nvkm_memory *mem;
 		struct nvkm_vma vma;
 	} data[4];
 };
 
-int  gf100_gr_context_ctor(struct nvkm_object *, struct nvkm_object *,
-			     struct nvkm_oclass *, void *, u32,
-			     struct nvkm_object **);
-void gf100_gr_context_dtor(struct nvkm_object *);
-
-void gf100_gr_ctxctl_debug(struct gf100_gr_priv *);
+void gf100_gr_ctxctl_debug(struct gf100_gr *);
 
+void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
+int  gf100_gr_ctor_fw(struct gf100_gr *, const char *,
+		      struct gf100_gr_fuc *);
 u64  gf100_gr_units(struct nvkm_gr *);
-int  gf100_gr_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *data, u32 size,
-		     struct nvkm_object **);
-void gf100_gr_dtor(struct nvkm_object *);
-int  gf100_gr_init(struct nvkm_object *);
-void gf100_gr_zbc_init(struct gf100_gr_priv *);
-
-int  gk104_gr_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *data, u32 size,
-		     struct nvkm_object **);
-int  gk104_gr_init(struct nvkm_object *);
-
-int  gm204_gr_init(struct nvkm_object *);
+void gf100_gr_zbc_init(struct gf100_gr *);
 
-extern struct nvkm_ofuncs gf100_fermi_ofuncs;
-
-extern struct nvkm_oclass gf100_gr_sclass[];
-extern struct nvkm_omthds gf100_gr_9097_omthds[];
-extern struct nvkm_omthds gf100_gr_90c0_omthds[];
-extern struct nvkm_oclass gf110_gr_sclass[];
-extern struct nvkm_oclass gk110_gr_sclass[];
-extern struct nvkm_oclass gm204_gr_sclass[];
+extern const struct nvkm_object_func gf100_fermi;
 
 struct gf100_gr_init {
 	u32 addr;
@@ -167,25 +197,11 @@ extern struct gf100_gr_ucode gf100_gr_gpccs_ucode;
 extern struct gf100_gr_ucode gk110_gr_fecs_ucode;
 extern struct gf100_gr_ucode gk110_gr_gpccs_ucode;
 
-struct gf100_gr_oclass {
-	struct nvkm_oclass base;
-	struct nvkm_oclass **cclass;
-	struct nvkm_oclass *sclass;
-	const struct gf100_gr_pack *mmio;
-	struct {
-		struct gf100_gr_ucode *ucode;
-	} fecs;
-	struct {
-		struct gf100_gr_ucode *ucode;
-	} gpccs;
-	int ppc_nr;
-};
-
-int  gf100_gr_wait_idle(struct gf100_gr_priv *);
-void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-int  gf100_gr_init_ctxctl(struct gf100_gr_priv *);
+int  gf100_gr_wait_idle(struct gf100_gr *);
+void gf100_gr_mmio(struct gf100_gr *, const struct gf100_gr_pack *);
+void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *);
+void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
+int  gf100_gr_init_ctxctl(struct gf100_gr *);
 
 /* register init value lists */
 
@@ -261,7 +277,7 @@ extern const struct gf100_gr_init gm107_gr_init_tex_0[];
 extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
 extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
 extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
-void gm107_gr_init_bios(struct gf100_gr_priv *);
+void gm107_gr_init_bios(struct gf100_gr *);
 
 extern const struct gf100_gr_pack gm204_gr_pack_mmio[];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 20d3b85db3b5..8f253e0a22f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -110,18 +112,24 @@ gf104_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf104_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf104_grctx_oclass,
-	.sclass = gf100_gr_sclass,
+static const struct gf100_gr_func
+gf104_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf104_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf104_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 8df73421c78c..815a5aafa245 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -27,20 +27,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf108_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
-	{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -117,18 +103,25 @@ gf108_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf108_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf108_grctx_oclass,
-	.sclass = gf108_gr_sclass,
+static const struct gf100_gr_func
+gf108_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf108_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf108_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index ef76e2dd1d31..d13187409d68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -27,21 +27,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gf110_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
-	{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -99,18 +84,26 @@ gf110_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf110_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc8),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf110_grctx_oclass,
-	.sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf110_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf110_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_C, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf110_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 871ac5f806f6..28483d8bf3d2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -118,19 +120,27 @@ gf117_gr_gpccs_ucode = {
 	.data.size = sizeof(gf117_grgpc_data),
 };
 
-struct nvkm_oclass *
-gf117_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xd7),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf117_grctx_oclass,
-	.sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf117_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gf117_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_C, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf117_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index e6dd651e2636..9811a72e0313 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -173,18 +175,26 @@ gf119_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf119_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xd9),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf119_grctx_oclass,
-	.sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf119_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf119_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_C, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf119_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 46f7844eca70..abf54928a1a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -24,24 +24,9 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
-#include <subdev/pmu.h>
-
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
-	{ KEPLER_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -193,132 +178,112 @@ gk104_gr_pack_mmio[] = {
  ******************************************************************************/
 
 int
-gk104_gr_init(struct nvkm_object *object)
+gk104_gr_init(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	struct nvkm_pmu *pmu = nvkm_pmu(priv);
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, rop;
-	int ret, i;
-
-	if (pmu)
-		pmu->pgob(pmu, false);
+	int i;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
 
-	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	gf100_gr_mmio(priv, oclass->mmio);
-
-	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
 	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-
-	nv_wr32(priv, 0x400500, 0x00010001);
-
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-
-	nv_wr32(priv, 0x409ffc, 0x00000000);
-	nv_wr32(priv, 0x409c14, 0x00003e3e);
-	nv_wr32(priv, 0x409c24, 0x000f0001);
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x407020, 0x40000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
-	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+
+	nvkm_wr32(device, 0x400500, 0x00010001);
+
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+
+	nvkm_wr32(device, 0x409ffc, 0x00000000);
+	nvkm_wr32(device, 0x409c14, 0x00003e3e);
+	nvkm_wr32(device, 0x409c24, 0x000f0001);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x407020, 0x40000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nv_wr32(priv, 0x400054, 0x34ce3464);
+	nvkm_wr32(device, 0x400054, 0x34ce3464);
 
-	gf100_gr_zbc_init(priv);
+	gf100_gr_zbc_init(gr);
 
-	return gf100_gr_init_ctxctl(priv);
-}
-
-int
-gk104_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nvkm_pmu *pmu = nvkm_pmu(parent);
-	if (pmu)
-		pmu->pgob(pmu, false);
-	return gf100_gr_ctor(parent, engine, oclass, data, size, pobject);
+	return gf100_gr_init_ctxctl(gr);
 }
 
 #include "fuc/hubgk104.fuc3.h"
@@ -341,19 +306,25 @@ gk104_gr_gpccs_ucode = {
 	.data.size = sizeof(gk104_grgpc_data),
 };
 
-struct nvkm_oclass *
-gk104_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk104_grctx_oclass,
-	.sclass = gk104_gr_sclass,
+static const struct gf100_gr_func
+gk104_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
 	.gpccs.ucode = &gk104_gr_gpccs_ucode,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gk104_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
+		{ -1, -1, KEPLER_A, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk104_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index f4cd8e5546af..32aa2946e7b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -29,19 +29,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gk110_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ KEPLER_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -193,19 +180,25 @@ gk110_gr_gpccs_ucode = {
 	.data.size = sizeof(gk110_grgpc_data),
 };
 
-struct nvkm_oclass *
-gk110_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xf0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk110_grctx_oclass,
-	.sclass =  gk110_gr_sclass,
+static const struct gf100_gr_func
+gk110_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gk110_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, KEPLER_B, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk110_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 9ff9eab0ccaf..22f88afbf35f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -98,19 +100,25 @@ gk110b_gr_pack_mmio[] = {
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk110b_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xf1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk110b_grctx_oclass,
-	.sclass =  gk110_gr_sclass,
+static const struct gf100_gr_func
+gk110b_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gk110b_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, KEPLER_B, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk110b_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 85f44a3d5d11..ee7554fc87dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -29,19 +29,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk208_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ KEPLER_B, &gf100_fermi_ofuncs },
-	{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -172,19 +159,25 @@ gk208_gr_gpccs_ucode = {
 	.data.size = sizeof(gk208_grgpc_data),
 };
 
-struct nvkm_oclass *
-gk208_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk208_grctx_oclass,
-	.sclass =  gk208_gr_sclass,
+static const struct gf100_gr_func
+gk208_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
 	.gpccs.ucode = &gk208_gr_gpccs_ucode,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gk208_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, KEPLER_B, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk208_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 40ff5eb9180c..b8758d3b8b51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,28 +22,335 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <subdev/timer.h>
+
 #include <nvif/class.h>
 
-static struct nvkm_oclass
-gk20a_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
-	{ KEPLER_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
+static void
+gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
+{
+	vfree(pack);
+}
+
+struct gk20a_fw_av
+{
+	u32 addr;
+	u32 data;
+};
+
+static struct gf100_gr_pack *
+gk20a_gr_av_to_init(struct gf100_gr_fuc *fuc)
+{
+	struct gf100_gr_init *init;
+	struct gf100_gr_pack *pack;
+	const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
+	int i;
+
+	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
+	if (!pack)
+		return ERR_PTR(-ENOMEM);
+
+	init = (void *)(pack + 2);
+
+	pack[0].init = init;
+
+	for (i = 0; i < nent; i++) {
+		struct gf100_gr_init *ent = &init[i];
+		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+
+		ent->addr = av->addr;
+		ent->data = av->data;
+		ent->count = 1;
+		ent->pitch = 1;
+	}
+
+	return pack;
+}
+
+struct gk20a_fw_aiv
+{
+	u32 addr;
+	u32 index;
+	u32 data;
 };
 
-struct nvkm_oclass *
-gk20a_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk20a_grctx_oclass,
-	.sclass = gk20a_gr_sclass,
-	.mmio = gk104_gr_pack_mmio,
+static struct gf100_gr_pack *
+gk20a_gr_aiv_to_init(struct gf100_gr_fuc *fuc)
+{
+	struct gf100_gr_init *init;
+	struct gf100_gr_pack *pack;
+	const int nent = (fuc->size / sizeof(struct gk20a_fw_aiv));
+	int i;
+
+	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
+	if (!pack)
+		return ERR_PTR(-ENOMEM);
+
+	init = (void *)(pack + 2);
+
+	pack[0].init = init;
+
+	for (i = 0; i < nent; i++) {
+		struct gf100_gr_init *ent = &init[i];
+		struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc->data)[i];
+
+		ent->addr = av->addr;
+		ent->data = av->data;
+		ent->count = 1;
+		ent->pitch = 1;
+	}
+
+	return pack;
+}
+
+static struct gf100_gr_pack *
+gk20a_gr_av_to_method(struct gf100_gr_fuc *fuc)
+{
+	struct gf100_gr_init *init;
+	struct gf100_gr_pack *pack;
+	/* We don't expect to initialize more than 16 classes here... */
+	static const unsigned int max_classes = 16;
+	const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
+	int i, classidx = 0;
+	u32 prevclass = 0;
+
+	pack = vzalloc((sizeof(*pack) * max_classes) +
+		       (sizeof(*init) * (nent + 1)));
+	if (!pack)
+		return ERR_PTR(-ENOMEM);
+
+	init = (void *)(pack + max_classes);
+
+	for (i = 0; i < nent; i++) {
+		struct gf100_gr_init *ent = &init[i];
+		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+		u32 class = av->addr & 0xffff;
+		u32 addr = (av->addr & 0xffff0000) >> 14;
+
+		if (prevclass != class) {
+			pack[classidx].init = ent;
+			pack[classidx].type = class;
+			prevclass = class;
+			if (++classidx >= max_classes) {
+				vfree(pack);
+				return ERR_PTR(-ENOSPC);
+			}
+		}
+
+		ent->addr = addr;
+		ent->data = av->data;
+		ent->count = 1;
+		ent->pitch = 1;
+	}
+
+	return pack;
+}
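
In the sw_method_init image each 8-byte record appears to pack the class into the low 16 bits of addr and the method offset as a word index (offset / 4) into the high 16 bits, which is why the code shifts right by 14 rather than 16 to recover a byte offset. A small decode sketch over two assumed records (not real firmware contents):

	#include <stdint.h>
	#include <stdio.h>

	struct gk20a_fw_av {
		uint32_t addr;
		uint32_t data;
	};

	int main(void)
	{
		/* assumed sample records: class in low 16 bits, method / 4 above */
		struct gk20a_fw_av fw[] = {
			{ (0x0d28 / 4) << 16 | 0xa297, 0x00000001 },
			{ (0x0590 / 4) << 16 | 0x902d, 0x00000000 },
		};
		int i;

		for (i = 0; i < 2; i++) {
			uint32_t class = fw[i].addr & 0xffff;
			uint32_t mthd  = (fw[i].addr & 0xffff0000) >> 14;

			printf("class %04x mthd %04x data %08x\n",
			       (unsigned)class, (unsigned)mthd,
			       (unsigned)fw[i].data);
		}
		return 0;
	}
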
+
+static int
+gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "FECS mem scrubbing timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "GPCCS mem scrubbing timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
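
nvkm_msec() busy-polls its statement until the statement executes break or the 2000 ms budget runs out, evaluating negative on timeout, hence the < 0 checks above. A rough user-space analogue of the same poll-until-clear loop; read_scrub_status() is a hypothetical stand-in for the 0x40910c/0x41a10c reads:

	#include <stdio.h>
	#include <time.h>

	/* hypothetical stand-in: reports busy twice, then idle */
	static unsigned read_scrub_status(void)
	{
		static int calls;
		return ++calls < 3 ? 0x6 : 0x0;
	}

	static int wait_scrub(long budget_ms)
	{
		struct timespec t0, t;

		clock_gettime(CLOCK_MONOTONIC, &t0);
		for (;;) {
			if (!(read_scrub_status() & 0x6))
				return 0;	/* scrubber idle */
			clock_gettime(CLOCK_MONOTONIC, &t);
			if ((t.tv_sec - t0.tv_sec) * 1000 +
			    (t.tv_nsec - t0.tv_nsec) / 1000000 > budget_ms)
				return -1;	/* like nvkm_msec() < 0 */
		}
	}

	int main(void)
	{
		printf("%d\n", wait_scrub(2000));
		return 0;
	}
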
+
+static void
+gk20a_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, 0x419e44, 0x1ffffe);
+	nvkm_wr32(device, 0x419e4c, 0x7f);
+}
+
+int
+gk20a_gr_init(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {};
+	u8  tpcnr[GPC_MAX];
+	int gpc, tpc;
+	int ret, i;
+
+	/* Clear SCC RAM */
+	nvkm_wr32(device, 0x40802c, 0x1);
+
+	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+	ret = gk20a_gr_wait_mem_scrubbing(gr);
+	if (ret)
+		return ret;
+
+	ret = gf100_gr_wait_idle(gr);
+	if (ret)
+		return ret;
+
+	/* MMU debug buffer */
+	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
+
+	if (gr->func->init_gpc_mmu)
+		gr->func->init_gpc_mmu(gr);
+
+	/* Set the PE as stream master */
+	nvkm_mask(device, 0x503018, 0x1, 0x1);
+
+	/* Zcull init */
+	memset(data, 0x00, sizeof(data));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		data[i / 8] |= tpc << ((i % 8) * 4);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			  gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			  gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+
+	/* Enable FIFO access */
+	nvkm_wr32(device, 0x400500, 0x00010001);
+
+	/* Enable interrupts */
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+
+	/* Enable FECS error interrupts */
+	nvkm_wr32(device, 0x409c24, 0x000f0000);
+
+	/* Enable hardware warning exceptions */
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+
+	if (gr->func->set_hww_esr_report_mask)
+		gr->func->set_hww_esr_report_mask(gr);
+
+	/* Enable TPC exceptions per GPC */
+	nvkm_wr32(device, 0x419d0c, 0x2);
+	nvkm_wr32(device, 0x41ac94, (((1 << gr->tpc_total) - 1) & 0xff) << 16);
+
+	/* Reset and enable all exceptions */
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
+
+	gf100_gr_zbc_init(gr);
+
+	return gf100_gr_init_ctxctl(gr);
+}
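
The per-GPC TPC exception enable written to 0x41ac94 is plain mask arithmetic: ((1 << tpc_total) - 1) & 0xff selects every TPC, shifted into bits 16..23. Taking tpc_total = 1 as the assumed gk20a topology, that evaluates to 0x00010000:

	#include <stdio.h>

	int main(void)
	{
		int tpc_total = 1;	/* assumed gk20a topology */
		unsigned mask = (((1u << tpc_total) - 1) & 0xff) << 16;

		printf("0x41ac94 <- %#010x\n", mask);	/* 0x00010000 */
		return 0;
	}
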
+
+void
+gk20a_gr_dtor(struct gf100_gr *gr)
+{
+	gk20a_gr_init_dtor(gr->fuc_method);
+	gk20a_gr_init_dtor(gr->fuc_bundle);
+	gk20a_gr_init_dtor(gr->fuc_sw_ctx);
+	gk20a_gr_init_dtor(gr->fuc_sw_nonctx);
+}
+
+int
+gk20a_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_gr **pgr)
+{
+	struct gf100_gr_fuc fuc;
+	struct gf100_gr *gr;
+	int ret;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+
+	ret = gf100_gr_ctor(func, device, index, gr);
+	if (ret)
+		return ret;
+
+	ret = gf100_gr_ctor_fw(gr, "sw_nonctx", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_sw_nonctx = gk20a_gr_av_to_init(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_sw_nonctx))
+		return PTR_ERR(gr->fuc_sw_nonctx);
+
+	ret = gf100_gr_ctor_fw(gr, "sw_ctx", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_sw_ctx = gk20a_gr_aiv_to_init(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_sw_ctx))
+		return PTR_ERR(gr->fuc_sw_ctx);
+
+	ret = gf100_gr_ctor_fw(gr, "sw_bundle_init", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_bundle = gk20a_gr_av_to_init(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_bundle))
+		return PTR_ERR(gr->fuc_bundle);
+
+	ret = gf100_gr_ctor_fw(gr, "sw_method_init", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_method = gk20a_gr_av_to_method(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_method))
+		return PTR_ERR(gr->fuc_method);
+
+	return 0;
+}
+
+static const struct gf100_gr_func
+gk20a_gr = {
+	.dtor = gk20a_gr_dtor,
+	.init = gk20a_gr_init,
+	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gk20a_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
+		{ -1, -1, KEPLER_C, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gk20a_gr_new_(&gk20a_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index a5ebd459bc24..56e960212e5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -30,19 +30,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm107_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ MAXWELL_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ MAXWELL_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -292,7 +279,7 @@ gm107_gr_pack_mmio[] = {
  ******************************************************************************/
 
 void
-gm107_gr_init_bios(struct gf100_gr_priv *priv)
+gm107_gr_init_bios(struct gf100_gr *gr)
 {
 	static const struct {
 		u32 ctrl;
@@ -304,7 +291,8 @@ gm107_gr_init_bios(struct gf100_gr_priv *priv)
 		{ 0x419af0, 0x419af4 },
 		{ 0x419af8, 0x419afc },
 	};
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_P0260E infoE;
 	struct nvbios_P0260X infoX;
 	int E = -1, X;
@@ -312,124 +300,119 @@ gm107_gr_init_bios(struct gf100_gr_priv *priv)
 
 	while (nvbios_P0260Ep(bios, ++E, &ver, &hdr, &infoE)) {
 		if (X = -1, E < ARRAY_SIZE(regs)) {
-			nv_wr32(priv, regs[E].ctrl, infoE.data);
+			nvkm_wr32(device, regs[E].ctrl, infoE.data);
 			while (nvbios_P0260Xp(bios, ++X, &ver, &hdr, &infoX))
-				nv_wr32(priv, regs[E].data, infoX.data);
+				nvkm_wr32(device, regs[E].data, infoX.data);
 		}
 	}
 }
 
 int
-gm107_gr_init(struct nvkm_object *object)
+gm107_gr_init(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, ppc, rop;
-	int ret, i;
-
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	int i;
 
-	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
 
-	gf100_gr_mmio(priv, oclass->mmio);
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	gm107_gr_init_bios(priv);
+	gm107_gr_init_bios(gr);
 
-	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
 	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-
-	nv_wr32(priv, 0x400500, 0x00010001);
-
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-	nv_wr32(priv, 0x400124, 0x00000002);
-	nv_wr32(priv, 0x409c24, 0x000e0000);
-
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x407020, 0x40000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < 2 /* priv->ppc_nr[gpc] */; ppc++)
-			nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+
+	nvkm_wr32(device, 0x400500, 0x00010001);
+
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400124, 0x00000002);
+	nvkm_wr32(device, 0x409c24, 0x000e0000);
+
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x407020, 0x40000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < 2 /* gr->ppc_nr[gpc] */; ppc++)
+			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nv_wr32(priv, 0x400054, 0x2c350f63);
+	nvkm_wr32(device, 0x400054, 0x2c350f63);
 
-	gf100_gr_zbc_init(priv);
+	gf100_gr_zbc_init(gr);
 
-	return gf100_gr_init_ctxctl(priv);
+	return gf100_gr_init_ctxctl(gr);
 }
 
 #include "fuc/hubgm107.fuc5.h"
@@ -452,19 +435,25 @@ gm107_gr_gpccs_ucode = {
 	.data.size = sizeof(gm107_grgpc_data),
 };
 
-struct nvkm_oclass *
-gm107_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gm107_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gm107_grctx_oclass,
-	.sclass =  gm107_gr_sclass,
+static const struct gf100_gr_func
+gm107_gr = {
+	.init = gm107_gr_init,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
 	.gpccs.ucode = &gm107_gr_gpccs_ucode,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gm107_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_A, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gm107_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
index fdb1dcf16a59..90381dde451a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
@@ -27,19 +27,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gm204_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ MAXWELL_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ MAXWELL_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -243,144 +230,144 @@ gm204_gr_data[] = {
  ******************************************************************************/
 
 static int
-gm204_gr_init_ctxctl(struct gf100_gr_priv *priv)
+gm204_gr_init_ctxctl(struct gf100_gr *gr)
 {
 	return 0;
 }
 
 int
-gm204_gr_init(struct nvkm_object *object)
+gm204_gr_init(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {}, tmp;
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, ppc, rop;
-	int ret, i;
-	u32 tmp;
-
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	int i;
 
-	tmp = nv_rd32(priv, 0x100c80); /*XXX: mask? */
-	nv_wr32(priv, 0x418880, 0x00001000 | (tmp & 0x00000fff));
-	nv_wr32(priv, 0x418890, 0x00000000);
-	nv_wr32(priv, 0x418894, 0x00000000);
-	nv_wr32(priv, 0x4188b4, priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, 0x4188b8, priv->unk4188b8->addr >> 8);
-	nv_mask(priv, 0x4188b0, 0x00040000, 0x00040000);
+	tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
+	nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
+	nvkm_wr32(device, 0x418890, 0x00000000);
+	nvkm_wr32(device, 0x418894, 0x00000000);
+	nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
 
 	/*XXX: belongs in fb */
-	nv_wr32(priv, 0x100cc8, priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, 0x100ccc, priv->unk4188b8->addr >> 8);
-	nv_mask(priv, 0x100cc4, 0x00040000, 0x00040000);
+	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
 
-	gf100_gr_mmio(priv, oclass->mmio);
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	gm107_gr_init_bios(priv);
+	gm107_gr_init_bios(gr);
 
-	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
 	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-	nv_wr32(priv, GPC_BCAST(0x033c), nv_rd32(priv, 0x100804));
-
-	nv_wr32(priv, 0x400500, 0x00010001);
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-	nv_wr32(priv, 0x400124, 0x00000002);
-	nv_wr32(priv, 0x409c24, 0x000e0000);
-	nv_wr32(priv, 0x405848, 0xc0000000);
-	nv_wr32(priv, 0x40584c, 0x00000001);
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x407020, 0x40000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++)
-			nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+
+	nvkm_wr32(device, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400124, 0x00000002);
+	nvkm_wr32(device, 0x409c24, 0x000e0000);
+	nvkm_wr32(device, 0x405848, 0xc0000000);
+	nvkm_wr32(device, 0x40584c, 0x00000001);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x407020, 0x40000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
+			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nv_wr32(priv, 0x400054, 0x2c350f63);
+	nvkm_wr32(device, 0x400054, 0x2c350f63);
 
-	gf100_gr_zbc_init(priv);
+	gf100_gr_zbc_init(gr);
 
-	return gm204_gr_init_ctxctl(priv);
+	return gm204_gr_init_ctxctl(gr);
 }
 
-struct nvkm_oclass *
-gm204_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gm204_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gm204_grctx_oclass,
-	.sclass =  gm204_gr_sclass,
+static const struct gf100_gr_func
+gm204_gr = {
+	.init = gm204_gr_init,
 	.mmio = gm204_gr_pack_mmio,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gm204_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_B, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gm204_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gm204_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
index 04b9733d146a..341dc560acbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
@@ -24,17 +24,25 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
-struct nvkm_oclass *
-gm206_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x26),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gm204_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gm206_grctx_oclass,
-	.sclass =  gm204_gr_sclass,
+#include <nvif/class.h>
+
+static const struct gf100_gr_func
+gm206_gr = {
+	.init = gm204_gr_init,
 	.mmio = gm204_gr_pack_mmio,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gm206_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_B, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gm206_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gm206_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
new file mode 100644
index 000000000000..65b6e3d1e90d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+static void
+gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 val;
+
+	/* TODO this needs to be removed once secure boot works */
+	if (1) {
+		nvkm_wr32(device, 0x100ce4, 0xffffffff);
+	}
+
+	/* TODO update once secure boot works */
+	val = nvkm_rd32(device, 0x100c80);
+	val &= 0xf000087f;
+	nvkm_wr32(device, 0x418880, val);
+	nvkm_wr32(device, 0x418890, 0);
+	nvkm_wr32(device, 0x418894, 0);
+
+	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
+	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
+	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
+
+	nvkm_wr32(device, 0x4188ac, nvkm_rd32(device, 0x100800));
+}
+
+static void
+gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
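+	/* program the per-SM hardware-warning (HWW) exception status report masks */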
+	nvkm_wr32(device, 0x419e44, 0xdffffe);
+	nvkm_wr32(device, 0x419e4c, 0x5);
+}
+
+static const struct gf100_gr_func
+gm20b_gr = {
+	.dtor = gk20a_gr_dtor,
+	.init = gk20a_gr_init,
+	.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
+	.set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
+	.ppc_nr = 1,
+	.grctx = &gm20b_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_B, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gk20a_gr_new_(&gm20b_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
new file mode 100644
index 000000000000..2e68919f00b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+gt200_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8397, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+gt200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&gt200_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
new file mode 100644
index 000000000000..2bf7aac360cc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+gt215_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8597, &nv50_gr_object },
+		{ -1, -1, 0x85c0, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+gt215_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&gt215_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
new file mode 100644
index 000000000000..95d5219faf93
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+mcp79_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8397, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+mcp79_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&mcp79_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
new file mode 100644
index 000000000000..027b58e5976b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+mcp89_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x85c0, &nv50_gr_object },
+		{ -1, -1, 0x8697, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+mcp89_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&mcp89_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 2614510c28d0..426ba0025a8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -21,13 +21,13 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <engine/gr.h>
+#include "priv.h"
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/instmem.h>
 #include <subdev/timer.h>
 
@@ -346,25 +346,23 @@ nv04_gr_ctx_regs[] = {
 	NV04_PGRAPH_DEBUG_3
 };
 
-struct nv04_gr_priv {
+#define nv04_gr(p) container_of((p), struct nv04_gr, base)
+
+struct nv04_gr {
 	struct nvkm_gr base;
 	struct nv04_gr_chan *chan[16];
 	spinlock_t lock;
 };
 
+#define nv04_gr_chan(p) container_of((p), struct nv04_gr_chan, object)
+
 struct nv04_gr_chan {
-	struct nvkm_object base;
+	struct nvkm_object object;
+	struct nv04_gr *gr;
 	int chid;
 	u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)];
 };
 
-
-static inline struct nv04_gr_priv *
-nv04_gr_priv(struct nv04_gr_chan *chan)
-{
-	return (void *)nv_object(chan)->engine;
-}
-
 /*******************************************************************************
  * Graphics object classes
  ******************************************************************************/
@@ -444,35 +442,34 @@ nv04_gr_priv(struct nv04_gr_chan *chan)
  */
 
 static void
-nv04_gr_set_ctx1(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx1(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+	int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
 	u32 tmp;
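+	/* patch the object's ctx1 word in instmem, then mirror it into PGRAPH's
+	 * current context-switch register and the per-subchannel cache
+	 */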
 
-	tmp  = nv_ro32(object, 0x00);
+	tmp  = nvkm_rd32(device, 0x700000 + inst);
 	tmp &= ~mask;
 	tmp |= value;
-	nv_wo32(object, 0x00, tmp);
+	nvkm_wr32(device, 0x700000 + inst, tmp);
 
-	nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp);
-	nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc << 2), tmp);
 }
 
 static void
-nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx_val(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
 {
 	int class, op, valid = 1;
 	u32 tmp, ctx1;
 
-	ctx1 = nv_ro32(object, 0x00);
+	ctx1 = nvkm_rd32(device, 0x700000 + inst);
 	class = ctx1 & 0xff;
 	op = (ctx1 >> 15) & 7;
 
-	tmp = nv_ro32(object, 0x0c);
+	tmp = nvkm_rd32(device, 0x70000c + inst);
 	tmp &= ~mask;
 	tmp |= value;
-	nv_wo32(object, 0x0c, tmp);
+	nvkm_wr32(device, 0x70000c + inst, tmp);
 
 	/* check for valid surf2d/surf_dst/surf_color */
 	if (!(tmp & 0x02000000))
@@ -504,527 +501,567 @@ nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
 		break;
 	}
 
-	nv04_gr_set_ctx1(object, 0x01000000, valid << 24);
+	nv04_gr_set_ctx1(device, inst, 0x01000000, valid << 24);
 }
 
-static int
-nv04_gr_mthd_set_operation(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_set_operation(struct nvkm_device *device, u32 inst, u32 data)
 {
-	u32 class = nv_ro32(object, 0) & 0xff;
-	u32 data = *(u32 *)args;
+	u8 class = nvkm_rd32(device, 0x700000 + inst) & 0x000000ff;
 	if (data > 5)
-		return 1;
+		return false;
 	/* Old versions of the objects only accept first three operations. */
 	if (data > 2 && class < 0x40)
-		return 1;
-	nv04_gr_set_ctx1(object, 0x00038000, data << 15);
+		return false;
+	nv04_gr_set_ctx1(device, inst, 0x00038000, data << 15);
 	/* changing operation changes set of objects needed for validation */
-	nv04_gr_set_ctx_val(object, 0, 0);
-	return 0;
+	nv04_gr_set_ctx_val(device, inst, 0, 0);
+	return true;
 }
 
-static int
-nv04_gr_mthd_surf3d_clip_h(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_surf3d_clip_h(struct nvkm_device *device, u32 inst, u32 data)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	u32 data = *(u32 *)args;
 	u32 min = data & 0xffff, max;
 	u32 w = data >> 16;
 	if (min & 0x8000)
 		/* too large */
-		return 1;
+		return false;
 	if (w & 0x8000)
 		/* yes, it accepts negative for some reason. */
 		w |= 0xffff0000;
 	max = min + w;
 	max &= 0x3ffff;
-	nv_wr32(priv, 0x40053c, min);
-	nv_wr32(priv, 0x400544, max);
-	return 0;
+	nvkm_wr32(device, 0x40053c, min);
+	nvkm_wr32(device, 0x400544, max);
+	return true;
 }
 
-static int
-nv04_gr_mthd_surf3d_clip_v(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_surf3d_clip_v(struct nvkm_device *device, u32 inst, u32 data)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	u32 data = *(u32 *)args;
 	u32 min = data & 0xffff, max;
 	u32 w = data >> 16;
 	if (min & 0x8000)
 		/* too large */
-		return 1;
+		return false;
 	if (w & 0x8000)
 		/* yes, it accepts negative for some reason. */
 		w |= 0xffff0000;
 	max = min + w;
 	max &= 0x3ffff;
-	nv_wr32(priv, 0x400540, min);
-	nv_wr32(priv, 0x400548, max);
-	return 0;
+	nvkm_wr32(device, 0x400540, min);
+	nvkm_wr32(device, 0x400548, max);
+	return true;
 }
 
-static u16
-nv04_gr_mthd_bind_class(struct nvkm_object *object, u32 *args, u32 size)
+static u8
+nv04_gr_mthd_bind_class(struct nvkm_device *device, u32 inst)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	u32 inst = *(u32 *)args << 4;
-	return nv_ro32(imem, inst);
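+	/* "inst" is the bound object's instance address in 16-byte units;
+	 * its class id lives in the low byte of the first instmem word
+	 */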
+	return nvkm_rd32(device, 0x700000 + (inst << 4));
 }
 
-static int
-nv04_gr_mthd_bind_surf2d(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf2d(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x42:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_object *object, u32 mthd,
-				 void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x42:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	case 0x52:
-		nv04_gr_set_ctx1(object, 0x00004000, 0x00004000);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0x00004000);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv01_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd,
-		       void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
+		return true;
 	case 0x18:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd,
-		       void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
+		return true;
 	case 0x44:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_rop(struct nvkm_object *object, u32 mthd,
-		      void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_rop(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x10000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0);
+		return true;
 	case 0x43:
-		nv04_gr_set_ctx_val(object, 0x10000000, 0x10000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0x10000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_beta1(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_beta1(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x20000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0);
+		return true;
 	case 0x12:
-		nv04_gr_set_ctx_val(object, 0x20000000, 0x20000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0x20000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_beta4(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_beta4(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x40000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0);
+		return true;
 	case 0x72:
-		nv04_gr_set_ctx_val(object, 0x40000000, 0x40000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0x40000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_dst(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_dst(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x58:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_src(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_src(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
+		return true;
 	case 0x59:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_color(struct nvkm_object *object, u32 mthd,
-			     void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_color(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x5a:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_zeta(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_zeta(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
+		return true;
 	case 0x5b:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv01_gr_mthd_bind_clip(struct nvkm_object *object, u32 mthd,
-		       void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_clip(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x2000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x2000, 0);
+		return true;
 	case 0x19:
-		nv04_gr_set_ctx1(object, 0x2000, 0x2000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x2000, 0x2000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv01_gr_mthd_bind_chroma(struct nvkm_object *object, u32 mthd,
-			 void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_chroma(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x1000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x1000, 0);
+		return true;
 	/* Yes, for some reason even the old versions of objects
 	 * accept 0x57 and not 0x17. Consistency be damned.
 	 */
 	case 0x57:
-		nv04_gr_set_ctx1(object, 0x1000, 0x1000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x1000, 0x1000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static struct nvkm_omthds
-nv03_gr_gdi_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_patt },
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_rop },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_beta1 },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
-
-static struct nvkm_omthds
-nv04_gr_gdi_omthds[] = {
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv03_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_patt; break;
+	case 0x0188: func = nv04_gr_mthd_bind_rop; break;
+	case 0x018c: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0190: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv01_gr_blit_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv01_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_surf_src },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_blit_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv01_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x019c: func = nv04_gr_mthd_bind_surf_src; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_iifc_omthds[] = {
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_chroma },
-	{ 0x018c, 0x018c, nv01_gr_mthd_bind_clip },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_patt },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_rop },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_beta1 },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_beta4 },
-	{ 0x01a0, 0x01a0, nv04_gr_mthd_bind_surf2d_swzsurf },
-	{ 0x03e4, 0x03e4, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv01_gr_ifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv01_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_iifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x018c: func = nv01_gr_mthd_bind_clip; break;
+	case 0x0190: func = nv04_gr_mthd_bind_patt; break;
+	case 0x0194: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0198: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x019c: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x01a0: func = nv04_gr_mthd_bind_surf2d_swzsurf; break;
+	case 0x03e4: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_ifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv01_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv03_gr_sifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_sifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv03_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv03_gr_sifm_omthds[] = {
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
-	{ 0x0304, 0x0304, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_sifm_omthds[] = {
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x0304, 0x0304, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv03_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x0304: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_surf3d_omthds[] = {
-	{ 0x02f8, 0x02f8, nv04_gr_mthd_surf3d_clip_h },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_surf3d_clip_v },
-	{}
-};
+static bool
+nv04_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x0304: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv03_gr_ttri_omthds[] = {
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_surf_color },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_surf_zeta },
-	{}
-};
+static bool
+nv04_gr_mthd_surf3d(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x02f8: func = nv04_gr_mthd_surf3d_clip_h; break;
+	case 0x02fc: func = nv04_gr_mthd_surf3d_clip_v; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv01_gr_prim_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_clip },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv03_gr_mthd_ttri(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv04_gr_mthd_bind_surf_color; break;
+	case 0x0190: func = nv04_gr_mthd_bind_surf_zeta; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_prim_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_clip },
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv01_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
+	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static int
-nv04_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+static bool
+nv04_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
+static bool
+nv04_gr_mthd(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32, u32);
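+	/* dispatch on the class id, read from the low byte of the object's
+	 * first instmem word
+	 */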
+	switch (nvkm_rd32(device, 0x700000 + inst) & 0x000000ff) {
+	case 0x1c ... 0x1e:
+		   func = nv01_gr_mthd_prim; break;
+	case 0x1f: func = nv01_gr_mthd_blit; break;
+	case 0x21: func = nv01_gr_mthd_ifc; break;
+	case 0x36: func = nv03_gr_mthd_sifc; break;
+	case 0x37: func = nv03_gr_mthd_sifm; break;
+	case 0x48: func = nv03_gr_mthd_ttri; break;
+	case 0x4a: func = nv04_gr_mthd_gdi; break;
+	case 0x4b: func = nv03_gr_mthd_gdi; break;
+	case 0x53: func = nv04_gr_mthd_surf3d; break;
+	case 0x5c ... 0x5e:
+		   func = nv04_gr_mthd_prim; break;
+	case 0x5f: func = nv04_gr_mthd_blit; break;
+	case 0x60: func = nv04_gr_mthd_iifc; break;
+	case 0x61: func = nv04_gr_mthd_ifc; break;
+	case 0x76: func = nv04_gr_mthd_sifc; break;
+	case 0x77: func = nv04_gr_mthd_sifm; break;
+	default:
+		return false;
+	}
+	return func(device, inst, mthd, data);
+}
 
-	nv_wo32(obj, 0x00, nv_mclass(obj));
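+/*
+ * A bound object occupies 16 bytes of instance memory: word 0 holds
+ * the class id, the remaining words are zeroed, and on big-endian
+ * hosts a flag (0x00080000, presumably requesting byte-swapped
+ * access) is set.
+ */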
+static int
+nv04_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
+{
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
+				  false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
 #ifdef __BIG_ENDIAN
-	nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
+		nvkm_mo32(*pgpuobj, 0x08, 0x00080000, 0x00080000);
 #endif
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-struct nvkm_ofuncs
-nv04_gr_ofuncs = {
-	.ctor = nv04_gr_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv04_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0017, &nv04_gr_ofuncs }, /* chroma */
-	{ 0x0018, &nv04_gr_ofuncs }, /* pattern (nv01) */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x001c, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* line */
-	{ 0x001d, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* tri */
-	{ 0x001e, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* rect */
-	{ 0x001f, &nv04_gr_ofuncs, nv01_gr_blit_omthds },
-	{ 0x0021, &nv04_gr_ofuncs, nv01_gr_ifc_omthds },
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0036, &nv04_gr_ofuncs, nv03_gr_sifc_omthds },
-	{ 0x0037, &nv04_gr_ofuncs, nv03_gr_sifm_omthds },
-	{ 0x0038, &nv04_gr_ofuncs }, /* dvd subpicture */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0042, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x0048, &nv04_gr_ofuncs, nv03_gr_ttri_omthds },
-	{ 0x004a, &nv04_gr_ofuncs, nv04_gr_gdi_omthds },
-	{ 0x004b, &nv04_gr_ofuncs, nv03_gr_gdi_omthds },
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x0053, &nv04_gr_ofuncs, nv04_gr_surf3d_omthds },
-	{ 0x0054, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0055, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0057, &nv04_gr_ofuncs }, /* chroma */
-	{ 0x0058, &nv04_gr_ofuncs }, /* surf_dst */
-	{ 0x0059, &nv04_gr_ofuncs }, /* surf_src */
-	{ 0x005a, &nv04_gr_ofuncs }, /* surf_color */
-	{ 0x005b, &nv04_gr_ofuncs }, /* surf_zeta */
-	{ 0x005c, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* line */
-	{ 0x005d, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* tri */
-	{ 0x005e, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* rect */
-	{ 0x005f, &nv04_gr_ofuncs, nv04_gr_blit_omthds },
-	{ 0x0060, &nv04_gr_ofuncs, nv04_gr_iifc_omthds },
-	{ 0x0061, &nv04_gr_ofuncs, nv04_gr_ifc_omthds },
-	{ 0x0064, &nv04_gr_ofuncs }, /* iifc (nv05) */
-	{ 0x0065, &nv04_gr_ofuncs }, /* ifc (nv05) */
-	{ 0x0066, &nv04_gr_ofuncs }, /* sifc (nv05) */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0076, &nv04_gr_ofuncs, nv04_gr_sifc_omthds },
-	{ 0x0077, &nv04_gr_ofuncs, nv04_gr_sifm_omthds },
-	{},
+const struct nvkm_object_func
+nv04_gr_object = {
+	.bind = nv04_gr_object_bind,
 };
 
 /*******************************************************************************
@@ -1032,13 +1069,14 @@ nv04_gr_sclass[] = {
  ******************************************************************************/
 
 static struct nv04_gr_chan *
-nv04_gr_channel(struct nv04_gr_priv *priv)
+nv04_gr_channel(struct nv04_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv04_gr_chan *chan = NULL;
-	if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
-		int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
-		if (chid < ARRAY_SIZE(priv->chan))
-			chan = priv->chan[chid];
+	if (nvkm_rd32(device, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
+		int chid = nvkm_rd32(device, NV04_PGRAPH_CTX_USER) >> 24;
+		if (chid < ARRAY_SIZE(gr->chan))
+			chan = gr->chan[chid];
 	}
 	return chan;
 }
@@ -1046,55 +1084,52 @@ nv04_gr_channel(struct nv04_gr_priv *priv)
 static int
 nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
 {
-	struct nv04_gr_priv *priv = nv04_gr_priv(chan);
+	struct nvkm_device *device = chan->gr->base.engine.subdev.device;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
-		nv_wr32(priv, nv04_gr_ctx_regs[i], chan->nv04[i]);
+		nvkm_wr32(device, nv04_gr_ctx_regs[i], chan->nv04[i]);
 
-	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
-	nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nvkm_mask(device, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
 	return 0;
 }
 
 static int
 nv04_gr_unload_context(struct nv04_gr_chan *chan)
 {
-	struct nv04_gr_priv *priv = nv04_gr_priv(chan);
+	struct nvkm_device *device = chan->gr->base.engine.subdev.device;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
-		chan->nv04[i] = nv_rd32(priv, nv04_gr_ctx_regs[i]);
+		chan->nv04[i] = nvkm_rd32(device, nv04_gr_ctx_regs[i]);
 
-	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
-	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
 	return 0;
 }
 
 static void
-nv04_gr_context_switch(struct nv04_gr_priv *priv)
+nv04_gr_context_switch(struct nv04_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv04_gr_chan *prev = NULL;
 	struct nv04_gr_chan *next = NULL;
-	unsigned long flags;
 	int chid;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	nv04_gr_idle(priv);
+	nv04_gr_idle(&gr->base);
 
 	/* If previous context is valid, we need to save it */
-	prev = nv04_gr_channel(priv);
+	prev = nv04_gr_channel(gr);
 	if (prev)
 		nv04_gr_unload_context(prev);
 
 	/* load context for next channel */
-	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
-	next = priv->chan[chid];
+	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
+	next = gr->chan[chid];
 	if (next)
 		nv04_gr_load_context(next, chid);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
@@ -1109,98 +1144,85 @@ static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
 	return NULL;
 }
 
-static int
-nv04_gr_context_ctor(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
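+/*
+ * Channel destructor: clear the gr->chan[] slot under gr->lock and
+ * hand the allocation back to the object core, which frees it.
+ */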
+static void *
+nv04_gr_chan_dtor(struct nvkm_object *object)
 {
-	struct nvkm_fifo_chan *fifo = (void *)parent;
-	struct nv04_gr_priv *priv = (void *)engine;
-	struct nv04_gr_chan *chan;
+	struct nv04_gr_chan *chan = nv04_gr_chan(object);
+	struct nv04_gr *gr = chan->gr;
 	unsigned long flags;
-	int ret;
-
-	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (priv->chan[fifo->chid]) {
-		*pobject = nv_object(priv->chan[fifo->chid]);
-		atomic_inc(&(*pobject)->refcount);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		nvkm_object_destroy(&chan->base);
-		return 1;
-	}
 
-	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
-
-	priv->chan[fifo->chid] = chan;
-	chan->chid = fifo->chid;
-	spin_unlock_irqrestore(&priv->lock, flags);
-	return 0;
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return chan;
 }
 
-static void
-nv04_gr_context_dtor(struct nvkm_object *object)
+static int
+nv04_gr_chan_fini(struct nvkm_object *object, bool suspend)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	struct nv04_gr_chan *chan = (void *)object;
+	struct nv04_gr_chan *chan = nv04_gr_chan(object);
+	struct nv04_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->chan[chan->chid] = NULL;
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	nvkm_object_destroy(&chan->base);
+	spin_lock_irqsave(&gr->lock, flags);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv04_gr_channel(gr) == chan)
+		nv04_gr_unload_context(chan);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return 0;
 }
 
+static const struct nvkm_object_func
+nv04_gr_chan = {
+	.dtor = nv04_gr_chan_dtor,
+	.fini = nv04_gr_chan_fini,
+};
+
 static int
-nv04_gr_context_fini(struct nvkm_object *object, bool suspend)
+nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	struct nv04_gr_chan *chan = (void *)object;
+	struct nv04_gr *gr = nv04_gr(base);
+	struct nv04_gr_chan *chan;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-	if (nv04_gr_channel(priv) == chan)
-		nv04_gr_unload_context(chan);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv04_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
 
-	return nvkm_object_fini(&chan->base, suspend);
-}
+	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
 
-static struct nvkm_oclass
-nv04_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_gr_context_ctor,
-		.dtor = nv04_gr_context_dtor,
-		.init = nvkm_object_init,
-		.fini = nv04_gr_context_fini,
-	},
-};
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = chan;
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return 0;
+}
 
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
 bool
-nv04_gr_idle(void *obj)
+nv04_gr_idle(struct nvkm_gr *gr)
 {
-	struct nvkm_gr *gr = nvkm_gr(obj);
+	struct nvkm_subdev *subdev = &gr->engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 mask = 0xffffffff;
 
-	if (nv_device(obj)->card_type == NV_40)
+	if (device->card_type == NV_40)
 		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
 
-	if (!nv_wait(gr, NV04_PGRAPH_STATUS, mask, 0)) {
-		nv_error(gr, "idle timed out with status 0x%08x\n",
-			 nv_rd32(gr, NV04_PGRAPH_STATUS));
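+	/*
+	 * Poll (for up to two seconds) for the engine to go idle.  NV40
+	 * masks out SYNC_STALL above, as that bit apparently never
+	 * settles on that family.
+	 */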
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "idle timed out with status %08x\n",
+			   nvkm_rd32(device, NV04_PGRAPH_STATUS));
 		return false;
 	}
 
@@ -1247,136 +1269,159 @@ nv04_gr_nsource[] = {
 };
 
 static void
-nv04_gr_intr(struct nvkm_subdev *subdev)
+nv04_gr_intr(struct nvkm_gr *base)
 {
-	struct nv04_gr_priv *priv = (void *)subdev;
-	struct nv04_gr_chan *chan = NULL;
-	struct nvkm_namedb *namedb = NULL;
-	struct nvkm_handle *handle = NULL;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv04_gr *gr = nv04_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 chid = (addr & 0x0f000000) >> 24;
 	u32 subc = (addr & 0x0000e000) >> 13;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff;
-	u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400180 + subc * 4) & 0xff;
+	u32 inst = (nvkm_rd32(device, 0x40016c) & 0xffff) << 4;
 	u32 show = stat;
+	char msg[128], src[128], sta[128];
+	struct nv04_gr_chan *chan;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	chan = priv->chan[chid];
-	if (chan)
-		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&gr->lock, flags);
+	chan = gr->chan[chid];
 
 	if (stat & NV_PGRAPH_INTR_NOTIFY) {
 		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
-			handle = nvkm_namedb_get_vinst(namedb, inst);
-			if (handle && !nv_call(handle->object, mthd, data))
+			if (!nv04_gr_mthd(device, inst, mthd, data))
 				show &= ~NV_PGRAPH_INTR_NOTIFY;
 		}
 	}
 
 	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
 		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
 		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-		nv04_gr_context_switch(priv);
+		nv04_gr_context_switch(gr);
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv04_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv04_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, nvkm_client_name(chan), subc, class, mthd,
-			 data);
+		nvkm_snprintbf(msg, sizeof(msg), nv04_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv04_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta, chid,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_namedb_put(handle);
+	spin_unlock_irqrestore(&gr->lock, flags);
 }
 
 static int
-nv04_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv04_gr_init(struct nvkm_gr *base)
 {
-	struct nv04_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv04_gr_intr;
-	nv_engine(priv)->cclass = &nv04_gr_cclass;
-	nv_engine(priv)->sclass = nv04_gr_sclass;
-	spin_lock_init(&priv->lock);
-	return 0;
-}
-
-static int
-nv04_gr_init(struct nvkm_object *object)
-{
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv04_gr_priv *priv = (void *)engine;
-	int ret;
-
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nv04_gr *gr = nv04_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 
 	/* Enable PGRAPH interrupts */
-	nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(priv, NV04_PGRAPH_VALID1, 0);
-	nv_wr32(priv, NV04_PGRAPH_VALID2, 0);
-	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nvkm_wr32(device, NV04_PGRAPH_VALID1, 0);
+	nvkm_wr32(device, NV04_PGRAPH_VALID2, 0);
+	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x1231c000);
 	/*1231C000 blob, 001 haiku*/
 	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x72111100);
 	/*0x72111100 blob , 01 haiku*/
-	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
 	/*haiku same*/
 
-	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
 	/*haiku and blob 10d4*/
 
-	nv_wr32(priv, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
-	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+	nvkm_wr32(device, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
+	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
 
 	/* These don't belong here, they're part of a per-channel context */
-	nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
 	return 0;
 }
 
-struct nvkm_oclass
-nv04_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_gr_ctor,
-		.dtor = _nvkm_gr_dtor,
-		.init = nv04_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv04_gr = {
+	.init = nv04_gr_init,
+	.intr = nv04_gr_intr,
+	.chan_new = nv04_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0017, &nv04_gr_object }, /* chroma */
+		{ -1, -1, 0x0018, &nv04_gr_object }, /* pattern (nv01) */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x001c, &nv04_gr_object }, /* line */
+		{ -1, -1, 0x001d, &nv04_gr_object }, /* tri */
+		{ -1, -1, 0x001e, &nv04_gr_object }, /* rect */
+		{ -1, -1, 0x001f, &nv04_gr_object },
+		{ -1, -1, 0x0021, &nv04_gr_object },
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0036, &nv04_gr_object },
+		{ -1, -1, 0x0037, &nv04_gr_object },
+		{ -1, -1, 0x0038, &nv04_gr_object }, /* dvd subpicture */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0042, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x0048, &nv04_gr_object },
+		{ -1, -1, 0x004a, &nv04_gr_object },
+		{ -1, -1, 0x004b, &nv04_gr_object },
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x0053, &nv04_gr_object },
+		{ -1, -1, 0x0054, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0055, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0057, &nv04_gr_object }, /* chroma */
+		{ -1, -1, 0x0058, &nv04_gr_object }, /* surf_dst */
+		{ -1, -1, 0x0059, &nv04_gr_object }, /* surf_src */
+		{ -1, -1, 0x005a, &nv04_gr_object }, /* surf_color */
+		{ -1, -1, 0x005b, &nv04_gr_object }, /* surf_zeta */
+		{ -1, -1, 0x005c, &nv04_gr_object }, /* line */
+		{ -1, -1, 0x005d, &nv04_gr_object }, /* tri */
+		{ -1, -1, 0x005e, &nv04_gr_object }, /* rect */
+		{ -1, -1, 0x005f, &nv04_gr_object },
+		{ -1, -1, 0x0060, &nv04_gr_object },
+		{ -1, -1, 0x0061, &nv04_gr_object },
+		{ -1, -1, 0x0064, &nv04_gr_object }, /* iifc (nv05) */
+		{ -1, -1, 0x0065, &nv04_gr_object }, /* ifc (nv05) */
+		{ -1, -1, 0x0066, &nv04_gr_object }, /* sifc (nv05) */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0076, &nv04_gr_object },
+		{ -1, -1, 0x0077, &nv04_gr_object },
+		{}
+	}
 };
+
+int
+nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	struct nv04_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	spin_lock_init(&gr->lock);
+	*pgr = &gr->base;
+
+	return nvkm_gr_ctor(&nv04_gr, device, index, 0x00001000,
+			    true, &gr->base);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index d6ace41830e9..4542867fa9e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -21,13 +21,13 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <engine/gr.h>
+#include "nv10.h"
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/fb.h>
 
 struct pipe_state {
@@ -386,14 +386,19 @@ static int nv17_gr_ctx_regs[] = {
 	0x00400a04,
 };
 
-struct nv10_gr_priv {
+#define nv10_gr(p) container_of((p), struct nv10_gr, base)
+
+struct nv10_gr {
 	struct nvkm_gr base;
 	struct nv10_gr_chan *chan[32];
 	spinlock_t lock;
 };
 
+#define nv10_gr_chan(p) container_of((p), struct nv10_gr_chan, object)
+
 struct nv10_gr_chan {
-	struct nvkm_object base;
+	struct nvkm_object object;
+	struct nv10_gr *gr;
 	int chid;
 	int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)];
 	int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)];
@@ -402,214 +407,151 @@ struct nv10_gr_chan {
 };
 
 
-static inline struct nv10_gr_priv *
-nv10_gr_priv(struct nv10_gr_chan *chan)
-{
-	return (void *)nv_object(chan)->engine;
-}
-
 /*******************************************************************************
  * Graphics object classes
  ******************************************************************************/
 
-#define PIPE_SAVE(priv, state, addr)					\
+#define PIPE_SAVE(gr, state, addr)					\
 	do {								\
 		int __i;						\
-		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
 		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
-			state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \
+			state[__i] = nvkm_rd32(device, NV10_PGRAPH_PIPE_DATA); \
 	} while (0)
 
-#define PIPE_RESTORE(priv, state, addr)					\
+#define PIPE_RESTORE(gr, state, addr)					\
 	do {								\
 		int __i;						\
-		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
 		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
-			nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+			nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, state[__i]); \
 	} while (0)
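+/*
+ * NB: both macros now rely on a 'device' variable in the calling
+ * scope; the first parameter is unused (call sites pass either 'gr'
+ * or 'device'), presumably kept only for readability.
+ */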
 
-static struct nvkm_oclass
-nv10_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
-	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0056, &nv04_gr_ofuncs }, /* celcius */
-	{},
-};
-
-static struct nvkm_oclass
-nv15_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
-	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0096, &nv04_gr_ofuncs }, /* celcius */
-	{},
-};
-
-static int
-nv17_gr_mthd_lma_window(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static void
+nv17_gr_mthd_lma_window(struct nv10_gr_chan *chan, u32 mthd, u32 data)
 {
-	struct nv10_gr_chan *chan = (void *)object->parent;
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nvkm_device *device = chan->object.engine->subdev.device;
+	struct nvkm_gr *gr = &chan->gr->base;
 	struct pipe_state *pipe = &chan->pipe_state;
 	u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
 	u32 xfmode0, xfmode1;
-	u32 data = *(u32 *)args;
 	int i;
 
 	chan->lma_window[(mthd - 0x1638) / 4] = data;
 
 	if (mthd != 0x1644)
-		return 0;
+		return;
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	PIPE_SAVE(priv, pipe_0x0040, 0x0040);
-	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+	PIPE_SAVE(device, pipe_0x0040, 0x0040);
+	PIPE_SAVE(device, pipe->pipe_0x0200, 0x0200);
 
-	PIPE_RESTORE(priv, chan->lma_window, 0x6790);
+	PIPE_RESTORE(device, chan->lma_window, 0x6790);
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
-	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+	xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
 
-	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
-	PIPE_SAVE(priv, pipe_0x64c0, 0x64c0);
-	PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
-	PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);
+	PIPE_SAVE(device, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(device, pipe_0x64c0, 0x64c0);
+	PIPE_SAVE(device, pipe_0x6ab0, 0x6ab0);
+	PIPE_SAVE(device, pipe_0x6a80, 0x6a80);
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
 
-	nv04_gr_idle(priv);
+	PIPE_RESTORE(device, pipe->pipe_0x0200, 0x0200);
 
-	PIPE_RESTORE(priv, pipe_0x0040, 0x0040);
+	nv04_gr_idle(gr);
 
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+	PIPE_RESTORE(device, pipe_0x0040, 0x0040);
 
-	PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0);
-	PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0);
-	PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
-	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+	PIPE_RESTORE(device, pipe_0x64c0, 0x64c0);
+	PIPE_RESTORE(device, pipe_0x6ab0, 0x6ab0);
+	PIPE_RESTORE(device, pipe_0x6a80, 0x6a80);
+	PIPE_RESTORE(device, pipe->pipe_0x4400, 0x4400);
 
-	nv04_gr_idle(priv);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	return 0;
+	nv04_gr_idle(gr);
 }
 
-static int
-nv17_gr_mthd_lma_enable(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static void
+nv17_gr_mthd_lma_enable(struct nv10_gr_chan *chan, u32 mthd, u32 data)
 {
-	struct nv10_gr_chan *chan = (void *)object->parent;
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nvkm_device *device = chan->object.engine->subdev.device;
+	struct nvkm_gr *gr = &chan->gr->base;
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
-	nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
-	return 0;
+	nvkm_mask(device, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
+	nvkm_mask(device, 0x4006b0, 0x08000000, 0x08000000);
 }
 
-static struct nvkm_omthds
-nv17_celcius_omthds[] = {
-	{ 0x1638, 0x1638, nv17_gr_mthd_lma_window },
-	{ 0x163c, 0x163c, nv17_gr_mthd_lma_window },
-	{ 0x1640, 0x1640, nv17_gr_mthd_lma_window },
-	{ 0x1644, 0x1644, nv17_gr_mthd_lma_window },
-	{ 0x1658, 0x1658, nv17_gr_mthd_lma_enable },
-	{}
-};
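+/*
+ * Methods 0x1638-0x1644 merely stage the four LMA window parameters;
+ * the pipe save/restore dance only runs once the last of them
+ * (0x1644) arrives.
+ */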
+static bool
+nv17_gr_mthd_celcius(struct nv10_gr_chan *chan, u32 mthd, u32 data)
+{
+	void (*func)(struct nv10_gr_chan *, u32, u32);
+	switch (mthd) {
+	case 0x1638 ... 0x1644:
+		     func = nv17_gr_mthd_lma_window; break;
+	case 0x1658: func = nv17_gr_mthd_lma_enable; break;
+	default:
+		return false;
+	}
+	func(chan, mthd, data);
+	return true;
+}
 
-static struct nvkm_oclass
-nv17_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
-	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0099, &nv04_gr_ofuncs, nv17_celcius_omthds },
-	{},
-};
+static bool
+nv10_gr_mthd(struct nv10_gr_chan *chan, u8 class, u32 mthd, u32 data)
+{
+	bool (*func)(struct nv10_gr_chan *, u32, u32);
+	switch (class) {
+	case 0x99: func = nv17_gr_mthd_celcius; break;
+	default:
+		return false;
+	}
+	return func(chan, mthd, data);
+}
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
 static struct nv10_gr_chan *
-nv10_gr_channel(struct nv10_gr_priv *priv)
+nv10_gr_channel(struct nv10_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv10_gr_chan *chan = NULL;
-	if (nv_rd32(priv, 0x400144) & 0x00010000) {
-		int chid = nv_rd32(priv, 0x400148) >> 24;
-		if (chid < ARRAY_SIZE(priv->chan))
-			chan = priv->chan[chid];
+	if (nvkm_rd32(device, 0x400144) & 0x00010000) {
+		int chid = nvkm_rd32(device, 0x400148) >> 24;
+		if (chid < ARRAY_SIZE(gr->chan))
+			chan = gr->chan[chid];
 	}
 	return chan;
 }
@@ -617,75 +559,78 @@ nv10_gr_channel(struct nv10_gr_priv *priv)
 static void
 nv10_gr_save_pipe(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
 	struct pipe_state *pipe = &chan->pipe_state;
-
-	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
-	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
-	PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400);
-	PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800);
-	PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00);
-	PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000);
-	PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400);
-	PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800);
-	PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040);
-	PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	PIPE_SAVE(gr, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(gr, pipe->pipe_0x0200, 0x0200);
+	PIPE_SAVE(gr, pipe->pipe_0x6400, 0x6400);
+	PIPE_SAVE(gr, pipe->pipe_0x6800, 0x6800);
+	PIPE_SAVE(gr, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_SAVE(gr, pipe->pipe_0x7000, 0x7000);
+	PIPE_SAVE(gr, pipe->pipe_0x7400, 0x7400);
+	PIPE_SAVE(gr, pipe->pipe_0x7800, 0x7800);
+	PIPE_SAVE(gr, pipe->pipe_0x0040, 0x0040);
+	PIPE_SAVE(gr, pipe->pipe_0x0000, 0x0000);
 }
 
 static void
 nv10_gr_load_pipe(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
 	struct pipe_state *pipe = &chan->pipe_state;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 xfmode0, xfmode1;
 	int i;
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(&gr->base);
 	/* XXX check haiku comments */
-	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
-	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
 
 
-	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
-	nv04_gr_idle(priv);
+	PIPE_RESTORE(gr, pipe->pipe_0x0200, 0x0200);
+	nv04_gr_idle(&gr->base);
 
 	/* restore XFMODE */
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
-	PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400);
-	PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800);
-	PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00);
-	PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000);
-	PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400);
-	PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800);
-	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
-	PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
-	PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
-	nv04_gr_idle(priv);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
+	PIPE_RESTORE(gr, pipe->pipe_0x6400, 0x6400);
+	PIPE_RESTORE(gr, pipe->pipe_0x6800, 0x6800);
+	PIPE_RESTORE(gr, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_RESTORE(gr, pipe->pipe_0x7000, 0x7000);
+	PIPE_RESTORE(gr, pipe->pipe_0x7400, 0x7400);
+	PIPE_RESTORE(gr, pipe->pipe_0x7800, 0x7800);
+	PIPE_RESTORE(gr, pipe->pipe_0x4400, 0x4400);
+	PIPE_RESTORE(gr, pipe->pipe_0x0000, 0x0000);
+	PIPE_RESTORE(gr, pipe->pipe_0x0040, 0x0040);
+	nv04_gr_idle(&gr->base);
 }
 
 static void
 nv10_gr_create_pipe(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct pipe_state *pipe_state = &chan->pipe_state;
 	u32 *pipe_state_addr;
 	int i;
@@ -698,7 +643,7 @@ nv10_gr_create_pipe(struct nv10_gr_chan *chan)
 		u32 *__end_addr = pipe_state->pipe_##addr + \
 				ARRAY_SIZE(pipe_state->pipe_##addr); \
 		if (pipe_state_addr != __end_addr) \
-			nv_error(priv, "incomplete pipe init for 0x%x :  %p/%p\n", \
+			nvkm_error(subdev, "incomplete pipe init for 0x%x :  %p/%p\n", \
 				addr, pipe_state_addr, __end_addr); \
 	} while (0)
 #define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
@@ -838,33 +783,36 @@ nv10_gr_create_pipe(struct nv10_gr_chan *chan)
 }
 
 static int
-nv10_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg)
+nv10_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	int i;
 	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) {
 		if (nv10_gr_ctx_regs[i] == reg)
 			return i;
 	}
-	nv_error(priv, "unknown offset nv10_ctx_regs %d\n", reg);
+	nvkm_error(subdev, "unknown offset nv10_ctx_regs %d\n", reg);
 	return -1;
 }
 
 static int
-nv17_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg)
+nv17_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	int i;
 	for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) {
 		if (nv17_gr_ctx_regs[i] == reg)
 			return i;
 	}
-	nv_error(priv, "unknown offset nv17_ctx_regs %d\n", reg);
+	nvkm_error(subdev, "unknown offset nv17_ctx_regs %d\n", reg);
 	return -1;
 }
 
 static void
 nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
 	u32 ctx_user, ctx_switch[5];
 	int i, subchan = -1;
@@ -876,7 +824,7 @@ nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
 
 	/* Look for a celsius object */
 	for (i = 0; i < 8; i++) {
-		int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+		int class = nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
 
 		if (class == 0x56 || class == 0x96 || class == 0x99) {
 			subchan = i;
@@ -888,159 +836,183 @@ nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
 		return;
 
 	/* Save the current ctx object */
-	ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER);
+	ctx_user = nvkm_rd32(device, NV10_PGRAPH_CTX_USER);
 	for (i = 0; i < 5; i++)
-		ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i));
+		ctx_switch[i] = nvkm_rd32(device, NV10_PGRAPH_CTX_SWITCH(i));
 
 	/* Save the FIFO state */
-	st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2);
-	st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL);
-	st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH);
-	fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+	st2 = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2);
+	st2_dl = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DL);
+	st2_dh = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DH);
+	fifo_ptr = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR);
 
 	for (i = 0; i < ARRAY_SIZE(fifo); i++)
-		fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i);
+		fifo[i] = nvkm_rd32(device, 0x4007a0 + 4 * i);
 
 	/* Switch to the celsius subchannel */
 	for (i = 0; i < 5; i++)
-		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i),
-			nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i)));
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+		nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i),
+			nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
 
 	/* Inject NV10TCL_DMA_VTXBUF */
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2,
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2,
 		0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
-	nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+	nvkm_mask(device, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
 
 	/* Restore the FIFO state */
 	for (i = 0; i < ARRAY_SIZE(fifo); i++)
-		nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]);
+		nvkm_wr32(device, 0x4007a0 + 4 * i, fifo[i]);
 
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, st2);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
 
 	/* Restore the current ctx object */
 	for (i = 0; i < 5; i++)
-		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
-	nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user);
+		nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_USER, ctx_user);
 }
 
 static int
 nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 inst;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
-		nv_wr32(priv, nv10_gr_ctx_regs[i], chan->nv10[i]);
+		nvkm_wr32(device, nv10_gr_ctx_regs[i], chan->nv10[i]);
 
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
-			nv_wr32(priv, nv17_gr_ctx_regs[i], chan->nv17[i]);
+			nvkm_wr32(device, nv17_gr_ctx_regs[i], chan->nv17[i]);
 	}
 
 	nv10_gr_load_pipe(chan);
 
-	inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
+	inst = nvkm_rd32(device, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
 	nv10_gr_load_dma_vtxbuf(chan, chid, inst);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
-	nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nvkm_mask(device, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
 	return 0;
 }
 
 static int
 nv10_gr_unload_context(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
-		chan->nv10[i] = nv_rd32(priv, nv10_gr_ctx_regs[i]);
+		chan->nv10[i] = nvkm_rd32(device, nv10_gr_ctx_regs[i]);
 
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
-			chan->nv17[i] = nv_rd32(priv, nv17_gr_ctx_regs[i]);
+			chan->nv17[i] = nvkm_rd32(device, nv17_gr_ctx_regs[i]);
 	}
 
 	nv10_gr_save_pipe(chan);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
 	return 0;
 }
 
 static void
-nv10_gr_context_switch(struct nv10_gr_priv *priv)
+nv10_gr_context_switch(struct nv10_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv10_gr_chan *prev = NULL;
 	struct nv10_gr_chan *next = NULL;
-	unsigned long flags;
 	int chid;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	nv04_gr_idle(priv);
+	nv04_gr_idle(&gr->base);
 
 	/* If previous context is valid, we need to save it */
-	prev = nv10_gr_channel(priv);
+	prev = nv10_gr_channel(gr);
 	if (prev)
 		nv10_gr_unload_context(prev);
 
 	/* load context for next channel */
-	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	next = priv->chan[chid];
+	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+	next = gr->chan[chid];
 	if (next)
 		nv10_gr_load_context(next, chid);
+}
+
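+/*
+ * On channel fini, PFIFO access to PGRAPH is stalled while the
+ * context is unloaded, but only if this channel is the one currently
+ * live on the hardware.
+ */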
+static int
+nv10_gr_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv10_gr_chan *chan = nv10_gr_chan(object);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	unsigned long flags;
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&gr->lock, flags);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv10_gr_channel(gr) == chan)
+		nv10_gr_unload_context(chan);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return 0;
+}
+
+static void *
+nv10_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct nv10_gr_chan *chan = nv10_gr_chan(object);
+	struct nv10_gr *gr = chan->gr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return chan;
 }
 
+static const struct nvkm_object_func
+nv10_gr_chan = {
+	.dtor = nv10_gr_chan_dtor,
+	.fini = nv10_gr_chan_fini,
+};
+
 #define NV_WRITE_CTX(reg, val) do { \
-	int offset = nv10_gr_ctx_regs_find_offset(priv, reg); \
+	int offset = nv10_gr_ctx_regs_find_offset(gr, reg); \
 	if (offset > 0) \
 		chan->nv10[offset] = val; \
 	} while (0)
 
 #define NV17_WRITE_CTX(reg, val) do { \
-	int offset = nv17_gr_ctx_regs_find_offset(priv, reg); \
+	int offset = nv17_gr_ctx_regs_find_offset(gr, reg); \
 	if (offset > 0) \
 		chan->nv17[offset] = val; \
 	} while (0)
 
-static int
-nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+int
+nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
-	struct nvkm_fifo_chan *fifo = (void *)parent;
-	struct nv10_gr_priv *priv = (void *)engine;
+	struct nv10_gr *gr = nv10_gr(base);
 	struct nv10_gr_chan *chan;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	unsigned long flags;
-	int ret;
-
-	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (priv->chan[fifo->chid]) {
-		*pobject = nv_object(priv->chan[fifo->chid]);
-		atomic_inc(&(*pobject)->refcount);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		nvkm_object_destroy(&chan->base);
-		return 1;
-	}
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv10_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
 
 	NV_WRITE_CTX(0x00400e88, 0x08000000);
 	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
@@ -1049,12 +1021,11 @@ nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	NV_WRITE_CTX(0x00400e14, 0x00001000);
 	NV_WRITE_CTX(0x00400e30, 0x00080008);
 	NV_WRITE_CTX(0x00400e34, 0x00080008);
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
 		/* is it really needed ??? */
 		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
-					nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
-		NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0));
+			       nvkm_rd32(device, NV10_PGRAPH_DEBUG_4));
+		NV17_WRITE_CTX(0x004006b0, nvkm_rd32(device, 0x004006b0));
 		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
 		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
 		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
@@ -1064,74 +1035,32 @@ nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 
 	nv10_gr_create_pipe(chan);
 
-	priv->chan[fifo->chid] = chan;
-	chan->chid = fifo->chid;
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = chan;
+	spin_unlock_irqrestore(&gr->lock, flags);
 	return 0;
 }
 
-static void
-nv10_gr_context_dtor(struct nvkm_object *object)
-{
-	struct nv10_gr_priv *priv = (void *)object->engine;
-	struct nv10_gr_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->chan[chan->chid] = NULL;
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	nvkm_object_destroy(&chan->base);
-}
-
-static int
-nv10_gr_context_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv10_gr_priv *priv = (void *)object->engine;
-	struct nv10_gr_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-	if (nv10_gr_channel(priv) == chan)
-		nv10_gr_unload_context(chan);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	return nvkm_object_fini(&chan->base, suspend);
-}
-
-static struct nvkm_oclass
-nv10_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_gr_context_ctor,
-		.dtor = nv10_gr_context_dtor,
-		.init = nvkm_object_init,
-		.fini = nv10_gr_context_fini,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static void
-nv10_gr_tile_prog(struct nvkm_engine *engine, int i)
+void
+nv10_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nvkm_fifo *pfifo = nvkm_fifo(engine);
-	struct nv10_gr_priv *priv = (void *)engine;
+	struct nv10_gr *gr = nv10_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 	unsigned long flags;
 
-	pfifo->pause(pfifo, &flags);
-	nv04_gr_idle(priv);
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
 
-	nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
-	nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
-	nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr);
+	nvkm_wr32(device, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nvkm_wr32(device, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nvkm_wr32(device, NV10_PGRAPH_TILE(i), tile->addr);
 
-	pfifo->start(pfifo, &flags);
+	nvkm_fifo_start(fifo, &flags);
 }
 
 const struct nvkm_bitfield nv10_gr_intr_name[] = {
@@ -1148,168 +1077,145 @@ const struct nvkm_bitfield nv10_gr_nstatus[] = {
 	{}
 };
 
-static void
-nv10_gr_intr(struct nvkm_subdev *subdev)
+void
+nv10_gr_intr(struct nvkm_gr *base)
 {
-	struct nv10_gr_priv *priv = (void *)subdev;
-	struct nv10_gr_chan *chan = NULL;
-	struct nvkm_namedb *namedb = NULL;
-	struct nvkm_handle *handle = NULL;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv10_gr *gr = nv10_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 chid = (addr & 0x01f00000) >> 20;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
 	u32 show = stat;
+	char msg[128], src[128], sta[128];
+	struct nv10_gr_chan *chan;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	chan = priv->chan[chid];
-	if (chan)
-		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&gr->lock, flags);
+	chan = gr->chan[chid];
 
 	if (stat & NV_PGRAPH_INTR_ERROR) {
 		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
-			handle = nvkm_namedb_get_class(namedb, class);
-			if (handle && !nv_call(handle->object, mthd, data))
+			if (!nv10_gr_mthd(chan, class, mthd, data))
 				show &= ~NV_PGRAPH_INTR_ERROR;
 		}
 	}
 
 	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
 		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
 		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-		nv10_gr_context_switch(priv);
+		nv10_gr_context_switch(gr);
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv10_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, nvkm_client_name(chan), subc, class, mthd,
-			 data);
+		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta, chid,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_namedb_put(handle);
+	spin_unlock_irqrestore(&gr->lock, flags);
 }
 
-static int
-nv10_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+nv10_gr_init(struct nvkm_gr *base)
 {
-	struct nv10_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv10_gr_intr;
-	nv_engine(priv)->cclass = &nv10_gr_cclass;
-
-	if (nv_device(priv)->chipset <= 0x10)
-		nv_engine(priv)->sclass = nv10_gr_sclass;
-	else
-	if (nv_device(priv)->chipset <  0x17 ||
-	    nv_device(priv)->card_type < NV_11)
-		nv_engine(priv)->sclass = nv15_gr_sclass;
-	else
-		nv_engine(priv)->sclass = nv17_gr_sclass;
-
-	nv_engine(priv)->tile_prog = nv10_gr_tile_prog;
-	spin_lock_init(&priv->lock);
-	return 0;
-}
-
-static void
-nv10_gr_dtor(struct nvkm_object *object)
-{
-	struct nv10_gr_priv *priv = (void *)object;
-	nvkm_gr_destroy(&priv->base);
-}
-
-static int
-nv10_gr_init(struct nvkm_object *object)
-{
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct nv10_gr_priv *priv = (void *)engine;
-	int ret, i;
-
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
-	/* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
-
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
-		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
-		nv_wr32(priv, 0x400a10, 0x03ff3fb6);
-		nv_wr32(priv, 0x400838, 0x002f8684);
-		nv_wr32(priv, 0x40083c, 0x00115f3f);
-		nv_wr32(priv, 0x4006b0, 0x40000020);
+	struct nv10_gr *gr = nv10_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	/* nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
+
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
+		nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+		nvkm_wr32(device, 0x400a10, 0x03ff3fb6);
+		nvkm_wr32(device, 0x400838, 0x002f8684);
+		nvkm_wr32(device, 0x40083c, 0x00115f3f);
+		nvkm_wr32(device, 0x4006b0, 0x40000020);
 	} else {
-		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
 	}
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
-
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_STATE, 0xFFFFFFFF);
 
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
 	return 0;
 }
 
-static int
-nv10_gr_fini(struct nvkm_object *object, bool suspend)
+int
+nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
 {
-	struct nv10_gr_priv *priv = (void *)object;
-	return nvkm_gr_fini(&priv->base, suspend);
+	struct nv10_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	spin_lock_init(&gr->lock);
+	*pgr = &gr->base;
+
+	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
 }
 
-struct nvkm_oclass
-nv10_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_gr_ctor,
-		.dtor = nv10_gr_dtor,
-		.init = nv10_gr_init,
-		.fini = nv10_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv10_gr = {
+	.init = nv10_gr_init,
+	.intr = nv10_gr_intr,
+	.tile = nv10_gr_tile,
+	.chan_new = nv10_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+		{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0056, &nv04_gr_object }, /* celcius */
+		{}
+	}
 };
+
+int
+nv10_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv10_gr_new_(&nv10_gr, device, index, pgr);
+}
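
The dominant mechanical change in this file is the switch from object-relative accessors (nv_rd32/nv_wr32/nv_mask on a priv pointer) to device-relative ones (nvkm_rd32/nvkm_wr32/nvkm_mask on a struct nvkm_device). A minimal sketch of the read-modify-write shape behind the nvkm_mask() calls above, assuming it is built from the plain read/write accessors (the real helper lives in the nvkm core headers):

	/* Sketch only: the read-modify-write pattern behind calls such as
	 * nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000).
	 * Assumed equivalent to the real helper; returns the prior value. */
	static u32
	example_mask(struct nvkm_device *device, u32 addr, u32 mask, u32 data)
	{
		u32 temp = nvkm_rd32(device, addr);
		nvkm_wr32(device, addr, (temp & ~mask) | data);
		return temp;
	}
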
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
new file mode 100644
index 000000000000..d7c3d86cc99d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -0,0 +1,13 @@
+#ifndef __NV10_GR_H__
+#define __NV10_GR_H__
+#include "priv.h"
+
+int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+		 struct nvkm_gr **);
+int nv10_gr_init(struct nvkm_gr *);
+void nv10_gr_intr(struct nvkm_gr *);
+void nv10_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
+
+int nv10_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+		     const struct nvkm_oclass *, struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
new file mode 100644
index 000000000000..3e2c6856b4c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "nv10.h"
+
+static const struct nvkm_gr_func
+nv15_gr = {
+	.init = nv10_gr_init,
+	.intr = nv10_gr_intr,
+	.tile = nv10_gr_tile,
+	.chan_new = nv10_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+		{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
+		{}
+	}
+};
+
+int
+nv15_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv10_gr_new_(&nv15_gr, device, index, pgr);
+}
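
The nv15 table differs from nv10's only in the celsius class entry (0x0096 here versus 0x0056 on nv10); everything else, including the shared init/intr/tile/chan_new hooks, is reused from nv10. Each table is terminated by the empty {} sentinel, which a lookup can rely on. An illustrative walk over such a table (entry layout simplified; field names here are assumptions, not the real nvkm definitions):

	#include <stddef.h>

	struct sclass_example {
		int minver, maxver;      /* -1/-1: any interface version */
		unsigned int oclass;     /* hw class, e.g. 0x0096 (celsius) */
		const void *objfunc;     /* object implementation */
	};

	static const void *
	sclass_find(const struct sclass_example *sc, unsigned int oclass)
	{
		for (; sc->oclass; sc++) /* trailing {} ends the table */
			if (sc->oclass == oclass)
				return sc->objfunc;
		return NULL;
	}
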
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
new file mode 100644
index 000000000000..12437d085a73
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "nv10.h"
+
+static const struct nvkm_gr_func
+nv17_gr = {
+	.init = nv10_gr_init,
+	.intr = nv10_gr_intr,
+	.tile = nv10_gr_tile,
+	.chan_new = nv10_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+		{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0099, &nv04_gr_object }, /* celcius */
+		{}
+	}
+};
+
+int
+nv17_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv10_gr_new_(&nv17_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 1713ffb669e8..5caef65d3c6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -2,375 +2,374 @@
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 
 /*******************************************************************************
- * Graphics object classes
+ * PGRAPH context
  ******************************************************************************/
 
-static struct nvkm_oclass
-nv20_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
-	{ 0x0097, &nv04_gr_ofuncs, NULL }, /* kelvin */
-	{ 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{},
-};
+int
+nv20_gr_chan_init(struct nvkm_object *object)
+{
+	struct nv20_gr_chan *chan = nv20_gr_chan(object);
+	struct nv20_gr *gr = chan->gr;
+	u32 inst = nvkm_memory_addr(chan->inst);
 
-/*******************************************************************************
- * PGRAPH context
- ******************************************************************************/
+	nvkm_kmap(gr->ctxtab);
+	nvkm_wo32(gr->ctxtab, chan->chid * 4, inst >> 4);
+	nvkm_done(gr->ctxtab);
+	return 0;
+}
+
+int
+nv20_gr_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv20_gr_chan *chan = nv20_gr_chan(object);
+	struct nv20_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 inst = nvkm_memory_addr(chan->inst);
+	int chid = -1;
+
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
+	if (nvkm_rd32(device, 0x400144) & 0x00010000)
+		chid = (nvkm_rd32(device, 0x400148) & 0x1f000000) >> 24;
+	if (chan->chid == chid) {
+		nvkm_wr32(device, 0x400784, inst >> 4);
+		nvkm_wr32(device, 0x400788, 0x00000002);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
+		nvkm_wr32(device, 0x400144, 0x10000000);
+		nvkm_mask(device, 0x400148, 0xff000000, 0x1f000000);
+	}
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
+
+	nvkm_kmap(gr->ctxtab);
+	nvkm_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
+	nvkm_done(gr->ctxtab);
+	return 0;
+}
+
+void *
+nv20_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct nv20_gr_chan *chan = nv20_gr_chan(object);
+	nvkm_memory_del(&chan->inst);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv20_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
 
 static int
-nv20_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x37f0,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv20_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x37f0, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x033c, 0xffff0000);
-	nv_wo32(chan, 0x03a0, 0x0fff0000);
-	nv_wo32(chan, 0x03a4, 0x0fff0000);
-	nv_wo32(chan, 0x047c, 0x00000101);
-	nv_wo32(chan, 0x0490, 0x00000111);
-	nv_wo32(chan, 0x04a8, 0x44400000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x047c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0490, 0x00000111);
+	nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
 	for (i = 0x04d4; i <= 0x04e0; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x04f4; i <= 0x0500; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(chan->inst, i, 0x00080000);
 	for (i = 0x050c; i <= 0x0518; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x051c; i <= 0x0528; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(chan->inst, i, 0x000105b8);
 	for (i = 0x052c; i <= 0x0538; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(chan->inst, i, 0x00080008);
 	for (i = 0x055c; i <= 0x0598; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05a4, 0x4b7fffff);
-	nv_wo32(chan, 0x05fc, 0x00000001);
-	nv_wo32(chan, 0x0604, 0x00004000);
-	nv_wo32(chan, 0x0610, 0x00000001);
-	nv_wo32(chan, 0x0618, 0x00040000);
-	nv_wo32(chan, 0x061c, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0604, 0x00004000);
+	nvkm_wo32(chan->inst, 0x0610, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0618, 0x00040000);
+	nvkm_wo32(chan->inst, 0x061c, 0x00010000);
 	for (i = 0x1c1c; i <= 0x248c; i += 16) {
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x281c, 0x3f800000);
-	nv_wo32(chan, 0x2830, 0x3f800000);
-	nv_wo32(chan, 0x285c, 0x40000000);
-	nv_wo32(chan, 0x2860, 0x3f800000);
-	nv_wo32(chan, 0x2864, 0x3f000000);
-	nv_wo32(chan, 0x286c, 0x40000000);
-	nv_wo32(chan, 0x2870, 0x3f800000);
-	nv_wo32(chan, 0x2878, 0xbf800000);
-	nv_wo32(chan, 0x2880, 0xbf800000);
-	nv_wo32(chan, 0x34a4, 0x000fe000);
-	nv_wo32(chan, 0x3530, 0x000003f8);
-	nv_wo32(chan, 0x3540, 0x002fe000);
+	nvkm_wo32(chan->inst, 0x281c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2830, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x285c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2860, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2864, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x286c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2870, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2878, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2880, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x34a4, 0x000fe000);
+	nvkm_wo32(chan->inst, 0x3530, 0x000003f8);
+	nvkm_wo32(chan->inst, 0x3540, 0x002fe000);
 	for (i = 0x355c; i <= 0x3578; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
-	return 0;
-}
-
-int
-nv20_gr_context_init(struct nvkm_object *object)
-{
-	struct nv20_gr_priv *priv = (void *)object->engine;
-	struct nv20_gr_chan *chan = (void *)object;
-	int ret;
-
-	ret = nvkm_gr_context_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
+		nvkm_wo32(chan->inst, i, 0x001c527c);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-int
-nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv20_gr_priv *priv = (void *)object->engine;
-	struct nv20_gr_chan *chan = (void *)object;
-	int chid = -1;
-
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
-	if (nv_rd32(priv, 0x400144) & 0x00010000)
-		chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
-	if (chan->chid == chid) {
-		nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
-		nv_wr32(priv, 0x400788, 0x00000002);
-		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
-		nv_wr32(priv, 0x400144, 0x10000000);
-		nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
-	}
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
-
-	nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
-	return nvkm_gr_context_fini(&chan->base, suspend);
-}
-
-static struct nvkm_oclass
-nv20_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x20),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv20_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
 void
-nv20_gr_tile_prog(struct nvkm_engine *engine, int i)
+nv20_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nvkm_fifo *pfifo = nvkm_fifo(engine);
-	struct nv20_gr_priv *priv = (void *)engine;
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 	unsigned long flags;
 
-	pfifo->pause(pfifo, &flags);
-	nv04_gr_idle(priv);
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
 
-	nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
-	nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
-	nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+	nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+	nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+	nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
 
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch);
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->limit);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->pitch);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->addr);
 
-	if (nv_device(engine)->chipset != 0x34) {
-		nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+	if (device->chipset != 0x34) {
+		nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->zcomp);
 	}
 
-	pfifo->start(pfifo, &flags);
+	nvkm_fifo_start(fifo, &flags);
 }
 
 void
-nv20_gr_intr(struct nvkm_subdev *subdev)
+nv20_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle;
-	struct nv20_gr_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 chid = (addr & 0x01f00000) >> 20;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
 	u32 show = stat;
+	char msg[128], src[128], sta[128];
+	unsigned long flags;
 
-	engctx = nvkm_engctx_get(engine, chid);
-	if (stat & NV_PGRAPH_INTR_ERROR) {
-		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-			handle = nvkm_handle_get_class(engctx, class);
-			if (handle && !nv_call(handle->object, mthd, data))
-				show &= ~NV_PGRAPH_INTR_ERROR;
-			nvkm_handle_put(handle);
-		}
-	}
+	chan = nvkm_fifo_chan_chid(device->fifo, chid, &flags);
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv10_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, nvkm_client_name(engctx), subc, class, mthd,
-			 data);
+		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta, chid,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_engctx_put(engctx);
-}
-
-static int
-nv20_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv20_gr_cclass;
-	nv_engine(priv)->sclass = nv20_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
-void
-nv20_gr_dtor(struct nvkm_object *object)
+int
+nv20_gr_oneinit(struct nvkm_gr *base)
 {
-	struct nv20_gr_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->ctxtab);
-	nvkm_gr_destroy(&priv->base);
+	struct nv20_gr *gr = nv20_gr(base);
+	return nvkm_memory_new(gr->base.engine.subdev.device,
+			       NVKM_MEM_TARGET_INST, 32 * 4, 16,
+			       true, &gr->ctxtab);
 }
 
 int
-nv20_gr_init(struct nvkm_object *object)
+nv20_gr_init(struct nvkm_gr *base)
 {
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv20_gr_priv *priv = (void *)engine;
-	struct nvkm_fb *pfb = nvkm_fb(object);
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 tmp, vramsz;
-	int ret, i;
-
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	int i;
 
-	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
+	nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+			  nvkm_memory_addr(gr->ctxtab) >> 4);
 
-	if (nv_device(priv)->chipset == 0x20) {
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
+	if (device->chipset == 0x20) {
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
 		for (i = 0; i < 15; i++)
-			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
-		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 	} else {
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
 		for (i = 0; i < 32; i++)
-			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
-		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
-	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
-	nv_wr32(priv, 0x40009C           , 0x00000040);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
+	nvkm_wr32(device, 0x40009C           , 0x00000040);
 
-	if (nv_device(priv)->chipset >= 0x25) {
-		nv_wr32(priv, 0x400890, 0x00a8cfff);
-		nv_wr32(priv, 0x400610, 0x304B1FB6);
-		nv_wr32(priv, 0x400B80, 0x1cbd3883);
-		nv_wr32(priv, 0x400B84, 0x44000000);
-		nv_wr32(priv, 0x400098, 0x40000080);
-		nv_wr32(priv, 0x400B88, 0x000000ff);
+	if (device->chipset >= 0x25) {
+		nvkm_wr32(device, 0x400890, 0x00a8cfff);
+		nvkm_wr32(device, 0x400610, 0x304B1FB6);
+		nvkm_wr32(device, 0x400B80, 0x1cbd3883);
+		nvkm_wr32(device, 0x400B84, 0x44000000);
+		nvkm_wr32(device, 0x400098, 0x40000080);
+		nvkm_wr32(device, 0x400B88, 0x000000ff);
 
 	} else {
-		nv_wr32(priv, 0x400880, 0x0008c7df);
-		nv_wr32(priv, 0x400094, 0x00000005);
-		nv_wr32(priv, 0x400B80, 0x45eae20e);
-		nv_wr32(priv, 0x400B84, 0x24000000);
-		nv_wr32(priv, 0x400098, 0x00000040);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
+		nvkm_wr32(device, 0x400880, 0x0008c7df);
+		nvkm_wr32(device, 0x400094, 0x00000005);
+		nvkm_wr32(device, 0x400B80, 0x45eae20e);
+		nvkm_wr32(device, 0x400B84, 0x24000000);
+		nvkm_wr32(device, 0x400098, 0x00000040);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030);
 	}
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
+	nvkm_wr32(device, 0x4009a0, nvkm_rd32(device, 0x100324));
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, nvkm_rd32(device, 0x100324));
 
-	nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
-
-	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00;
-	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
-	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
-	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
+	tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+	nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
+	tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) | 0x00020100;
+	nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
 
 	/* begin RAM config */
-	vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
-	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
-	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200));
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204));
-	nv_wr32(priv, 0x400820, 0);
-	nv_wr32(priv, 0x400824, 0);
-	nv_wr32(priv, 0x400864, vramsz - 1);
-	nv_wr32(priv, 0x400868, vramsz - 1);
+	vramsz = device->func->resource_size(device, 1) - 1;
+	nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+	nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100200));
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100204));
+	nvkm_wr32(device, 0x400820, 0);
+	nvkm_wr32(device, 0x400824, 0);
+	nvkm_wr32(device, 0x400864, vramsz - 1);
+	nvkm_wr32(device, 0x400868, vramsz - 1);
 
 	/* interesting.. the below overwrites some of the tile setup above.. */
-	nv_wr32(priv, 0x400B20, 0x00000000);
-	nv_wr32(priv, 0x400B04, 0xFFFFFFFF);
+	nvkm_wr32(device, 0x400B20, 0x00000000);
+	nvkm_wr32(device, 0x400B04, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
 	return 0;
 }
 
-struct nvkm_oclass
-nv20_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x20),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv20_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv20_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+void *
+nv20_gr_dtor(struct nvkm_gr *base)
+{
+	struct nv20_gr *gr = nv20_gr(base);
+	nvkm_memory_del(&gr->ctxtab);
+	return gr;
+}
+
+int
+nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
+{
+	struct nv20_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+
+	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv20_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv20_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv20_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
+		{ -1, -1, 0x0097, &nv04_gr_object }, /* kelvin */
+		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{}
+	}
 };
+
+int
+nv20_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv20_gr, device, index, pgr);
+}
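
The nv_wait(priv, 0x400700, 0xffffffff, 0x00000000) polls are replaced by nvkm_msec() loops whose body break;s once PGRAPH reports idle. Roughly what those invocations amount to, as an open-coded sketch (assumptions: jiffies-based timing stands in for the real timer subdev, and the macro's return value, used for timeout detection, is omitted):

	static void
	poll_pgraph_idle_example(struct nvkm_device *device)
	{
		/* Spin until register 0x400700 reads zero (engine idle) or
		 * roughly 2000ms pass, as in nvkm_msec(device, 2000, ...). */
		unsigned long timeout = jiffies + msecs_to_jiffies(2000);

		while (time_before(jiffies, timeout)) {
			if (!nvkm_rd32(device, 0x400700))
				break;
			cpu_relax();
		}
	}
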
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index ac4dc048fed1..cdf4501e3798 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -1,26 +1,33 @@
 #ifndef __NV20_GR_H__
 #define __NV20_GR_H__
-#include <engine/gr.h>
+#define nv20_gr(p) container_of((p), struct nv20_gr, base)
+#include "priv.h"
 
-struct nv20_gr_priv {
+struct nv20_gr {
 	struct nvkm_gr base;
-	struct nvkm_gpuobj *ctxtab;
+	struct nvkm_memory *ctxtab;
 };
 
-struct nv20_gr_chan {
-	struct nvkm_gr_chan base;
-	int chid;
-};
+int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *,
+		 int, struct nvkm_gr **);
+void *nv20_gr_dtor(struct nvkm_gr *);
+int nv20_gr_oneinit(struct nvkm_gr *);
+int nv20_gr_init(struct nvkm_gr *);
+void nv20_gr_intr(struct nvkm_gr *);
+void nv20_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
 
-extern struct nvkm_oclass nv25_gr_sclass[];
-int  nv20_gr_context_init(struct nvkm_object *);
-int  nv20_gr_context_fini(struct nvkm_object *, bool);
+int nv30_gr_init(struct nvkm_gr *);
 
-void nv20_gr_tile_prog(struct nvkm_engine *, int);
-void nv20_gr_intr(struct nvkm_subdev *);
+#define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
 
-void nv20_gr_dtor(struct nvkm_object *);
-int  nv20_gr_init(struct nvkm_object *);
+struct nv20_gr_chan {
+	struct nvkm_object object;
+	struct nv20_gr *gr;
+	int chid;
+	struct nvkm_memory *inst;
+};
 
-int  nv30_gr_init(struct nvkm_object *);
+void *nv20_gr_chan_dtor(struct nvkm_object *);
+int nv20_gr_chan_init(struct nvkm_object *);
+int nv20_gr_chan_fini(struct nvkm_object *, bool);
 #endif
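
The nv20_gr() and nv20_gr_chan() macros are container_of() downcasts: given a pointer to the embedded base member, they recover the enclosing chipset structure. A generic, self-contained illustration of the pattern (all names below are invented for the example):

	#include <stddef.h>

	/* Classic container_of(): step back from a member pointer to the
	 * struct embedding it.  The kernel's version adds type checking. */
	#define container_of_example(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base_example { int id; };

	struct outer_example {
		struct base_example base; /* embedded, as in struct nv20_gr */
		int extra;
	};

	static struct outer_example *
	to_outer(struct base_example *b)
	{
		return container_of_example(b, struct outer_example, base);
	}
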
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index bc362519cebb..6c4a00819b4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -1,158 +1,134 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
- * Graphics object classes
+ * PGRAPH context
  ******************************************************************************/
 
-struct nvkm_oclass
-nv25_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
-	{ 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0597, &nv04_gr_ofuncs, NULL }, /* kelvin */
-	{},
+static const struct nvkm_object_func
+nv25_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
 };
 
-/*******************************************************************************
- * PGRAPH context
- ******************************************************************************/
-
 static int
-nv25_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x3724,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv25_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x3724, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x035c, 0xffff0000);
-	nv_wo32(chan, 0x03c0, 0x0fff0000);
-	nv_wo32(chan, 0x03c4, 0x0fff0000);
-	nv_wo32(chan, 0x049c, 0x00000101);
-	nv_wo32(chan, 0x04b0, 0x00000111);
-	nv_wo32(chan, 0x04c8, 0x00000080);
-	nv_wo32(chan, 0x04cc, 0xffff0000);
-	nv_wo32(chan, 0x04d0, 0x00000001);
-	nv_wo32(chan, 0x04e4, 0x44400000);
-	nv_wo32(chan, 0x04fc, 0x4b800000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x035c, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x03c0, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x03c4, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x049c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x04b0, 0x00000111);
+	nvkm_wo32(chan->inst, 0x04c8, 0x00000080);
+	nvkm_wo32(chan->inst, 0x04cc, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x04d0, 0x00000001);
+	nvkm_wo32(chan->inst, 0x04e4, 0x44400000);
+	nvkm_wo32(chan->inst, 0x04fc, 0x4b800000);
 	for (i = 0x0510; i <= 0x051c; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x0530; i <= 0x053c; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(chan->inst, i, 0x00080000);
 	for (i = 0x0548; i <= 0x0554; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0558; i <= 0x0564; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(chan->inst, i, 0x000105b8);
 	for (i = 0x0568; i <= 0x0574; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(chan->inst, i, 0x00080008);
 	for (i = 0x0598; i <= 0x05d4; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05e0, 0x4b7fffff);
-	nv_wo32(chan, 0x0620, 0x00000080);
-	nv_wo32(chan, 0x0624, 0x30201000);
-	nv_wo32(chan, 0x0628, 0x70605040);
-	nv_wo32(chan, 0x062c, 0xb0a09080);
-	nv_wo32(chan, 0x0630, 0xf0e0d0c0);
-	nv_wo32(chan, 0x0664, 0x00000001);
-	nv_wo32(chan, 0x066c, 0x00004000);
-	nv_wo32(chan, 0x0678, 0x00000001);
-	nv_wo32(chan, 0x0680, 0x00040000);
-	nv_wo32(chan, 0x0684, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x05e0, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x0620, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0624, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0628, 0x70605040);
+	nvkm_wo32(chan->inst, 0x062c, 0xb0a09080);
+	nvkm_wo32(chan->inst, 0x0630, 0xf0e0d0c0);
+	nvkm_wo32(chan->inst, 0x0664, 0x00000001);
+	nvkm_wo32(chan->inst, 0x066c, 0x00004000);
+	nvkm_wo32(chan->inst, 0x0678, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0680, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0684, 0x00010000);
 	for (i = 0x1b04; i <= 0x2374; i += 16) {
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x2704, 0x3f800000);
-	nv_wo32(chan, 0x2718, 0x3f800000);
-	nv_wo32(chan, 0x2744, 0x40000000);
-	nv_wo32(chan, 0x2748, 0x3f800000);
-	nv_wo32(chan, 0x274c, 0x3f000000);
-	nv_wo32(chan, 0x2754, 0x40000000);
-	nv_wo32(chan, 0x2758, 0x3f800000);
-	nv_wo32(chan, 0x2760, 0xbf800000);
-	nv_wo32(chan, 0x2768, 0xbf800000);
-	nv_wo32(chan, 0x308c, 0x000fe000);
-	nv_wo32(chan, 0x3108, 0x000003f8);
-	nv_wo32(chan, 0x3468, 0x002fe000);
+	nvkm_wo32(chan->inst, 0x2704, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2718, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2744, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2748, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x274c, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x2754, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2758, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2760, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2768, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x308c, 0x000fe000);
+	nvkm_wo32(chan->inst, 0x3108, 0x000003f8);
+	nvkm_wo32(chan->inst, 0x3468, 0x002fe000);
 	for (i = 0x3484; i <= 0x34a0; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(chan->inst, i, 0x001c527c);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv25_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x25),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv25_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
+static const struct nvkm_gr_func
+nv25_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv20_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv25_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
+		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv25_gr_cclass;
-	nv_engine(priv)->sclass = nv25_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
+int
+nv25_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv25_gr, device, index, pgr);
 }
-
-struct nvkm_oclass
-nv25_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x25),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv25_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv20_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-};
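
Context-image writes are now bracketed: nvkm_kmap() maps the backing nvkm_memory once, a batch of nvkm_wo32() stores follows, and nvkm_done() unmaps/flushes, replacing the old per-object nv_wo32() calls. A minimal usage sketch of the pattern seen in every chan_new() above (offsets and values are placeholders, not real context layout):

	static void
	ctx_fill_example(struct nvkm_memory *inst, int chid)
	{
		int i;

		nvkm_kmap(inst);
		/* channel-id tag, as written by the chan_new() functions */
		nvkm_wo32(inst, 0x0000, 0x00000001 | (chid << 24));
		for (i = 0x0100; i <= 0x010c; i += 4) /* placeholder range */
			nvkm_wo32(inst, i, 0x00000000);
		nvkm_done(inst);
	}
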
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index 22a5096e283d..3cad26dbc2b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -1,125 +1,125 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv2a_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv2a_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x36b0,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv2a_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x36b0, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x033c, 0xffff0000);
-	nv_wo32(chan, 0x03a0, 0x0fff0000);
-	nv_wo32(chan, 0x03a4, 0x0fff0000);
-	nv_wo32(chan, 0x047c, 0x00000101);
-	nv_wo32(chan, 0x0490, 0x00000111);
-	nv_wo32(chan, 0x04a8, 0x44400000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x047c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0490, 0x00000111);
+	nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
 	for (i = 0x04d4; i <= 0x04e0; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x04f4; i <= 0x0500; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(chan->inst, i, 0x00080000);
 	for (i = 0x050c; i <= 0x0518; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x051c; i <= 0x0528; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(chan->inst, i, 0x000105b8);
 	for (i = 0x052c; i <= 0x0538; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(chan->inst, i, 0x00080008);
 	for (i = 0x055c; i <= 0x0598; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05a4, 0x4b7fffff);
-	nv_wo32(chan, 0x05fc, 0x00000001);
-	nv_wo32(chan, 0x0604, 0x00004000);
-	nv_wo32(chan, 0x0610, 0x00000001);
-	nv_wo32(chan, 0x0618, 0x00040000);
-	nv_wo32(chan, 0x061c, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0604, 0x00004000);
+	nvkm_wo32(chan->inst, 0x0610, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0618, 0x00040000);
+	nvkm_wo32(chan->inst, 0x061c, 0x00010000);
 	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x269c, 0x3f800000);
-	nv_wo32(chan, 0x26b0, 0x3f800000);
-	nv_wo32(chan, 0x26dc, 0x40000000);
-	nv_wo32(chan, 0x26e0, 0x3f800000);
-	nv_wo32(chan, 0x26e4, 0x3f000000);
-	nv_wo32(chan, 0x26ec, 0x40000000);
-	nv_wo32(chan, 0x26f0, 0x3f800000);
-	nv_wo32(chan, 0x26f8, 0xbf800000);
-	nv_wo32(chan, 0x2700, 0xbf800000);
-	nv_wo32(chan, 0x3024, 0x000fe000);
-	nv_wo32(chan, 0x30a0, 0x000003f8);
-	nv_wo32(chan, 0x33fc, 0x002fe000);
+	nvkm_wo32(chan->inst, 0x269c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26b0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26dc, 0x40000000);
+	nvkm_wo32(chan->inst, 0x26e0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26e4, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x26ec, 0x40000000);
+	nvkm_wo32(chan->inst, 0x26f0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26f8, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2700, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x3024, 0x000fe000);
+	nvkm_wo32(chan->inst, 0x30a0, 0x000003f8);
+	nvkm_wo32(chan->inst, 0x33fc, 0x002fe000);
 	for (i = 0x341c; i <= 0x3438; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(chan->inst, i, 0x001c527c);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv2a_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x2a),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv2a_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
+static const struct nvkm_gr_func
+nv2a_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv20_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv2a_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
+		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv2a_gr_cclass;
-	nv_engine(priv)->sclass = nv25_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
+int
+nv2a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv2a_gr, device, index, pgr);
 }
-
-struct nvkm_oclass
-nv2a_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x2a),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv2a_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv20_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-};
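
One subtlety shared by all of the chan_new() implementations: the channel object is constructed and published through *pobject before nvkm_memory_new() can fail, so teardown of a half-built channel falls to nv20_gr_chan_dtor(). A hedged sketch of that contract (assumptions: the caller destroys the published object on error, and freeing a still-NULL instance is safe):

	struct chan_sketch {
		struct nvkm_object object;
		struct nvkm_memory *inst; /* may still be NULL on failure */
	};

	static int
	chan_new_sketch(const struct nvkm_oclass *oclass,
			struct nvkm_object **pobject)
	{
		struct chan_sketch *chan;

		if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
			return -ENOMEM;
		/* nvkm_object_ctor(...) would run here, as above */
		*pobject = &chan->object; /* published before full init */
		/* any later failure returns an error; the caller then
		 * unwinds through the object's .dtor */
		return 0;
	}
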
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index dcc84eb54fb6..69de8c6259fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -1,231 +1,198 @@
 #include "nv20.h"
 #include "regs.h"
 
-#include <core/device.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/fb.h>
 
 /*******************************************************************************
- * Graphics object classes
+ * PGRAPH context
  ******************************************************************************/
 
-static struct nvkm_oclass
-nv30_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
-	{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
-	{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
-	{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
-	{ 0x0397, &nv04_gr_ofuncs, NULL }, /* rankine */
-	{},
+static const struct nvkm_object_func
+nv30_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
 };
 
-/*******************************************************************************
- * PGRAPH context
- ******************************************************************************/
-
 static int
-nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x5f48,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv30_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x5f48, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x0410, 0x00000101);
-	nv_wo32(chan, 0x0424, 0x00000111);
-	nv_wo32(chan, 0x0428, 0x00000060);
-	nv_wo32(chan, 0x0444, 0x00000080);
-	nv_wo32(chan, 0x0448, 0xffff0000);
-	nv_wo32(chan, 0x044c, 0x00000001);
-	nv_wo32(chan, 0x0460, 0x44400000);
-	nv_wo32(chan, 0x048c, 0xffff0000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x0410, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0424, 0x00000111);
+	nvkm_wo32(chan->inst, 0x0428, 0x00000060);
+	nvkm_wo32(chan->inst, 0x0444, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0448, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x044c, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0460, 0x44400000);
+	nvkm_wo32(chan->inst, 0x048c, 0xffff0000);
 	for (i = 0x04e0; i < 0x04e8; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04ec, 0x00011100);
+		nvkm_wo32(chan->inst, i, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x04ec, 0x00011100);
 	for (i = 0x0508; i < 0x0548; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x0550, 0x4b7fffff);
-	nv_wo32(chan, 0x058c, 0x00000080);
-	nv_wo32(chan, 0x0590, 0x30201000);
-	nv_wo32(chan, 0x0594, 0x70605040);
-	nv_wo32(chan, 0x0598, 0xb8a89888);
-	nv_wo32(chan, 0x059c, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05b0, 0xb0000000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x0550, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x058c, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0590, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0594, 0x70605040);
+	nvkm_wo32(chan->inst, 0x0598, 0xb8a89888);
+	nvkm_wo32(chan->inst, 0x059c, 0xf8e8d8c8);
+	nvkm_wo32(chan->inst, 0x05b0, 0xb0000000);
 	for (i = 0x0600; i < 0x0640; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(chan->inst, i, 0x00010588);
 	for (i = 0x0640; i < 0x0680; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x06c0; i < 0x0700; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(chan->inst, i, 0x0008aae4);
 	for (i = 0x0700; i < 0x0740; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0740; i < 0x0780; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x085c, 0x00040000);
-	nv_wo32(chan, 0x0860, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x00080008);
+	nvkm_wo32(chan->inst, 0x085c, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0860, 0x00010000);
 	for (i = 0x0864; i < 0x0874; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(chan->inst, i, 0x00040004);
 	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 1, 0x0436086c);
-		nv_wo32(chan, i + 2, 0x000c001b);
+		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+		nvkm_wo32(chan->inst, i + 1, 0x0436086c);
+		nvkm_wo32(chan->inst, i + 2, 0x000c001b);
 	}
 	for (i = 0x30b8; i < 0x30c8; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x344c, 0x3f800000);
-	nv_wo32(chan, 0x3808, 0x3f800000);
-	nv_wo32(chan, 0x381c, 0x3f800000);
-	nv_wo32(chan, 0x3848, 0x40000000);
-	nv_wo32(chan, 0x384c, 0x3f800000);
-	nv_wo32(chan, 0x3850, 0x3f000000);
-	nv_wo32(chan, 0x3858, 0x40000000);
-	nv_wo32(chan, 0x385c, 0x3f800000);
-	nv_wo32(chan, 0x3864, 0xbf800000);
-	nv_wo32(chan, 0x386c, 0xbf800000);
+		nvkm_wo32(chan->inst, i, 0x0000ffff);
+	nvkm_wo32(chan->inst, 0x344c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3808, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x381c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3848, 0x40000000);
+	nvkm_wo32(chan->inst, 0x384c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3850, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x3858, 0x40000000);
+	nvkm_wo32(chan->inst, 0x385c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3864, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x386c, 0xbf800000);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv30_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x30),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv30_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv30_gr_cclass;
-	nv_engine(priv)->sclass = nv30_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
 int
-nv30_gr_init(struct nvkm_object *object)
+nv30_gr_init(struct nvkm_gr *base)
 {
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv20_gr_priv *priv = (void *)engine;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	int ret, i;
-
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
-
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
-	nv_wr32(priv, 0x400890, 0x01b463ff);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
-	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
-	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
-	nv_wr32(priv, 0x400B80, 0x1003d888);
-	nv_wr32(priv, 0x400B84, 0x0c000000);
-	nv_wr32(priv, 0x400098, 0x00000000);
-	nv_wr32(priv, 0x40009C, 0x0005ad00);
-	nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
-	nv_wr32(priv, 0x4000a0, 0x00000000);
-	nv_wr32(priv, 0x4000a4, 0x00000008);
-	nv_wr32(priv, 0x4008a8, 0xb784a400);
-	nv_wr32(priv, 0x400ba0, 0x002f8685);
-	nv_wr32(priv, 0x400ba4, 0x00231f3f);
-	nv_wr32(priv, 0x4008a4, 0x40000020);
-
-	if (nv_device(priv)->chipset == 0x34) {
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+			  nvkm_memory_addr(gr->ctxtab) >> 4);
+
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nvkm_wr32(device, 0x400890, 0x01b463ff);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+	nvkm_wr32(device, 0x400B80, 0x1003d888);
+	nvkm_wr32(device, 0x400B84, 0x0c000000);
+	nvkm_wr32(device, 0x400098, 0x00000000);
+	nvkm_wr32(device, 0x40009C, 0x0005ad00);
+	nvkm_wr32(device, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+	nvkm_wr32(device, 0x4000a0, 0x00000000);
+	nvkm_wr32(device, 0x4000a4, 0x00000008);
+	nvkm_wr32(device, 0x4008a8, 0xb784a400);
+	nvkm_wr32(device, 0x400ba0, 0x002f8685);
+	nvkm_wr32(device, 0x400ba4, 0x00231f3f);
+	nvkm_wr32(device, 0x4008a4, 0x40000020);
+
+	if (device->chipset == 0x34) {
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00200201);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000008);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000032);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000002);
 	}
 
-	nv_wr32(priv, 0x4000c0, 0x00000016);
-
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
+	nvkm_wr32(device, 0x4000c0, 0x00000016);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
-	nv_wr32(priv, 0x0040075c             , 0x00000001);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+	nvkm_wr32(device, 0x0040075c             , 0x00000001);
 
 	/* begin RAM config */
-	/* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
-	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
-	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
-	if (nv_device(priv)->chipset != 0x34) {
-		nv_wr32(priv, 0x400750, 0x00EA0000);
-		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x400750, 0x00EA0004);
-		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
+	/* vramsz = pci_resource_len(gr->dev->pdev, 1) - 1; */
+	nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+	nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+	if (device->chipset != 0x34) {
+		nvkm_wr32(device, 0x400750, 0x00EA0000);
+		nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x400750, 0x00EA0004);
+		nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100204));
 	}
+
 	return 0;
 }
 
-struct nvkm_oclass
-nv30_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x30),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv30_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv30_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv30_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv30_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv30_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+		{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+		{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
+		{}
+	}
 };
+
+int
+nv30_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv30_gr, device, index, pgr);
+}
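
The nv30 conversion above shows the two idioms this series applies across every pre-nv50 PGRAPH implementation: channel state moves from an implicit nvkm_gpuobj wrapper into an explicitly allocated nvkm_memory object, and all writes to it are bracketed by nvkm_kmap()/nvkm_done() so the backing store is mapped once per burst of writes rather than once per word. A minimal sketch of the new pattern, using only the calls introduced above; the size, alignment and offsets here are placeholders, not a real context image:

	/* Sketch: allocate zeroed instance memory and fill it under the
	 * new accessor API.  0x1000/16 and the offsets are illustrative. */
	static int
	fill_inst(struct nvkm_device *device, struct nvkm_memory **pinst)
	{
		int ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
					  0x1000, 16, true, pinst);
		if (ret)
			return ret;
		nvkm_kmap(*pinst);			/* map once */
		nvkm_wo32(*pinst, 0x0028, 0x00000001);	/* 32-bit writes */
		nvkm_done(*pinst);			/* flush/unmap */
		return 0;
	}
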
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 985b7f3306ae..2207dac23981 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -1,159 +1,135 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
- * Graphics object classes
+ * PGRAPH context
  ******************************************************************************/
 
-static struct nvkm_oclass
-nv34_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
-	{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
-	{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
-	{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
-	{ 0x0697, &nv04_gr_ofuncs, NULL }, /* rankine */
-	{},
+static const struct nvkm_object_func
+nv34_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
 };
 
-/*******************************************************************************
- * PGRAPH context
- ******************************************************************************/
-
 static int
-nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x46dc,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv34_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x46dc, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x040c, 0x01000101);
-	nv_wo32(chan, 0x0420, 0x00000111);
-	nv_wo32(chan, 0x0424, 0x00000060);
-	nv_wo32(chan, 0x0440, 0x00000080);
-	nv_wo32(chan, 0x0444, 0xffff0000);
-	nv_wo32(chan, 0x0448, 0x00000001);
-	nv_wo32(chan, 0x045c, 0x44400000);
-	nv_wo32(chan, 0x0480, 0xffff0000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x040c, 0x01000101);
+	nvkm_wo32(chan->inst, 0x0420, 0x00000111);
+	nvkm_wo32(chan->inst, 0x0424, 0x00000060);
+	nvkm_wo32(chan->inst, 0x0440, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x0448, 0x00000001);
+	nvkm_wo32(chan->inst, 0x045c, 0x44400000);
+	nvkm_wo32(chan->inst, 0x0480, 0xffff0000);
 	for (i = 0x04d4; i < 0x04dc; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04e0, 0x00011100);
+		nvkm_wo32(chan->inst, i, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x04e0, 0x00011100);
 	for (i = 0x04fc; i < 0x053c; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x0544, 0x4b7fffff);
-	nv_wo32(chan, 0x057c, 0x00000080);
-	nv_wo32(chan, 0x0580, 0x30201000);
-	nv_wo32(chan, 0x0584, 0x70605040);
-	nv_wo32(chan, 0x0588, 0xb8a89888);
-	nv_wo32(chan, 0x058c, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05a0, 0xb0000000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x0544, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x057c, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0580, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0584, 0x70605040);
+	nvkm_wo32(chan->inst, 0x0588, 0xb8a89888);
+	nvkm_wo32(chan->inst, 0x058c, 0xf8e8d8c8);
+	nvkm_wo32(chan->inst, 0x05a0, 0xb0000000);
 	for (i = 0x05f0; i < 0x0630; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(chan->inst, i, 0x00010588);
 	for (i = 0x0630; i < 0x0670; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x06b0; i < 0x06f0; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(chan->inst, i, 0x0008aae4);
 	for (i = 0x06f0; i < 0x0730; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0730; i < 0x0770; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x0850, 0x00040000);
-	nv_wo32(chan, 0x0854, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x00080008);
+	nvkm_wo32(chan->inst, 0x0850, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0854, 0x00010000);
 	for (i = 0x0858; i < 0x0868; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(chan->inst, i, 0x00040004);
 	for (i = 0x15ac; i <= 0x271c ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 1, 0x0436086c);
-		nv_wo32(chan, i + 2, 0x000c001b);
+		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+		nvkm_wo32(chan->inst, i + 1, 0x0436086c);
+		nvkm_wo32(chan->inst, i + 2, 0x000c001b);
 	}
 	for (i = 0x274c; i < 0x275c; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x2ae0, 0x3f800000);
-	nv_wo32(chan, 0x2e9c, 0x3f800000);
-	nv_wo32(chan, 0x2eb0, 0x3f800000);
-	nv_wo32(chan, 0x2edc, 0x40000000);
-	nv_wo32(chan, 0x2ee0, 0x3f800000);
-	nv_wo32(chan, 0x2ee4, 0x3f000000);
-	nv_wo32(chan, 0x2eec, 0x40000000);
-	nv_wo32(chan, 0x2ef0, 0x3f800000);
-	nv_wo32(chan, 0x2ef8, 0xbf800000);
-	nv_wo32(chan, 0x2f00, 0xbf800000);
+		nvkm_wo32(chan->inst, i, 0x0000ffff);
+	nvkm_wo32(chan->inst, 0x2ae0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2e9c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2eb0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2edc, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2ee0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2ee4, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x2eec, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2ef0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2ef8, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2f00, 0xbf800000);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv34_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x34),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv34_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
+static const struct nvkm_gr_func
+nv34_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv30_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv34_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+		{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+		{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv34_gr_cclass;
-	nv_engine(priv)->sclass = nv34_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
+int
+nv34_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv34_gr, device, index, pgr);
 }
-
-struct nvkm_oclass
-nv34_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x34),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv34_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv30_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-};
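
nv34 is structurally identical to nv30: it reuses nv30_gr_init and the nv20 channel hooks, differing only in the context image (0x46dc bytes instead of 0x5f48, with shifted offsets) and in the rankine class id (0x0697 instead of 0x0397). Per-chipset variation is therefore expressed entirely through the nvkm_gr_func table; a trimmed sketch of its shape follows. The -1, -1 pair in each sclass entry appears to leave the supported client-version range unrestricted, and a real table lists the full class set:

	/* Sketch only: where chipset variation lives in the func table. */
	static const struct nvkm_gr_func
	example_gr = {
		.dtor     = nv20_gr_dtor,
		.oneinit  = nv20_gr_oneinit,
		.init     = nv30_gr_init,	/* shared nv30/nv34/nv35 */
		.intr     = nv20_gr_intr,
		.tile     = nv20_gr_tile,
		.chan_new = nv34_gr_chan_new,	/* per-chipset ctx image */
		.sclass   = {
			{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine (nv34) */
			{}	/* zero-terminated */
		}
	};
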
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 707625f19ff5..740df0f52c38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -1,159 +1,135 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
- * Graphics object classes
+ * PGRAPH context
  ******************************************************************************/
 
-static struct nvkm_oclass
-nv35_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
-	{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
-	{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
-	{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
-	{ 0x0497, &nv04_gr_ofuncs, NULL }, /* rankine */
-	{},
+static const struct nvkm_object_func
+nv35_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
 };
 
-/*******************************************************************************
- * PGRAPH context
- ******************************************************************************/
-
 static int
-nv35_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x577c,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv35_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x577c, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x040c, 0x00000101);
-	nv_wo32(chan, 0x0420, 0x00000111);
-	nv_wo32(chan, 0x0424, 0x00000060);
-	nv_wo32(chan, 0x0440, 0x00000080);
-	nv_wo32(chan, 0x0444, 0xffff0000);
-	nv_wo32(chan, 0x0448, 0x00000001);
-	nv_wo32(chan, 0x045c, 0x44400000);
-	nv_wo32(chan, 0x0488, 0xffff0000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x040c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0420, 0x00000111);
+	nvkm_wo32(chan->inst, 0x0424, 0x00000060);
+	nvkm_wo32(chan->inst, 0x0440, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x0448, 0x00000001);
+	nvkm_wo32(chan->inst, 0x045c, 0x44400000);
+	nvkm_wo32(chan->inst, 0x0488, 0xffff0000);
 	for (i = 0x04dc; i < 0x04e4; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04e8, 0x00011100);
+		nvkm_wo32(chan->inst, i, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x04e8, 0x00011100);
 	for (i = 0x0504; i < 0x0544; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x054c, 0x4b7fffff);
-	nv_wo32(chan, 0x0588, 0x00000080);
-	nv_wo32(chan, 0x058c, 0x30201000);
-	nv_wo32(chan, 0x0590, 0x70605040);
-	nv_wo32(chan, 0x0594, 0xb8a89888);
-	nv_wo32(chan, 0x0598, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05ac, 0xb0000000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x054c, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x0588, 0x00000080);
+	nvkm_wo32(chan->inst, 0x058c, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0590, 0x70605040);
+	nvkm_wo32(chan->inst, 0x0594, 0xb8a89888);
+	nvkm_wo32(chan->inst, 0x0598, 0xf8e8d8c8);
+	nvkm_wo32(chan->inst, 0x05ac, 0xb0000000);
 	for (i = 0x0604; i < 0x0644; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(chan->inst, i, 0x00010588);
 	for (i = 0x0644; i < 0x0684; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x06c4; i < 0x0704; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(chan->inst, i, 0x0008aae4);
 	for (i = 0x0704; i < 0x0744; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0744; i < 0x0784; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x0860, 0x00040000);
-	nv_wo32(chan, 0x0864, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x00080008);
+	nvkm_wo32(chan->inst, 0x0860, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0864, 0x00010000);
 	for (i = 0x0868; i < 0x0878; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(chan->inst, i, 0x00040004);
 	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 4, 0x0436086c);
-		nv_wo32(chan, i + 8, 0x000c001b);
+		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+		nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+		nvkm_wo32(chan->inst, i + 8, 0x000c001b);
 	}
 	for (i = 0x30bc; i < 0x30cc; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x3450, 0x3f800000);
-	nv_wo32(chan, 0x380c, 0x3f800000);
-	nv_wo32(chan, 0x3820, 0x3f800000);
-	nv_wo32(chan, 0x384c, 0x40000000);
-	nv_wo32(chan, 0x3850, 0x3f800000);
-	nv_wo32(chan, 0x3854, 0x3f000000);
-	nv_wo32(chan, 0x385c, 0x40000000);
-	nv_wo32(chan, 0x3860, 0x3f800000);
-	nv_wo32(chan, 0x3868, 0xbf800000);
-	nv_wo32(chan, 0x3870, 0xbf800000);
+		nvkm_wo32(chan->inst, i, 0x0000ffff);
+	nvkm_wo32(chan->inst, 0x3450, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x380c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3820, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x384c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x3850, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3854, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x385c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x3860, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3868, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x3870, 0xbf800000);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv35_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x35),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv35_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
+static const struct nvkm_gr_func
+nv35_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv30_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv35_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+		{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+		{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv35_gr_cclass;
-	nv_engine(priv)->sclass = nv35_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
+int
+nv35_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv35_gr, device, index, pgr);
 }
-
-struct nvkm_oclass
-nv35_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x35),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv35_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv30_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-};
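
The nv35 variant completes the Rankine trio: 0x0397 (nv30), 0x0697 (nv34) and 0x0497 (nv35) differ only in their context images. Note that the conversion preserves the existing initial values bit for bit, including the oddity that the big fill loop in nv30/nv34 writes at i + 1 and i + 2 while nv35 uses the expected word strides i + 4 and i + 8; normalising that would change the context image and is out of scope for a mechanical API conversion. The channel id encoding at offset 0x0028 is common to all three:

	/* Sketch: the word written at 0x0028 of each nv2x/nv3x context
	 * image above: bit 0 set, channel id in bits 31:24 (the meaning
	 * of bit 0 is not documented in this patch). */
	static inline u32
	ctx_chid_word(int chid)
	{
		return 0x00000001 | (chid << 24);
	}
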
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 7e1937980e3f..ffa902ece872 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -25,26 +25,15 @@
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 #include <engine/fifo.h>
 
-struct nv40_gr_priv {
-	struct nvkm_gr base;
-	u32 size;
-};
-
-struct nv40_gr_chan {
-	struct nvkm_gr_chan base;
-};
-
-static u64
+u64
 nv40_gr_units(struct nvkm_gr *gr)
 {
-	struct nv40_gr_priv *priv = (void *)gr;
-
-	return nv_rd32(priv, 0x1540);
+	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
 }
 
 /*******************************************************************************
@@ -52,80 +41,29 @@ nv40_gr_units(struct nvkm_gr *gr)
  ******************************************************************************/
 
 static int
-nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 20, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
+				  false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
 #ifdef __BIG_ENDIAN
-	nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
+		nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
 #endif
-	nv_wo32(obj, 0x0c, 0x00000000);
-	nv_wo32(obj, 0x10, 0x00000000);
-	return 0;
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_ofuncs
-nv40_gr_ofuncs = {
-	.ctor = nv40_gr_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv40_gr_sclass[] = {
-	{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
-	{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
-	{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
-	{ 0x4097, &nv40_gr_ofuncs, NULL }, /* curie */
-	{},
-};
-
-static struct nvkm_oclass
-nv44_gr_sclass[] = {
-	{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
-	{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
-	{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
-	{ 0x4497, &nv40_gr_ofuncs, NULL }, /* curie */
-	{},
+const struct nvkm_object_func
+nv40_gr_object = {
+	.bind = nv40_gr_object_bind,
 };
 
 /*******************************************************************************
@@ -133,361 +71,334 @@ nv44_gr_sclass[] = {
  ******************************************************************************/
 
 static int
-nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nv40_gr_priv *priv = (void *)engine;
-	struct nv40_gr_chan *chan;
-	int ret;
-
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
-	nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
-	return 0;
+	struct nv40_gr_chan *chan = nv40_gr_chan(object);
+	struct nv40_gr *gr = chan->gr;
+	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		chan->inst = (*pgpuobj)->addr;
+		nvkm_kmap(*pgpuobj);
+		nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
 static int
-nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
+nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
 {
-	struct nv40_gr_priv *priv = (void *)object->engine;
-	struct nv40_gr_chan *chan = (void *)object;
-	u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
+	struct nv40_gr_chan *chan = nv40_gr_chan(object);
+	struct nv40_gr *gr = chan->gr;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 inst = 0x01000000 | chan->inst >> 4;
 	int ret = 0;
 
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
 
-	if (nv_rd32(priv, 0x40032c) == inst) {
+	if (nvkm_rd32(device, 0x40032c) == inst) {
 		if (suspend) {
-			nv_wr32(priv, 0x400720, 0x00000000);
-			nv_wr32(priv, 0x400784, inst);
-			nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
-			nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
-			if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
-				u32 insn = nv_rd32(priv, 0x400308);
-				nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
+			nvkm_wr32(device, 0x400720, 0x00000000);
+			nvkm_wr32(device, 0x400784, inst);
+			nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
+			nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
+			if (nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
+					break;
+			) < 0) {
+				u32 insn = nvkm_rd32(device, 0x400308);
+				nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
 				ret = -EBUSY;
 			}
 		}
 
-		nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
+		nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
 	}
 
-	if (nv_rd32(priv, 0x400330) == inst)
-		nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
+	if (nvkm_rd32(device, 0x400330) == inst)
+		nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);
 
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
 	return ret;
 }
 
-static struct nvkm_oclass
-nv40_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = nv40_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+static void *
+nv40_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct nv40_gr_chan *chan = nv40_gr_chan(object);
+	unsigned long flags;
+	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
+	list_del(&chan->head);
+	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv40_gr_chan = {
+	.dtor = nv40_gr_chan_dtor,
+	.fini = nv40_gr_chan_fini,
+	.bind = nv40_gr_chan_bind,
 };
 
+int
+nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nv40_gr_chan *chan;
+	unsigned long flags;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->fifo = fifoch;
+	*pobject = &chan->object;
+
+	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
+	list_add(&chan->head, &gr->chan);
+	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
+	return 0;
+}
+
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
 static void
-nv40_gr_tile_prog(struct nvkm_engine *engine, int i)
+nv40_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nvkm_fifo *pfifo = nvkm_fifo(engine);
-	struct nv40_gr_priv *priv = (void *)engine;
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 	unsigned long flags;
 
-	pfifo->pause(pfifo, &flags);
-	nv04_gr_idle(priv);
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
 
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x40:
 	case 0x41:
 	case 0x42:
 	case 0x43:
 	case 0x45:
-	case 0x4e:
-		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
-		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
-		switch (nv_device(priv)->chipset) {
+		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		switch (device->chipset) {
 		case 0x40:
 		case 0x45:
-			nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
-			nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+			nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+			nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
 			break;
 		case 0x41:
 		case 0x42:
 		case 0x43:
-			nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
-			nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+			nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+			nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
 			break;
 		default:
 			break;
 		}
 		break;
-	case 0x44:
-	case 0x4a:
-		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
-		break;
-	case 0x46:
-	case 0x4c:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-	case 0x63:
-	case 0x67:
-	case 0x68:
-		nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
-		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
-		switch (nv_device(priv)->chipset) {
-		case 0x47:
-		case 0x49:
-		case 0x4b:
-			nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
-			nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
-			break;
-		default:
-			break;
-		}
+		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+		nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
 		break;
 	default:
+		WARN_ON(1);
 		break;
 	}
 
-	pfifo->start(pfifo, &flags);
+	nvkm_fifo_start(fifo, &flags);
 }
 
-static void
-nv40_gr_intr(struct nvkm_subdev *subdev)
+void
+nv40_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle = NULL;
-	struct nv40_gr_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff;
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nv40_gr_chan *temp, *chan = NULL;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
 	u32 show = stat;
-	int chid;
-
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
+	char msg[128], src[128], sta[128];
+	unsigned long flags;
 
-	if (stat & NV_PGRAPH_INTR_ERROR) {
-		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-			handle = nvkm_handle_get_class(engctx, class);
-			if (handle && !nv_call(handle->object, mthd, data))
-				show &= ~NV_PGRAPH_INTR_ERROR;
-			nvkm_handle_put(handle);
+	spin_lock_irqsave(&gr->base.engine.lock, flags);
+	list_for_each_entry(temp, &gr->chan, head) {
+		if (temp->inst >> 4 == inst) {
+			chan = temp;
+			list_del(&chan->head);
+			list_add(&chan->head, &gr->chan);
+			break;
 		}
+	}
 
+	if (stat & NV_PGRAPH_INTR_ERROR) {
 		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
-			nv_mask(priv, 0x402000, 0, 0);
+			nvkm_mask(device, 0x402000, 0, 0);
 		}
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv10_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, inst << 4, nvkm_client_name(engctx), subc,
-			 class, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%08x %s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta,
+			   chan ? chan->fifo->chid : -1, inst << 4,
+			   chan ? chan->fifo->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_engctx_put(engctx);
+	spin_unlock_irqrestore(&gr->base.engine.lock, flags);
 }
 
-static int
-nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+nv40_gr_init(struct nvkm_gr *base)
 {
-	struct nv40_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv40_gr_intr;
-	nv_engine(priv)->cclass = &nv40_gr_cclass;
-	if (nv44_gr_class(priv))
-		nv_engine(priv)->sclass = nv44_gr_sclass;
-	else
-		nv_engine(priv)->sclass = nv40_gr_sclass;
-	nv_engine(priv)->tile_prog = nv40_gr_tile_prog;
-
-	priv->base.units = nv40_gr_units;
-	return 0;
-}
-
-static int
-nv40_gr_init(struct nvkm_object *object)
-{
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct nv40_gr_priv *priv = (void *)engine;
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int ret, i, j;
 	u32 vramsz;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
 	/* generate and upload context program */
-	ret = nv40_grctx_init(nv_device(priv), &priv->size);
+	ret = nv40_grctx_init(device, &gr->size);
 	if (ret)
 		return ret;
 
 	/* No context present currently */
-	nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+	nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
 
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
-	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
-	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
 
-	j = nv_rd32(priv, 0x1540) & 0xff;
+	j = nvkm_rd32(device, 0x1540) & 0xff;
 	if (j) {
 		for (i = 0; !(j & 1); j >>= 1, i++)
 			;
-		nv_wr32(priv, 0x405000, i);
+		nvkm_wr32(device, 0x405000, i);
 	}
 
-	if (nv_device(priv)->chipset == 0x40) {
-		nv_wr32(priv, 0x4009b0, 0x83280fff);
-		nv_wr32(priv, 0x4009b4, 0x000000a0);
+	if (device->chipset == 0x40) {
+		nvkm_wr32(device, 0x4009b0, 0x83280fff);
+		nvkm_wr32(device, 0x4009b4, 0x000000a0);
 	} else {
-		nv_wr32(priv, 0x400820, 0x83280eff);
-		nv_wr32(priv, 0x400824, 0x000000a0);
+		nvkm_wr32(device, 0x400820, 0x83280eff);
+		nvkm_wr32(device, 0x400824, 0x000000a0);
 	}
 
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x40:
 	case 0x45:
-		nv_wr32(priv, 0x4009b8, 0x0078e366);
-		nv_wr32(priv, 0x4009bc, 0x0000014c);
+		nvkm_wr32(device, 0x4009b8, 0x0078e366);
+		nvkm_wr32(device, 0x4009bc, 0x0000014c);
 		break;
 	case 0x41:
 	case 0x42: /* pciid also 0x00Cx */
 	/* case 0x0120: XXX (pciid) */
-		nv_wr32(priv, 0x400828, 0x007596ff);
-		nv_wr32(priv, 0x40082c, 0x00000108);
+		nvkm_wr32(device, 0x400828, 0x007596ff);
+		nvkm_wr32(device, 0x40082c, 0x00000108);
 		break;
 	case 0x43:
-		nv_wr32(priv, 0x400828, 0x0072cb77);
-		nv_wr32(priv, 0x40082c, 0x00000108);
+		nvkm_wr32(device, 0x400828, 0x0072cb77);
+		nvkm_wr32(device, 0x40082c, 0x00000108);
 		break;
 	case 0x44:
 	case 0x46: /* G72 */
 	case 0x4a:
 	case 0x4c: /* G7x-based C51 */
 	case 0x4e:
-		nv_wr32(priv, 0x400860, 0);
-		nv_wr32(priv, 0x400864, 0);
+		nvkm_wr32(device, 0x400860, 0);
+		nvkm_wr32(device, 0x400864, 0);
 		break;
 	case 0x47: /* G70 */
 	case 0x49: /* G71 */
 	case 0x4b: /* G73 */
-		nv_wr32(priv, 0x400828, 0x07830610);
-		nv_wr32(priv, 0x40082c, 0x0000016A);
+		nvkm_wr32(device, 0x400828, 0x07830610);
+		nvkm_wr32(device, 0x40082c, 0x0000016A);
 		break;
 	default:
 		break;
 	}
 
-	nv_wr32(priv, 0x400b38, 0x2ffff800);
-	nv_wr32(priv, 0x400b3c, 0x00006000);
+	nvkm_wr32(device, 0x400b38, 0x2ffff800);
+	nvkm_wr32(device, 0x400b3c, 0x00006000);
 
 	/* Tiling related stuff. */
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x44:
 	case 0x4a:
-		nv_wr32(priv, 0x400bc4, 0x1003d888);
-		nv_wr32(priv, 0x400bbc, 0xb7a7b500);
+		nvkm_wr32(device, 0x400bc4, 0x1003d888);
+		nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
 		break;
 	case 0x46:
-		nv_wr32(priv, 0x400bc4, 0x0000e024);
-		nv_wr32(priv, 0x400bbc, 0xb7a7b520);
+		nvkm_wr32(device, 0x400bc4, 0x0000e024);
+		nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
 		break;
 	case 0x4c:
 	case 0x4e:
 	case 0x67:
-		nv_wr32(priv, 0x400bc4, 0x1003d888);
-		nv_wr32(priv, 0x400bbc, 0xb7a7b540);
+		nvkm_wr32(device, 0x400bc4, 0x1003d888);
+		nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
 		break;
 	default:
 		break;
 	}
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
-
 	/* begin RAM config */
-	vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
-	switch (nv_device(priv)->chipset) {
+	vramsz = device->func->resource_size(device, 1) - 1;
+	switch (device->chipset) {
 	case 0x40:
-		nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
-		nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
-		nv_wr32(priv, 0x400820, 0);
-		nv_wr32(priv, 0x400824, 0);
-		nv_wr32(priv, 0x400864, vramsz);
-		nv_wr32(priv, 0x400868, vramsz);
+		nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+		nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
+		nvkm_wr32(device, 0x400820, 0);
+		nvkm_wr32(device, 0x400824, 0);
+		nvkm_wr32(device, 0x400864, vramsz);
+		nvkm_wr32(device, 0x400868, vramsz);
 		break;
 	default:
-		switch (nv_device(priv)->chipset) {
+		switch (device->chipset) {
 		case 0x41:
 		case 0x42:
 		case 0x43:
@@ -495,33 +406,70 @@ nv40_gr_init(struct nvkm_object *object)
 		case 0x4e:
 		case 0x44:
 		case 0x4a:
-			nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
-			nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
+			nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
+			nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
 			break;
 		default:
-			nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
-			nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
+			nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
+			nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
 			break;
 		}
-		nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
-		nv_wr32(priv, 0x400840, 0);
-		nv_wr32(priv, 0x400844, 0);
-		nv_wr32(priv, 0x4008A0, vramsz);
-		nv_wr32(priv, 0x4008A4, vramsz);
+		nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
+		nvkm_wr32(device, 0x400840, 0);
+		nvkm_wr32(device, 0x400844, 0);
+		nvkm_wr32(device, 0x4008A0, vramsz);
+		nvkm_wr32(device, 0x4008A4, vramsz);
 		break;
 	}
 
 	return 0;
 }
 
-struct nvkm_oclass
-nv40_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_gr_ctor,
-		.dtor = _nvkm_gr_dtor,
-		.init = nv40_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+int
+nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
+{
+	struct nv40_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+	INIT_LIST_HEAD(&gr->chan);
+
+	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv40_gr = {
+	.init = nv40_gr_init,
+	.intr = nv40_gr_intr,
+	.tile = nv40_gr_tile,
+	.units = nv40_gr_units,
+	.chan_new = nv40_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
+		{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
+		{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
+		{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
+		{ -1, -1, 0x4097, &nv40_gr_object }, /* curie */
+		{}
+	}
 };
+
+int
+nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv40_gr_new_(&nv40_gr, device, index, pgr);
+}
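
nv40 differs from the nv2x/nv3x paths: context images are generated by ctxprog (nv40_grctx_fill) and attached lazily through .bind, so the channel object itself carries only bookkeeping. The old nv_wait() helper becomes the nvkm_msec() polling macro, whose body runs until it break;s or the 2000ms budget expires (returning < 0 on timeout), and interrupt dispatch can no longer use the removed nvkm_engctx_get() reverse lookup; instead it walks a spinlock-protected per-engine list keyed on the context's instance address, moving the match to the head as a cheap MRU. A sketch of that lookup, where list_move() stands in for the list_del()/list_add() pair used above:

	/* Sketch: reverse-map a PGRAPH instance address to its channel.
	 * Caller holds gr->base.engine.lock, as nv40_gr_intr() does. */
	static struct nv40_gr_chan *
	nv40_lookup_chan(struct nv40_gr *gr, u32 inst)
	{
		struct nv40_gr_chan *temp, *chan = NULL;
		list_for_each_entry(temp, &gr->chan, head) {
			if (temp->inst >> 4 == inst) {
				chan = temp;
				list_move(&chan->head, &gr->chan);
				break;
			}
		}
		return chan;
	}
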
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index d852bd6de571..2812ed11f877 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -1,22 +1,45 @@
 #ifndef __NV40_GR_H__
 #define __NV40_GR_H__
-#include <engine/gr.h>
+#define nv40_gr(p) container_of((p), struct nv40_gr, base)
+#include "priv.h"
 
-#include <core/device.h>
-struct nvkm_gpuobj;
+struct nv40_gr {
+	struct nvkm_gr base;
+	u32 size;
+	struct list_head chan;
+};
+
+int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+		 struct nvkm_gr **);
+int nv40_gr_init(struct nvkm_gr *);
+void nv40_gr_intr(struct nvkm_gr *);
+u64 nv40_gr_units(struct nvkm_gr *);
+
+#define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
+
+struct nv40_gr_chan {
+	struct nvkm_object object;
+	struct nv40_gr *gr;
+	struct nvkm_fifo_chan *fifo;
+	u32 inst;
+	struct list_head head;
+};
+
+int nv40_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+		     const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nvkm_object_func nv40_gr_object;
 
 /* returns 1 if device is one of the nv4x using the 0x4497 object class,
  * helpful to determine a number of other hardware features
  */
 static inline int
-nv44_gr_class(void *priv)
+nv44_gr_class(struct nvkm_device *device)
 {
-	struct nvkm_device *device = nv_device(priv);
-
 	if ((device->chipset & 0xf0) == 0x60)
 		return 1;
 
-	return !(0x0baf & (1 << (device->chipset & 0x0f)));
+	return !(0x0baf & (1 << (device->chipset & 0x0f)));
 }
 
 int  nv40_grctx_init(struct nvkm_device *, u32 *size);
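
The nv44_gr_class() predicate packs the per-chipset choice between the 0x4097 and 0x4497 curie classes into a 16-bit mask indexed by the low nibble of the chipset id; the 0x6x IGPs are all 0x4497-class and short-circuit first. With 0x0baf (binary 0000 1011 1010 1111), set bits 0, 1, 2, 3, 5, 7, 8, 9 and 0xb select the chipsets that keep 0x4097 (nv40/41/42/43/45/47/49/4b), and clear bits yield 1 for the 0x4497 family (nv44/46/4a/4c/4e). A standalone userspace check of the logic, for illustration only:

	#include <stdio.h>

	static int is_4497_class(unsigned int chipset)
	{
		if ((chipset & 0xf0) == 0x60)
			return 1;
		return !(0x0baf & (1 << (chipset & 0x0f)));
	}

	int main(void)
	{
		printf("nv44: %d, nv47: %d, nv68: %d\n",
		       is_4497_class(0x44),	/* 1 */
		       is_4497_class(0x47),	/* 0 */
		       is_4497_class(0x68));	/* 1 */
		return 0;
	}
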
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
new file mode 100644
index 000000000000..45ff80254eb4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv40.h"
+#include "regs.h"
+
+#include <subdev/fb.h>
+#include <engine/fifo.h>
+
+static void
+nv44_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
+{
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
+	unsigned long flags;
+
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
+
+	switch (device->chipset) {
+	case 0x44:
+	case 0x4a:
+		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+		break;
+	case 0x46:
+	case 0x4c:
+	case 0x63:
+	case 0x67:
+	case 0x68:
+		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		break;
+	case 0x4e:
+		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	nvkm_fifo_start(fifo, &flags);
+}
+
+static const struct nvkm_gr_func
+nv44_gr = {
+	.init = nv40_gr_init,
+	.intr = nv40_gr_intr,
+	.tile = nv44_gr_tile,
+	.units = nv40_gr_units,
+	.chan_new = nv40_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
+		{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
+		{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
+		{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
+		{ -1, -1, 0x4497, &nv40_gr_object }, /* curie */
+		{}
+	}
+};
+
+int
+nv44_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv40_gr_new_(&nv44_gr, device, index, pgr);
+}
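
The nv40_gr()/nv44_gr()-style macros used throughout this series are plain container_of() downcasts: the per-chipset struct embeds the generic one, callbacks receive a pointer to the embedded base, and the macro recovers the enclosing struct. A self-contained sketch of the pattern, with names invented for illustration:

	#include <stddef.h>
	#include <stdio.h>

	struct gr_base { int index; };          /* stand-in for struct nvkm_gr */
	struct gr_nv40 {
		struct gr_base base;            /* embedded base, as in struct nv40_gr */
		int size;
	};

	/* equivalent of: #define nv40_gr(p) container_of((p), struct nv40_gr, base) */
	#define to_gr_nv40(p) \
		((struct gr_nv40 *)((char *)(p) - offsetof(struct gr_nv40, base)))

	static void
	tile_cb(struct gr_base *base)           /* func-table callbacks get the base */
	{
		struct gr_nv40 *gr = to_gr_nv40(base);
		printf("size = %d\n", gr->size);
	}

	int
	main(void)
	{
		struct gr_nv40 gr = { .base = { .index = 0 }, .size = 42 };
		tile_cb(&gr.base);              /* prints "size = 42" */
		return 0;
	}
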
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index 270d7cd63fc7..b19b912d5787 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -24,27 +24,13 @@
 #include "nv50.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
-#include <subdev/timer.h>
 
-struct nv50_gr_priv {
-	struct nvkm_gr base;
-	spinlock_t lock;
-	u32 size;
-};
-
-struct nv50_gr_chan {
-	struct nvkm_gr_chan base;
-};
-
-static u64
+u64
 nv50_gr_units(struct nvkm_gr *gr)
 {
-	struct nv50_gr_priv *priv = (void *)gr;
-
-	return nv_rd32(priv, 0x1540);
+	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
 }
 
 /*******************************************************************************
@@ -52,86 +38,25 @@ nv50_gr_units(struct nvkm_gr *gr)
  ******************************************************************************/
 
 static int
-nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
+				  align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_ofuncs
-nv50_gr_ofuncs = {
-	.ctor = nv50_gr_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv50_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x5097, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-g84_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x8297, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-gt200_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x8397, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-gt215_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x8597, &nv50_gr_ofuncs },
-	{ 0x85c0, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-mcp89_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x85c0, &nv50_gr_ofuncs },
-	{ 0x8697, &nv50_gr_ofuncs },
-	{}
+const struct nvkm_object_func
+nv50_gr_object = {
+	.bind = nv50_gr_object_bind,
 };
 
 /*******************************************************************************
@@ -139,160 +64,43 @@ mcp89_gr_sclass[] = {
  ******************************************************************************/
 
 static int
-nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nv50_gr_priv *priv = (void *)engine;
-	struct nv50_gr_chan *chan;
-	int ret;
-
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
-				     0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
-	return 0;
+	struct nv50_gr *gr = nv50_gr_chan(object)->gr;
+	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_oclass
-nv50_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PGRAPH engine/subdev functions
- ******************************************************************************/
-
-static const struct nvkm_bitfield nv50_pgr_status[] = {
-	{ 0x00000001, "BUSY" }, /* set when any bit is set */
-	{ 0x00000002, "DISPATCH" },
-	{ 0x00000004, "UNK2" },
-	{ 0x00000008, "UNK3" },
-	{ 0x00000010, "UNK4" },
-	{ 0x00000020, "UNK5" },
-	{ 0x00000040, "M2MF" },
-	{ 0x00000080, "UNK7" },
-	{ 0x00000100, "CTXPROG" },
-	{ 0x00000200, "VFETCH" },
-	{ 0x00000400, "CCACHE_PREGEOM" },
-	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
-	{ 0x00001000, "VCLIP" },
-	{ 0x00002000, "RATTR_APLANE" },
-	{ 0x00004000, "TRAST" },
-	{ 0x00008000, "CLIPID" },
-	{ 0x00010000, "ZCULL" },
-	{ 0x00020000, "ENG2D" },
-	{ 0x00040000, "RMASK" },
-	{ 0x00080000, "TPC_RAST" },
-	{ 0x00100000, "TPC_PROP" },
-	{ 0x00200000, "TPC_TEX" },
-	{ 0x00400000, "TPC_GEOM" },
-	{ 0x00800000, "TPC_MP" },
-	{ 0x01000000, "ROP" },
-	{}
-};
-
-static const char *const nv50_pgr_vstatus_0[] = {
-	"VFETCH", "CCACHE", "PREGEOM", "POSTGEOM", "VATTR", "STRMOUT", "VCLIP",
-	NULL
-};
-
-static const char *const nv50_pgr_vstatus_1[] = {
-	"TPC_RAST", "TPC_PROP", "TPC_TEX", "TPC_GEOM", "TPC_MP", NULL
-};
-
-static const char *const nv50_pgr_vstatus_2[] = {
-	"RATTR", "APLANE", "TRAST", "CLIPID", "ZCULL", "ENG2D", "RMASK",
-	"ROP", NULL
+static const struct nvkm_object_func
+nv50_gr_chan = {
+	.bind = nv50_gr_chan_bind,
 };
 
-static void
-nvkm_pgr_vstatus_print(struct nv50_gr_priv *priv, int r,
-		       const char *const units[], u32 status)
+int
+nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
-	int i;
-
-	nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nv50_gr_chan *chan;
 
-	for (i = 0; units[i] && status; i++) {
-		if ((status & 7) == 1)
-			pr_cont(" %s", units[i]);
-		status >>= 3;
-	}
-	if (status)
-		pr_cont(" (invalid: 0x%x)", status);
-	pr_cont("\n");
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	*pobject = &chan->object;
+	return 0;
 }
 
-static int
-g84_gr_tlb_flush(struct nvkm_engine *engine)
-{
-	struct nvkm_timer *ptimer = nvkm_timer(engine);
-	struct nv50_gr_priv *priv = (void *)engine;
-	bool idle, timeout = false;
-	unsigned long flags;
-	u64 start;
-	u32 tmp;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
-
-	start = ptimer->read(ptimer);
-	do {
-		idle = true;
-
-		for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-
-		for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-
-		for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-	} while (!idle &&
-		 !(timeout = ptimer->read(ptimer) - start > 2000000000));
-
-	if (timeout) {
-		nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
-
-		tmp = nv_rd32(priv, 0x400700);
-		nv_error(priv, "PGRAPH_STATUS  : 0x%08x", tmp);
-		nvkm_bitfield_print(nv50_pgr_status, tmp);
-		pr_cont("\n");
-
-		nvkm_pgr_vstatus_print(priv, 0, nv50_pgr_vstatus_0,
-				       nv_rd32(priv, 0x400380));
-		nvkm_pgr_vstatus_print(priv, 1, nv50_pgr_vstatus_1,
-				       nv_rd32(priv, 0x400384));
-		nvkm_pgr_vstatus_print(priv, 2, nv50_pgr_vstatus_2,
-				       nv_rd32(priv, 0x400388));
-	}
-
-
-	nv_wr32(priv, 0x100c80, 0x00000001);
-	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
-		nv_error(priv, "vm flush timeout\n");
-	nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->lock, flags);
-	return timeout ? -EBUSY : 0;
-}
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
 
 static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
 	{ 0x01, "STACK_UNDERFLOW" },
@@ -427,157 +235,172 @@ static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
 };
 
 static void
-nv50_priv_prop_trap(struct nv50_gr_priv *priv,
-		    u32 ustatus_addr, u32 ustatus, u32 tp)
+nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
 {
-	u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
-	u32 e10 = nv_rd32(priv, ustatus_addr + 0x08);
-	u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c);
-	u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
-	u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
-	u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
-	u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
+	u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
+	u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
+	u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
+	u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
+	u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
+	u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
+	char msg[128];
 
 	/* CUDA memory: l[], g[] or stack. */
 	if (ustatus & 0x00000080) {
 		if (e18 & 0x80000000) {
 			/* g[] read fault? */
-			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
+			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
 					 tp, e14, e10 | ((e18 >> 24) & 0x1f));
 			e18 &= ~0x1f000000;
 		} else if (e18 & 0xc) {
 			/* g[] write fault? */
-			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
+			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
 				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
 			e18 &= ~0x00000f80;
 		} else {
-			nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
+			nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
 				 tp, e14, e10);
 		}
 		ustatus &= ~0x00000080;
 	}
 	if (ustatus) {
-		nv_error(priv, "TRAP_PROP - TP %d -", tp);
-		nvkm_bitfield_print(nv50_gr_trap_prop, ustatus);
-		pr_cont(" - Address %02x%08x\n", e14, e10);
+		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
+		nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
+				   "Address %02x%08x\n",
+			   tp, ustatus, msg, e14, e10);
 	}
-	nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+	nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
 		 tp, e0c, e18, e1c, e20, e24);
 }
 
 static void
-nv50_priv_mp_trap(struct nv50_gr_priv *priv, int tpid, int display)
+nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
 {
-	u32 units = nv_rd32(priv, 0x1540);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 units = nvkm_rd32(device, 0x1540);
 	u32 addr, mp10, status, pc, oplow, ophigh;
+	char msg[128];
 	int i;
 	int mps = 0;
 	for (i = 0; i < 4; i++) {
 		if (!(units & 1 << (i+24)))
 			continue;
-		if (nv_device(priv)->chipset < 0xa0)
+		if (device->chipset < 0xa0)
 			addr = 0x408200 + (tpid << 12) + (i << 7);
 		else
 			addr = 0x408100 + (tpid << 11) + (i << 7);
-		mp10 = nv_rd32(priv, addr + 0x10);
-		status = nv_rd32(priv, addr + 0x14);
+		mp10 = nvkm_rd32(device, addr + 0x10);
+		status = nvkm_rd32(device, addr + 0x14);
 		if (!status)
 			continue;
 		if (display) {
-			nv_rd32(priv, addr + 0x20);
-			pc = nv_rd32(priv, addr + 0x24);
-			oplow = nv_rd32(priv, addr + 0x70);
-			ophigh = nv_rd32(priv, addr + 0x74);
-			nv_error(priv, "TRAP_MP_EXEC - "
-					"TP %d MP %d:", tpid, i);
-			nvkm_bitfield_print(nv50_mp_exec_errors, status);
-			pr_cont(" at %06x warp %d, opcode %08x %08x\n",
-					pc&0xffffff, pc >> 24,
-					oplow, ophigh);
+			nvkm_rd32(device, addr + 0x20);
+			pc = nvkm_rd32(device, addr + 0x24);
+			oplow = nvkm_rd32(device, addr + 0x70);
+			ophigh = nvkm_rd32(device, addr + 0x74);
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_mp_exec_errors, status);
+			nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
+					   "%08x [%s] at %06x warp %d, "
+					   "opcode %08x %08x\n",
+				   tpid, i, status, msg, pc & 0xffffff,
+				   pc >> 24, oplow, ophigh);
 		}
-		nv_wr32(priv, addr + 0x10, mp10);
-		nv_wr32(priv, addr + 0x14, 0);
+		nvkm_wr32(device, addr + 0x10, mp10);
+		nvkm_wr32(device, addr + 0x14, 0);
 		mps++;
 	}
 	if (!mps && display)
-		nv_error(priv, "TRAP_MP_EXEC - TP %d: "
+		nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
 				"No MPs claiming errors?\n", tpid);
 }
 
 static void
-nv50_priv_tp_trap(struct nv50_gr_priv *priv, int type, u32 ustatus_old,
+nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
 		  u32 ustatus_new, int display, const char *name)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 units = nvkm_rd32(device, 0x1540);
 	int tps = 0;
-	u32 units = nv_rd32(priv, 0x1540);
 	int i, r;
+	char msg[128];
 	u32 ustatus_addr, ustatus;
 	for (i = 0; i < 16; i++) {
 		if (!(units & (1 << i)))
 			continue;
-		if (nv_device(priv)->chipset < 0xa0)
+		if (device->chipset < 0xa0)
 			ustatus_addr = ustatus_old + (i << 12);
 		else
 			ustatus_addr = ustatus_new + (i << 11);
-		ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
 		if (!ustatus)
 			continue;
 		tps++;
 		switch (type) {
 		case 6: /* texture error... unknown for now */
 			if (display) {
-				nv_error(priv, "magic set %d:\n", i);
+				nvkm_error(subdev, "magic set %d:\n", i);
 				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
-					nv_error(priv, "\t0x%08x: 0x%08x\n", r,
-						nv_rd32(priv, r));
+					nvkm_error(subdev, "\t%08x: %08x\n", r,
+						   nvkm_rd32(device, r));
 				if (ustatus) {
-					nv_error(priv, "%s - TP%d:", name, i);
-					nvkm_bitfield_print(nv50_tex_traps,
-							       ustatus);
-					pr_cont("\n");
+					nvkm_snprintbf(msg, sizeof(msg),
+						       nv50_tex_traps, ustatus);
+					nvkm_error(subdev,
+						   "%s - TP%d: %08x [%s]\n",
+						   name, i, ustatus, msg);
 					ustatus = 0;
 				}
 			}
 			break;
 		case 7: /* MP error */
 			if (ustatus & 0x04030000) {
-				nv50_priv_mp_trap(priv, i, display);
+				nv50_gr_mp_trap(gr, i, display);
 				ustatus &= ~0x04030000;
 			}
 			if (ustatus && display) {
-				nv_error(priv, "%s - TP%d:", name, i);
-				nvkm_bitfield_print(nv50_mpc_traps, ustatus);
-				pr_cont("\n");
+				nvkm_snprintbf(msg, sizeof(msg),
+					       nv50_mpc_traps, ustatus);
+				nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
+					   name, i, ustatus, msg);
 				ustatus = 0;
 			}
 			break;
 		case 8: /* PROP error */
 			if (display)
-				nv50_priv_prop_trap(
-						priv, ustatus_addr, ustatus, i);
+				nv50_gr_prop_trap(
+						gr, ustatus_addr, ustatus, i);
 			ustatus = 0;
 			break;
 		}
 		if (ustatus) {
 			if (display)
-				nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+				nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
 		}
-		nv_wr32(priv, ustatus_addr, 0xc0000000);
+		nvkm_wr32(device, ustatus_addr, 0xc0000000);
 	}
 
 	if (!tps && display)
-		nv_warn(priv, "%s - No TPs claiming errors?\n", name);
+		nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
 }
 
 static int
-nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
-		     int chid, u64 inst, struct nvkm_object *engctx)
+nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
+		     int chid, u64 inst, const char *name)
 {
-	u32 status = nv_rd32(priv, 0x400108);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 status = nvkm_rd32(device, 0x400108);
 	u32 ustatus;
+	char msg[128];
 
 	if (!status && display) {
-		nv_error(priv, "TRAP: no units reporting traps?\n");
+		nvkm_error(subdev, "TRAP: no units reporting traps?\n");
 		return 1;
 	}
 
@@ -585,71 +408,72 @@ nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
 	 * COND, QUERY. If you get a trap from it, the command is still stuck
 	 * in DISPATCH and you need to do something about it. */
 	if (status & 0x001) {
-		ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
 		if (!ustatus && display) {
-			nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
+			nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
 		}
 
-		nv_wr32(priv, 0x400500, 0x00000000);
+		nvkm_wr32(device, 0x400500, 0x00000000);
 
 		/* Known to be triggered by screwed up NOTIFY and COND... */
 		if (ustatus & 0x00000001) {
-			u32 addr = nv_rd32(priv, 0x400808);
+			u32 addr = nvkm_rd32(device, 0x400808);
 			u32 subc = (addr & 0x00070000) >> 16;
 			u32 mthd = (addr & 0x00001ffc);
-			u32 datal = nv_rd32(priv, 0x40080c);
-			u32 datah = nv_rd32(priv, 0x400810);
-			u32 class = nv_rd32(priv, 0x400814);
-			u32 r848 = nv_rd32(priv, 0x400848);
+			u32 datal = nvkm_rd32(device, 0x40080c);
+			u32 datah = nvkm_rd32(device, 0x400810);
+			u32 class = nvkm_rd32(device, 0x400814);
+			u32 r848 = nvkm_rd32(device, 0x400848);
 
-			nv_error(priv, "TRAP DISPATCH_FAULT\n");
+			nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
 			if (display && (addr & 0x80000000)) {
-				nv_error(priv,
-					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
-					 chid, inst,
-					 nvkm_client_name(engctx), subc,
-					 class, mthd, datah, datal, addr, r848);
+				nvkm_error(subdev,
+					   "ch %d [%010llx %s] subc %d "
+					   "class %04x mthd %04x data %08x%08x "
+					   "400808 %08x 400848 %08x\n",
+					   chid, inst, name, subc, class, mthd,
+					   datah, datal, addr, r848);
 			} else
 			if (display) {
-				nv_error(priv, "no stuck command?\n");
+				nvkm_error(subdev, "no stuck command?\n");
 			}
 
-			nv_wr32(priv, 0x400808, 0);
-			nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
-			nv_wr32(priv, 0x400848, 0);
+			nvkm_wr32(device, 0x400808, 0);
+			nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
+			nvkm_wr32(device, 0x400848, 0);
 			ustatus &= ~0x00000001;
 		}
 
 		if (ustatus & 0x00000002) {
-			u32 addr = nv_rd32(priv, 0x40084c);
+			u32 addr = nvkm_rd32(device, 0x40084c);
 			u32 subc = (addr & 0x00070000) >> 16;
 			u32 mthd = (addr & 0x00001ffc);
-			u32 data = nv_rd32(priv, 0x40085c);
-			u32 class = nv_rd32(priv, 0x400814);
+			u32 data = nvkm_rd32(device, 0x40085c);
+			u32 class = nvkm_rd32(device, 0x400814);
 
-			nv_error(priv, "TRAP DISPATCH_QUERY\n");
+			nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
 			if (display && (addr & 0x80000000)) {
-				nv_error(priv,
-					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
-					 chid, inst,
-					 nvkm_client_name(engctx), subc,
-					 class, mthd, data, addr);
+				nvkm_error(subdev,
+					   "ch %d [%010llx %s] subc %d "
+					   "class %04x mthd %04x data %08x "
+					   "40084c %08x\n", chid, inst, name,
+					   subc, class, mthd, data, addr);
 			} else
 			if (display) {
-				nv_error(priv, "no stuck command?\n");
+				nvkm_error(subdev, "no stuck command?\n");
 			}
 
-			nv_wr32(priv, 0x40084c, 0);
+			nvkm_wr32(device, 0x40084c, 0);
 			ustatus &= ~0x00000002;
 		}
 
 		if (ustatus && display) {
-			nv_error(priv, "TRAP_DISPATCH (unknown "
-				      "0x%08x)\n", ustatus);
+			nvkm_error(subdev, "TRAP_DISPATCH "
+					   "(unknown %08x)\n", ustatus);
 		}
 
-		nv_wr32(priv, 0x400804, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x001);
+		nvkm_wr32(device, 0x400804, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x001);
 		status &= ~0x001;
 		if (!status)
 			return 0;
@@ -657,81 +481,91 @@ nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
 
 	/* M2MF: Memory to memory copy engine. */
 	if (status & 0x002) {
-		u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
+		u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_M2MF");
-			nvkm_bitfield_print(nv50_gr_trap_m2mf, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
-				nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
-				nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
-
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_m2mf, ustatus);
+			nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
+				   nvkm_rd32(device, 0x406804),
+				   nvkm_rd32(device, 0x406808),
+				   nvkm_rd32(device, 0x40680c),
+				   nvkm_rd32(device, 0x406810));
 		}
 
 		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(priv, 0x400040, 2);
-		nv_wr32(priv, 0x400040, 0);
-		nv_wr32(priv, 0x406800, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x002);
+		nvkm_wr32(device, 0x400040, 2);
+		nvkm_wr32(device, 0x400040, 0);
+		nvkm_wr32(device, 0x406800, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x002);
 		status &= ~0x002;
 	}
 
 	/* VFETCH: Fetches data from vertex buffers. */
 	if (status & 0x004) {
-		u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
+		u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_VFETCH");
-			nvkm_bitfield_print(nv50_gr_trap_vfetch, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
-				nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
-				nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_vfetch, ustatus);
+			nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
+				   nvkm_rd32(device, 0x400c00),
+				   nvkm_rd32(device, 0x400c08),
+				   nvkm_rd32(device, 0x400c0c),
+				   nvkm_rd32(device, 0x400c10));
 		}
 
-		nv_wr32(priv, 0x400c04, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x004);
+		nvkm_wr32(device, 0x400c04, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x004);
 		status &= ~0x004;
 	}
 
 	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
 	if (status & 0x008) {
-		ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_STRMOUT");
-			nvkm_bitfield_print(nv50_gr_trap_strmout, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
-				nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
-				nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
-
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_strmout, ustatus);
+			nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
+				   nvkm_rd32(device, 0x401804),
+				   nvkm_rd32(device, 0x401808),
+				   nvkm_rd32(device, 0x40180c),
+				   nvkm_rd32(device, 0x401810));
 		}
 
 		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(priv, 0x400040, 0x80);
-		nv_wr32(priv, 0x400040, 0);
-		nv_wr32(priv, 0x401800, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x008);
+		nvkm_wr32(device, 0x400040, 0x80);
+		nvkm_wr32(device, 0x400040, 0);
+		nvkm_wr32(device, 0x401800, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x008);
 		status &= ~0x008;
 	}
 
 	/* CCACHE: Handles code and c[] caches and fills them. */
 	if (status & 0x010) {
-		ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_CCACHE");
-			nvkm_bitfield_print(nv50_gr_trap_ccache, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
-				     " %08x %08x %08x\n",
-				nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
-				nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
-				nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
-				nv_rd32(priv, 0x40501c));
-
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_ccache, ustatus);
+			nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
+					   "%08x %08x %08x\n",
+				   nvkm_rd32(device, 0x405000),
+				   nvkm_rd32(device, 0x405004),
+				   nvkm_rd32(device, 0x405008),
+				   nvkm_rd32(device, 0x40500c),
+				   nvkm_rd32(device, 0x405010),
+				   nvkm_rd32(device, 0x405014),
+				   nvkm_rd32(device, 0x40501c));
 		}
 
-		nv_wr32(priv, 0x405018, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x010);
+		nvkm_wr32(device, 0x405018, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x010);
 		status &= ~0x010;
 	}
 
@@ -739,239 +573,174 @@ nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
 	 * remaining, so try to handle it anyway. Perhaps related to that
 	 * unknown DMA slot on tesla? */
 	if (status & 0x20) {
-		ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
 		if (display)
-			nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
-		nv_wr32(priv, 0x402000, 0xc0000000);
+			nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
+		nvkm_wr32(device, 0x402000, 0xc0000000);
+		/* no status modification on purpose */
 	}
 
 	/* TEXTURE: CUDA texturing units */
 	if (status & 0x040) {
-		nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
+		nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
 				    "TRAP_TEXTURE");
-		nv_wr32(priv, 0x400108, 0x040);
+		nvkm_wr32(device, 0x400108, 0x040);
 		status &= ~0x040;
 	}
 
 	/* MP: CUDA execution engines. */
 	if (status & 0x080) {
-		nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
+		nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
 				    "TRAP_MP");
-		nv_wr32(priv, 0x400108, 0x080);
+		nvkm_wr32(device, 0x400108, 0x080);
 		status &= ~0x080;
 	}
 
 	/* PROP:  Handles TP-initiated uncached memory accesses:
 	 * l[], g[], stack, 2d surfaces, render targets. */
 	if (status & 0x100) {
-		nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
+		nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
 				    "TRAP_PROP");
-		nv_wr32(priv, 0x400108, 0x100);
+		nvkm_wr32(device, 0x400108, 0x100);
 		status &= ~0x100;
 	}
 
 	if (status) {
 		if (display)
-			nv_error(priv, "TRAP: unknown 0x%08x\n", status);
-		nv_wr32(priv, 0x400108, status);
+			nvkm_error(subdev, "TRAP: unknown %08x\n", status);
+		nvkm_wr32(device, 0x400108, status);
 	}
 
 	return 1;
 }
 
-static void
-nv50_gr_intr(struct nvkm_subdev *subdev)
+void
+nv50_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle = NULL;
-	struct nv50_gr_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x400100);
-	u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
-	u32 addr = nv_rd32(priv, 0x400704);
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	u32 stat = nvkm_rd32(device, 0x400100);
+	u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
+	u32 addr = nvkm_rd32(device, 0x400704);
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, 0x400708);
-	u32 class = nv_rd32(priv, 0x400814);
+	u32 data = nvkm_rd32(device, 0x400708);
+	u32 class = nvkm_rd32(device, 0x400814);
 	u32 show = stat, show_bitfield = stat;
-	int chid;
-
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
-	if (stat & 0x00000010) {
-		handle = nvkm_handle_get_class(engctx, class);
-		if (handle && !nv_call(handle->object, mthd, data))
-			show &= ~0x00000010;
-		nvkm_handle_put(handle);
+	const struct nvkm_enum *en;
+	unsigned long flags;
+	const char *name = "unknown";
+	char msg[128];
+	int chid = -1;
+
+	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+	if (chan) {
+		name = chan->object.client->name;
+		chid = chan->chid;
 	}
 
 	if (show & 0x00100000) {
-		u32 ecode = nv_rd32(priv, 0x400110);
-		nv_error(priv, "DATA_ERROR ");
-		nvkm_enum_print(nv50_data_error_names, ecode);
-		pr_cont("\n");
+		u32 ecode = nvkm_rd32(device, 0x400110);
+		en = nvkm_enum_find(nv50_data_error_names, ecode);
+		nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
+			   ecode, en ? en->name : "");
 		show_bitfield &= ~0x00100000;
 	}
 
 	if (stat & 0x00200000) {
-		if (!nv50_gr_trap_handler(priv, show, chid, (u64)inst << 12,
-					  engctx))
+		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
 			show &= ~0x00200000;
 		show_bitfield &= ~0x00200000;
 	}
 
-	nv_wr32(priv, 0x400100, stat);
-	nv_wr32(priv, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400100, stat);
+	nvkm_wr32(device, 0x400500, 0x00010001);
 
 	if (show) {
 		show &= show_bitfield;
-		if (show) {
-			nv_error(priv, "%s", "");
-			nvkm_bitfield_print(nv50_gr_intr_name, show);
-			pr_cont("\n");
-		}
-		nv_error(priv,
-			 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, (u64)inst << 12, nvkm_client_name(engctx),
-			 subc, class, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
+		nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   stat, msg, chid, (u64)inst << 12, name,
+			   subc, class, mthd, data);
 	}
 
-	if (nv_rd32(priv, 0x400824) & (1 << 31))
-		nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));
+	if (nvkm_rd32(device, 0x400824) & (1 << 31))
+		nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));
 
-	nvkm_engctx_put(engctx);
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
-static int
-nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+nv50_gr_init(struct nvkm_gr *base)
 {
-	struct nv50_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00201000;
-	nv_subdev(priv)->intr = nv50_gr_intr;
-	nv_engine(priv)->cclass = &nv50_gr_cclass;
-
-	priv->base.units = nv50_gr_units;
-
-	switch (nv_device(priv)->chipset) {
-	case 0x50:
-		nv_engine(priv)->sclass = nv50_gr_sclass;
-		break;
-	case 0x84:
-	case 0x86:
-	case 0x92:
-	case 0x94:
-	case 0x96:
-	case 0x98:
-		nv_engine(priv)->sclass = g84_gr_sclass;
-		break;
-	case 0xa0:
-	case 0xaa:
-	case 0xac:
-		nv_engine(priv)->sclass = gt200_gr_sclass;
-		break;
-	case 0xa3:
-	case 0xa5:
-	case 0xa8:
-		nv_engine(priv)->sclass = gt215_gr_sclass;
-		break;
-	case 0xaf:
-		nv_engine(priv)->sclass = mcp89_gr_sclass;
-		break;
-
-	}
-
-	/* unfortunate hw bug workaround... */
-	if (nv_device(priv)->chipset != 0x50 &&
-	    nv_device(priv)->chipset != 0xac)
-		nv_engine(priv)->tlb_flush = g84_gr_tlb_flush;
-
-	spin_lock_init(&priv->lock);
-	return 0;
-}
-
-static int
-nv50_gr_init(struct nvkm_object *object)
-{
-	struct nv50_gr_priv *priv = (void *)object;
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int ret, units, i;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
 	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
-	nv_wr32(priv, 0x40008c, 0x00000004);
+	nvkm_wr32(device, 0x40008c, 0x00000004);
 
 	/* reset/enable traps and interrupts */
-	nv_wr32(priv, 0x400804, 0xc0000000);
-	nv_wr32(priv, 0x406800, 0xc0000000);
-	nv_wr32(priv, 0x400c04, 0xc0000000);
-	nv_wr32(priv, 0x401800, 0xc0000000);
-	nv_wr32(priv, 0x405018, 0xc0000000);
-	nv_wr32(priv, 0x402000, 0xc0000000);
-
-	units = nv_rd32(priv, 0x001540);
+	nvkm_wr32(device, 0x400804, 0xc0000000);
+	nvkm_wr32(device, 0x406800, 0xc0000000);
+	nvkm_wr32(device, 0x400c04, 0xc0000000);
+	nvkm_wr32(device, 0x401800, 0xc0000000);
+	nvkm_wr32(device, 0x405018, 0xc0000000);
+	nvkm_wr32(device, 0x402000, 0xc0000000);
+
+	units = nvkm_rd32(device, 0x001540);
 	for (i = 0; i < 16; i++) {
 		if (!(units & (1 << i)))
 			continue;
 
-		if (nv_device(priv)->chipset < 0xa0) {
-			nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
-			nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
-			nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
+		if (device->chipset < 0xa0) {
+			nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
+			nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
+			nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
 		} else {
-			nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
-			nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
-			nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
+			nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
+			nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
+			nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
 		}
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-	nv_wr32(priv, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400500, 0x00010001);
 
 	/* upload context program, initialise ctxctl defaults */
-	ret = nv50_grctx_init(nv_device(priv), &priv->size);
+	ret = nv50_grctx_init(device, &gr->size);
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, 0x400824, 0x00000000);
-	nv_wr32(priv, 0x400828, 0x00000000);
-	nv_wr32(priv, 0x40082c, 0x00000000);
-	nv_wr32(priv, 0x400830, 0x00000000);
-	nv_wr32(priv, 0x40032c, 0x00000000);
-	nv_wr32(priv, 0x400330, 0x00000000);
+	nvkm_wr32(device, 0x400824, 0x00000000);
+	nvkm_wr32(device, 0x400828, 0x00000000);
+	nvkm_wr32(device, 0x40082c, 0x00000000);
+	nvkm_wr32(device, 0x400830, 0x00000000);
+	nvkm_wr32(device, 0x40032c, 0x00000000);
+	nvkm_wr32(device, 0x400330, 0x00000000);
 
 	/* some unknown zcull magic */
-	switch (nv_device(priv)->chipset & 0xf0) {
+	switch (device->chipset & 0xf0) {
 	case 0x50:
 	case 0x80:
 	case 0x90:
-		nv_wr32(priv, 0x402ca8, 0x00000800);
+		nvkm_wr32(device, 0x402ca8, 0x00000800);
 		break;
 	case 0xa0:
 	default:
-		if (nv_device(priv)->chipset == 0xa0 ||
-		    nv_device(priv)->chipset == 0xaa ||
-		    nv_device(priv)->chipset == 0xac) {
-			nv_wr32(priv, 0x402ca8, 0x00000802);
+		if (device->chipset == 0xa0 ||
+		    device->chipset == 0xaa ||
+		    device->chipset == 0xac) {
+			nvkm_wr32(device, 0x402ca8, 0x00000802);
 		} else {
-			nv_wr32(priv, 0x402cc0, 0x00000000);
-			nv_wr32(priv, 0x402ca8, 0x00000002);
+			nvkm_wr32(device, 0x402cc0, 0x00000000);
+			nvkm_wr32(device, 0x402ca8, 0x00000002);
 		}
 
 		break;
@@ -979,21 +748,47 @@ nv50_gr_init(struct nvkm_object *object)
 
 	/* zero out zcull regions */
 	for (i = 0; i < 8; i++) {
-		nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
-		nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
-		nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
-		nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
 	}
+
 	return 0;
 }
 
-struct nvkm_oclass
-nv50_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_gr_ctor,
-		.dtor = _nvkm_gr_dtor,
-		.init = nv50_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+int
+nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
+{
+	struct nv50_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	spin_lock_init(&gr->lock);
+	*pgr = &gr->base;
+
+	return nvkm_gr_ctor(func, device, index, 0x00201000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv50_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x5097, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{}
+	}
 };
+
+int
+nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&nv50_gr, device, index, pgr);
+}
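
The recurring logging conversion in this file trades the old nvkm_bitfield_print()/pr_cont() pairs, whose continuation output can interleave with other CPUs' log lines, for nvkm_snprintbf(), which decodes the set bits into a caller-provided buffer so each diagnostic lands in a single nvkm_error() call. The shape of the new idiom, lifted from the M2MF call site above:

	/* sketch only; bitfield table and status value as in the TRAP_M2MF path */
	static void
	report_trap(struct nvkm_subdev *subdev, u32 ustatus)
	{
		char msg[128];

		/* render the names of the set bits into msg[] */
		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_m2mf, ustatus);
		/* one atomic log line instead of nv_error() + pr_cont() */
		nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n", ustatus, msg);
	}
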
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index bcf786f6b731..45eec83a5969 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -1,8 +1,34 @@
 #ifndef __NV50_GR_H__
 #define __NV50_GR_H__
-#include <engine/gr.h>
-struct nvkm_device;
-struct nvkm_gpuobj;
+#define nv50_gr(p) container_of((p), struct nv50_gr, base)
+#include "priv.h"
+
+struct nv50_gr {
+	struct nvkm_gr base;
+	const struct nv50_gr_func *func;
+	spinlock_t lock;
+	u32 size;
+};
+
+int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+		 struct nvkm_gr **);
+int nv50_gr_init(struct nvkm_gr *);
+void nv50_gr_intr(struct nvkm_gr *);
+u64 nv50_gr_units(struct nvkm_gr *);
+
+int g84_gr_tlb_flush(struct nvkm_gr *);
+
+#define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
+
+struct nv50_gr_chan {
+	struct nvkm_object object;
+	struct nv50_gr *gr;
+};
+
+int nv50_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+		     const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nvkm_object_func nv50_gr_object;
 
 int  nv50_grctx_init(struct nvkm_device *, u32 *size);
 void nv50_grctx_fill(struct nvkm_device *, struct nvkm_gpuobj *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
new file mode 100644
index 000000000000..a234590be88e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -0,0 +1,38 @@
+#ifndef __NVKM_GR_PRIV_H__
+#define __NVKM_GR_PRIV_H__
+#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
+#include <engine/gr.h>
+#include <core/enum.h>
+struct nvkm_fb_tile;
+struct nvkm_fifo_chan;
+
+int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *,
+		 int index, u32 pmc_enable, bool enable,
+		 struct nvkm_gr *);
+
+bool nv04_gr_idle(struct nvkm_gr *);
+
+struct nvkm_gr_func {
+	void *(*dtor)(struct nvkm_gr *);
+	int (*oneinit)(struct nvkm_gr *);
+	int (*init)(struct nvkm_gr *);
+	void (*intr)(struct nvkm_gr *);
+	void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *);
+	int (*tlb_flush)(struct nvkm_gr *);
+	int (*chan_new)(struct nvkm_gr *, struct nvkm_fifo_chan *,
+			const struct nvkm_oclass *, struct nvkm_object **);
+	int (*object_get)(struct nvkm_gr *, int, struct nvkm_sclass *);
+	/* Returns chipset-specific counts of units packed into a u64.
+	 */
+	u64 (*units)(struct nvkm_gr *);
+	struct nvkm_sclass sclass[];
+};
+
+extern const struct nvkm_bitfield nv04_gr_nsource[];
+extern const struct nvkm_object_func nv04_gr_object;
+
+extern const struct nvkm_bitfield nv10_gr_intr_name[];
+extern const struct nvkm_bitfield nv10_gr_nstatus[];
+
+extern const struct nvkm_enum nv50_data_error_names[];
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index 0df889fa2611..34ff0014a6c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -21,74 +21,24 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mpeg.h>
-
-struct g84_mpeg_priv {
-	struct nvkm_mpeg base;
-};
-
-struct g84_mpeg_chan {
-	struct nvkm_mpeg_chan base;
-};
-
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_mpeg_sclass[] = {
-	{ 0x8274, &nv50_mpeg_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * PMPEG context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mpeg_context_ctor,
-		.dtor = _nvkm_mpeg_context_dtor,
-		.init = _nvkm_mpeg_context_init,
-		.fini = _nvkm_mpeg_context_fini,
-		.rd32 = _nvkm_mpeg_context_rd32,
-		.wr32 = _nvkm_mpeg_context_wr32,
-	},
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+g84_mpeg = {
+	.init = nv50_mpeg_init,
+	.intr = nv50_mpeg_intr,
+	.cclass = &nv50_mpeg_cclass,
+	.sclass = {
+		{ -1, -1, G82_MPEG, &nv31_mpeg_object },
+		{}
+	}
 };
 
-/*******************************************************************************
- * PMPEG engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
 {
-	struct g84_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv50_mpeg_intr;
-	nv_engine(priv)->cclass = &g84_mpeg_cclass;
-	nv_engine(priv)->sclass = g84_mpeg_sclass;
-	return 0;
+	return nvkm_engine_new_(&g84_mpeg, device, index, 0x00000002,
+				true, pmpeg);
 }
-
-struct nvkm_oclass
-g84_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv50_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index b5bef0718359..d4d8942b1347 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -24,281 +24,271 @@
 #include "nv31.h"
 
 #include <core/client.h>
-#include <core/handle.h>
-#include <engine/fifo.h>
-#include <subdev/instmem.h>
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
 
 /*******************************************************************************
  * MPEG object classes
  ******************************************************************************/
 
 static int
-nv31_mpeg_object_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 size,
-		      struct nvkm_object **pobject)
+nv31_mpeg_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		      int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 20, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
+				  false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static int
-nv31_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
+const struct nvkm_object_func
+nv31_mpeg_object = {
+	.bind = nv31_mpeg_object_bind,
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static void *
+nv31_mpeg_chan_dtor(struct nvkm_object *object)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	struct nv31_mpeg_priv *priv = (void *)object->engine;
-	u32 inst = *(u32 *)arg << 4;
-	u32 dma0 = nv_ro32(imem, inst + 0);
-	u32 dma1 = nv_ro32(imem, inst + 4);
-	u32 dma2 = nv_ro32(imem, inst + 8);
+	struct nv31_mpeg_chan *chan = nv31_mpeg_chan(object);
+	struct nv31_mpeg *mpeg = chan->mpeg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	if (mpeg->chan == chan)
+		mpeg->chan = NULL;
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv31_mpeg_chan = {
+	.dtor = nv31_mpeg_chan_dtor,
+};
+
+int
+nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
+	struct nv31_mpeg_chan *chan;
+	unsigned long flags;
+	int ret = -EBUSY;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv31_mpeg_chan, oclass, &chan->object);
+	chan->mpeg = mpeg;
+	chan->fifo = fifoch;
+	*pobject = &chan->object;
+
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	if (!mpeg->chan) {
+		mpeg->chan = chan;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return ret;
+}
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+void
+nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
+{
+	struct nv31_mpeg *mpeg = nv31_mpeg(engine);
+	struct nvkm_device *device = mpeg->engine.subdev.device;
+
+	nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
+}
+
+static bool
+nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
+{
+	u32 inst = data << 4;
+	u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
+	u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
+	u32 dma2 = nvkm_rd32(device, 0x700008 + inst);
 	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
 	u32 size = dma1 + 1;
 
 	/* only allow linear DMA objects */
 	if (!(dma0 & 0x00002000))
-		return -EINVAL;
+		return false;
 
 	if (mthd == 0x0190) {
 		/* DMA_CMD */
-		nv_mask(priv, 0x00b300, 0x00010000, (dma0 & 0x00030000) ? 0x00010000 : 0);
-		nv_wr32(priv, 0x00b334, base);
-		nv_wr32(priv, 0x00b324, size);
+		nvkm_mask(device, 0x00b300, 0x00010000,
+				  (dma0 & 0x00030000) ? 0x00010000 : 0);
+		nvkm_wr32(device, 0x00b334, base);
+		nvkm_wr32(device, 0x00b324, size);
 	} else
 	if (mthd == 0x01a0) {
 		/* DMA_DATA */
-		nv_mask(priv, 0x00b300, 0x00020000, (dma0 & 0x00030000) ? 0x00020000 : 0);
-		nv_wr32(priv, 0x00b360, base);
-		nv_wr32(priv, 0x00b364, size);
+		nvkm_mask(device, 0x00b300, 0x00020000,
+				  (dma0 & 0x00030000) ? 0x00020000 : 0);
+		nvkm_wr32(device, 0x00b360, base);
+		nvkm_wr32(device, 0x00b364, size);
 	} else {
 		/* DMA_IMAGE, VRAM only */
 		if (dma0 & 0x00030000)
-			return -EINVAL;
+			return false;
 
-		nv_wr32(priv, 0x00b370, base);
-		nv_wr32(priv, 0x00b374, size);
+		nvkm_wr32(device, 0x00b370, base);
+		nvkm_wr32(device, 0x00b374, size);
 	}
 
-	return 0;
+	return true;
 }
 
-struct nvkm_ofuncs
-nv31_mpeg_ofuncs = {
-	.ctor = nv31_mpeg_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_omthds
-nv31_mpeg_omthds[] = {
-	{ 0x0190, 0x0190, nv31_mpeg_mthd_dma },
-	{ 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
-	{ 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
-	{}
-};
-
-struct nvkm_oclass
-nv31_mpeg_sclass[] = {
-	{ 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
-	{}
-};
-
-/*******************************************************************************
- * PMPEG context
- ******************************************************************************/
-
-static int
-nv31_mpeg_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+static bool
+nv31_mpeg_mthd(struct nv31_mpeg *mpeg, u32 mthd, u32 data)
 {
-	struct nv31_mpeg_priv *priv = (void *)engine;
-	struct nv31_mpeg_chan *chan;
-	unsigned long flags;
-	int ret;
-
-	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
-	if (priv->chan) {
-		spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-		nvkm_object_destroy(&chan->base);
-		*pobject = NULL;
-		return -EBUSY;
+	struct nvkm_device *device = mpeg->engine.subdev.device;
+	switch (mthd) {
+	case 0x190:
+	case 0x1a0:
+	case 0x1b0:
+		return mpeg->func->mthd_dma(device, mthd, data);
+	default:
+		break;
 	}
-	priv->chan = chan;
-	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-	return 0;
+	return false;
 }
 
 static void
-nv31_mpeg_context_dtor(struct nvkm_object *object)
+nv31_mpeg_intr(struct nvkm_engine *engine)
 {
-	struct nv31_mpeg_priv *priv = (void *)object->engine;
-	struct nv31_mpeg_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
-	priv->chan = NULL;
-	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-	nvkm_object_destroy(&chan->base);
-}
-
-struct nvkm_oclass
-nv31_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x31),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv31_mpeg_context_ctor,
-		.dtor = nv31_mpeg_context_dtor,
-		.init = nvkm_object_init,
-		.fini = nvkm_object_fini,
-	},
-};
-
-/*******************************************************************************
- * PMPEG engine/subdev functions
- ******************************************************************************/
-
-void
-nv31_mpeg_tile_prog(struct nvkm_engine *engine, int i)
-{
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nv31_mpeg_priv *priv = (void *)engine;
-
-	nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
-	nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
-	nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
-}
-
-void
-nv31_mpeg_intr(struct nvkm_subdev *subdev)
-{
-	struct nv31_mpeg_priv *priv = (void *)subdev;
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_handle *handle;
-	struct nvkm_object *engctx;
-	u32 stat = nv_rd32(priv, 0x00b100);
-	u32 type = nv_rd32(priv, 0x00b230);
-	u32 mthd = nv_rd32(priv, 0x00b234);
-	u32 data = nv_rd32(priv, 0x00b238);
+	struct nv31_mpeg *mpeg = nv31_mpeg(engine);
+	struct nvkm_subdev *subdev = &mpeg->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00b100);
+	u32 type = nvkm_rd32(device, 0x00b230);
+	u32 mthd = nvkm_rd32(device, 0x00b234);
+	u32 data = nvkm_rd32(device, 0x00b238);
 	u32 show = stat;
 	unsigned long flags;
 
-	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
-	engctx = nv_object(priv->chan);
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
 		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
 			show &= ~0x01000000;
 		}
 
-		if (type == 0x00000010 && engctx) {
-			handle = nvkm_handle_get_class(engctx, 0x3174);
-			if (handle && !nv_call(handle->object, mthd, data))
+		if (type == 0x00000010) {
+			if (!nv31_mpeg_mthd(mpeg, mthd, data))
 				show &= ~0x01000000;
-			nvkm_handle_put(handle);
 		}
 	}
 
-	nv_wr32(priv, 0x00b100, stat);
-	nv_wr32(priv, 0x00b230, 0x00000001);
+	nvkm_wr32(device, 0x00b100, stat);
+	nvkm_wr32(device, 0x00b230, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			 pfifo->chid(pfifo, engctx),
-			 nvkm_client_name(engctx), stat, type, mthd, data);
+		nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n",
+			   mpeg->chan ? mpeg->chan->fifo->chid : -1,
+			   mpeg->chan ? mpeg->chan->object.client->name :
+			   "unknown", stat, type, mthd, data);
 	}
 
-	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-}
-
-static int
-nv31_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv31_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv31_mpeg_intr;
-	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
-	nv_engine(priv)->sclass = nv31_mpeg_sclass;
-	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
-	return 0;
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
 }
 
 int
-nv31_mpeg_init(struct nvkm_object *object)
+nv31_mpeg_init(struct nvkm_engine *mpeg)
 {
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv31_mpeg_priv *priv = (void *)object;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	int ret, i;
-
-	ret = nvkm_mpeg_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_subdev *subdev = &mpeg->subdev;
+	struct nvkm_device *device = subdev->device;
 
 	/* VPE init */
-	nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-	nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
+	nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+	nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
 
 	/* PMPEG init */
-	nv_wr32(priv, 0x00b32c, 0x00000000);
-	nv_wr32(priv, 0x00b314, 0x00000100);
-	nv_wr32(priv, 0x00b220, 0x00000031);
-	nv_wr32(priv, 0x00b300, 0x02001ec1);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
-
-	nv_wr32(priv, 0x00b100, 0xffffffff);
-	nv_wr32(priv, 0x00b140, 0xffffffff);
-
-	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
-		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+	nvkm_wr32(device, 0x00b32c, 0x00000000);
+	nvkm_wr32(device, 0x00b314, 0x00000100);
+	nvkm_wr32(device, 0x00b220, 0x00000031);
+	nvkm_wr32(device, 0x00b300, 0x02001ec1);
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
+
+	nvkm_wr32(device, 0x00b100, 0xffffffff);
+	nvkm_wr32(device, 0x00b140, 0xffffffff);
+
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "timeout %08x\n",
+			   nvkm_rd32(device, 0x00b200));
 		return -EBUSY;
 	}
 
 	return 0;
 }
 
-struct nvkm_oclass
-nv31_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x31),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv31_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv31_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
+static void *
+nv31_mpeg_dtor(struct nvkm_engine *engine)
+{
+	return nv31_mpeg(engine);
+}
+
+static const struct nvkm_engine_func
+nv31_mpeg_ = {
+	.dtor = nv31_mpeg_dtor,
+	.init = nv31_mpeg_init,
+	.intr = nv31_mpeg_intr,
+	.tile = nv31_mpeg_tile,
+	.fifo.cclass = nv31_mpeg_chan_new,
+	.sclass = {
+		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
+		{}
+	}
 };
+
+int
+nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_engine **pmpeg)
+{
+	struct nv31_mpeg *mpeg;
+
+	if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
+		return -ENOMEM;
+	mpeg->func = func;
+	*pmpeg = &mpeg->engine;
+
+	return nvkm_engine_ctor(&nv31_mpeg_, device, index, 0x00000002,
+				true, &mpeg->engine);
+}
+
+static const struct nv31_mpeg_func
+nv31_mpeg = {
+	.mthd_dma = nv31_mpeg_mthd_dma,
+};
+
+int
+nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+{
+	return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg);
+}
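
The conversion above also swaps the old nv_wait() helper for the nvkm_msec() busy-wait macro: the statement body runs repeatedly until it executes break, and the macro returns a negative value once the 2000 (presumably milliseconds, given the name) deadline expires. A minimal sketch of the idiom, reusing only the interfaces visible in this hunk; the helper name is invented:

	/* Sketch: wait for PMPEG to report idle, as in nv31_mpeg_init()
	 * above; returns -EBUSY if bit 0 of 0x00b200 never clears.
	 */
	static int
	nv31_mpeg_wait_idle(struct nvkm_device *device)
	{
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
				break;
		) < 0)
			return -EBUSY;
		return 0;
	}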
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index 782b796d7458..d3bb34fcdebf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -1,13 +1,30 @@
 #ifndef __NV31_MPEG_H__
 #define __NV31_MPEG_H__
+#define nv31_mpeg(p) container_of((p), struct nv31_mpeg, engine)
+#include "priv.h"
 #include <engine/mpeg.h>
 
-struct nv31_mpeg_chan {
-	struct nvkm_object base;
+struct nv31_mpeg {
+	const struct nv31_mpeg_func *func;
+	struct nvkm_engine engine;
+	struct nv31_mpeg_chan *chan;
 };
 
-struct nv31_mpeg_priv {
-	struct nvkm_mpeg base;
-	struct nv31_mpeg_chan *chan;
+int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *,
+		   int index, struct nvkm_engine **);
+
+struct nv31_mpeg_func {
+	bool (*mthd_dma)(struct nvkm_device *, u32 mthd, u32 data);
 };
+
+#define nv31_mpeg_chan(p) container_of((p), struct nv31_mpeg_chan, object)
+
+struct nv31_mpeg_chan {
+	struct nvkm_object object;
+	struct nv31_mpeg *mpeg;
+	struct nvkm_fifo_chan *fifo;
+};
+
+int nv31_mpeg_chan_new(struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+		       struct nvkm_object **);
 #endif
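
The nv31_mpeg() and nv31_mpeg_chan() macros above replace the old "(void *)object" downcasts with explicit container_of() conversions, which only make sense when the named member really is embedded in the wrapper type. Expanded by hand for the engine case (helper name invented):

	/* What nv31_mpeg(p) expands to, given the structures above: */
	static inline struct nv31_mpeg *
	to_nv31_mpeg(struct nvkm_engine *p)
	{
		return container_of(p, struct nv31_mpeg, engine);
	}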
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index 9508bf9e140f..16de5bd94b14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -25,110 +25,53 @@
 
 #include <subdev/instmem.h>
 
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
+#include <nvif/class.h>
 
-static int
-nv40_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
+bool
+nv40_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	struct nv31_mpeg_priv *priv = (void *)object->engine;
-	u32 inst = *(u32 *)arg << 4;
-	u32 dma0 = nv_ro32(imem, inst + 0);
-	u32 dma1 = nv_ro32(imem, inst + 4);
-	u32 dma2 = nv_ro32(imem, inst + 8);
+	struct nvkm_instmem *imem = device->imem;
+	u32 inst = data << 4;
+	u32 dma0 = nvkm_instmem_rd32(imem, inst + 0);
+	u32 dma1 = nvkm_instmem_rd32(imem, inst + 4);
+	u32 dma2 = nvkm_instmem_rd32(imem, inst + 8);
 	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
 	u32 size = dma1 + 1;
 
 	/* only allow linear DMA objects */
 	if (!(dma0 & 0x00002000))
-		return -EINVAL;
+		return false;
 
 	if (mthd == 0x0190) {
 		/* DMA_CMD */
-		nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
-		nv_wr32(priv, 0x00b334, base);
-		nv_wr32(priv, 0x00b324, size);
+		nvkm_mask(device, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+		nvkm_wr32(device, 0x00b334, base);
+		nvkm_wr32(device, 0x00b324, size);
 	} else
 	if (mthd == 0x01a0) {
 		/* DMA_DATA */
-		nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
-		nv_wr32(priv, 0x00b360, base);
-		nv_wr32(priv, 0x00b364, size);
+		nvkm_mask(device, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+		nvkm_wr32(device, 0x00b360, base);
+		nvkm_wr32(device, 0x00b364, size);
 	} else {
 		/* DMA_IMAGE, VRAM only */
 		if (dma0 & 0x00030000)
-			return -EINVAL;
+			return false;
 
-		nv_wr32(priv, 0x00b370, base);
-		nv_wr32(priv, 0x00b374, size);
+		nvkm_wr32(device, 0x00b370, base);
+		nvkm_wr32(device, 0x00b374, size);
 	}
 
-	return 0;
+	return true;
 }
 
-static struct nvkm_omthds
-nv40_mpeg_omthds[] = {
-	{ 0x0190, 0x0190, nv40_mpeg_mthd_dma },
-	{ 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
-	{ 0x01b0, 0x01b0, nv40_mpeg_mthd_dma },
-	{}
+static const struct nv31_mpeg_func
+nv40_mpeg = {
+	.mthd_dma = nv40_mpeg_mthd_dma,
 };
 
-struct nvkm_oclass
-nv40_mpeg_sclass[] = {
-	{ 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
-	{}
-};
-
-/*******************************************************************************
- * PMPEG engine/subdev functions
- ******************************************************************************/
-
-static void
-nv40_mpeg_intr(struct nvkm_subdev *subdev)
+int
+nv40_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
 {
-	struct nv31_mpeg_priv *priv = (void *)subdev;
-	u32 stat;
-
-	if ((stat = nv_rd32(priv, 0x00b100)))
-		nv31_mpeg_intr(subdev);
-
-	if ((stat = nv_rd32(priv, 0x00b800))) {
-		nv_error(priv, "PMSRCH 0x%08x\n", stat);
-		nv_wr32(priv, 0x00b800, stat);
-	}
+	return nv31_mpeg_new_(&nv40_mpeg, device, index, pmpeg);
 }
-
-static int
-nv40_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv31_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv40_mpeg_intr;
-	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
-	nv_engine(priv)->sclass = nv40_mpeg_sclass;
-	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv40_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv31_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
-};
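
nv40_mpeg_mthd_dma() now reads the DMA object straight out of instance memory and returns a bool instead of an errno, matching the nv31_mpeg_func.mthd_dma signature declared in nv31.h. A worked example of the field decode, with made-up object words:

	/* Hypothetical DMA-object words, decoded as in the code above: */
	u32 dma0 = 0x12302000;	/* bit 13 set => linear object, allowed */
	u32 dma1 = 0x000fffff;	/* limit */
	u32 dma2 = 0x00400000;
	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);	/* = 0x00400123 */
	u32 size = dma1 + 1;				/* = 0x00100000 */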
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index 4720ac884468..d433cfa4a8ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -21,165 +21,197 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mpeg.h>
+#define nv44_mpeg(p) container_of((p), struct nv44_mpeg, engine)
+#include "priv.h"
 
 #include <core/client.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
 
-struct nv44_mpeg_priv {
-	struct nvkm_mpeg base;
-};
+#include <nvif/class.h>
 
-struct nv44_mpeg_chan {
-	struct nvkm_mpeg_chan base;
+struct nv44_mpeg {
+	struct nvkm_engine engine;
+	struct list_head chan;
 };
 
 /*******************************************************************************
  * PMPEG context
  ******************************************************************************/
+#define nv44_mpeg_chan(p) container_of((p), struct nv44_mpeg_chan, object)
+
+struct nv44_mpeg_chan {
+	struct nvkm_object object;
+	struct nv44_mpeg *mpeg;
+	struct nvkm_fifo_chan *fifo;
+	struct list_head head;
+	u32 inst;
+};
 
 static int
-nv44_mpeg_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+nv44_mpeg_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nv44_mpeg_chan *chan;
-	int ret;
-
-	ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 264 * 4,
-				       16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
-	return 0;
+	struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+	int ret = nvkm_gpuobj_new(chan->object.engine->subdev.device, 264 * 4,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		chan->inst = (*pgpuobj)->addr;
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x78, 0x02001ec1);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
 static int
-nv44_mpeg_context_fini(struct nvkm_object *object, bool suspend)
+nv44_mpeg_chan_fini(struct nvkm_object *object, bool suspend)
 {
 
-	struct nv44_mpeg_priv *priv = (void *)object->engine;
-	struct nv44_mpeg_chan *chan = (void *)object;
-	u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
+	struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+	struct nv44_mpeg *mpeg = chan->mpeg;
+	struct nvkm_device *device = mpeg->engine.subdev.device;
+	u32 inst = 0x80000000 | (chan->inst >> 4);
 
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
-	if (nv_rd32(priv, 0x00b318) == inst)
-		nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000000);
+	if (nvkm_rd32(device, 0x00b318) == inst)
+		nvkm_mask(device, 0x00b318, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv44_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x44),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_mpeg_context_ctor,
-		.dtor = _nvkm_mpeg_context_dtor,
-		.init = _nvkm_mpeg_context_init,
-		.fini = nv44_mpeg_context_fini,
-		.rd32 = _nvkm_mpeg_context_rd32,
-		.wr32 = _nvkm_mpeg_context_wr32,
-	},
+static void *
+nv44_mpeg_chan_dtor(struct nvkm_object *object)
+{
+	struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+	struct nv44_mpeg *mpeg = chan->mpeg;
+	unsigned long flags;
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	list_del(&chan->head);
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv44_mpeg_chan = {
+	.dtor = nv44_mpeg_chan_dtor,
+	.fini = nv44_mpeg_chan_fini,
+	.bind = nv44_mpeg_chan_bind,
 };
 
+static int
+nv44_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nv44_mpeg *mpeg = nv44_mpeg(oclass->engine);
+	struct nv44_mpeg_chan *chan;
+	unsigned long flags;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv44_mpeg_chan, oclass, &chan->object);
+	chan->mpeg = mpeg;
+	chan->fifo = fifoch;
+	*pobject = &chan->object;
+
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	list_add(&chan->head, &mpeg->chan);
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return 0;
+}
+
 /*******************************************************************************
  * PMPEG engine/subdev functions
  ******************************************************************************/
 
+static bool
+nv44_mpeg_mthd(struct nvkm_device *device, u32 mthd, u32 data)
+{
+	switch (mthd) {
+	case 0x190:
+	case 0x1a0:
+	case 0x1b0:
+		return nv40_mpeg_mthd_dma(device, mthd, data);
+	default:
+		break;
+	}
+	return false;
+}
+
 static void
-nv44_mpeg_intr(struct nvkm_subdev *subdev)
+nv44_mpeg_intr(struct nvkm_engine *engine)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle;
-	struct nv44_mpeg_priv *priv = (void *)subdev;
-	u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
-	u32 stat = nv_rd32(priv, 0x00b100);
-	u32 type = nv_rd32(priv, 0x00b230);
-	u32 mthd = nv_rd32(priv, 0x00b234);
-	u32 data = nv_rd32(priv, 0x00b238);
+	struct nv44_mpeg *mpeg = nv44_mpeg(engine);
+	struct nvkm_subdev *subdev = &mpeg->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nv44_mpeg_chan *temp, *chan = NULL;
+	unsigned long flags;
+	u32 inst = nvkm_rd32(device, 0x00b318) & 0x000fffff;
+	u32 stat = nvkm_rd32(device, 0x00b100);
+	u32 type = nvkm_rd32(device, 0x00b230);
+	u32 mthd = nvkm_rd32(device, 0x00b234);
+	u32 data = nvkm_rd32(device, 0x00b238);
 	u32 show = stat;
-	int chid;
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	list_for_each_entry(temp, &mpeg->chan, head) {
+		if (temp->inst >> 4 == inst) {
+			chan = temp;
+			list_del(&chan->head);
+			list_add(&chan->head, &mpeg->chan);
+			break;
+		}
+	}
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
 		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
 			show &= ~0x01000000;
 		}
 
 		if (type == 0x00000010) {
-			handle = nvkm_handle_get_class(engctx, 0x3174);
-			if (handle && !nv_call(handle->object, mthd, data))
+			if (!nv44_mpeg_mthd(subdev->device, mthd, data))
 				show &= ~0x01000000;
-			nvkm_handle_put(handle);
 		}
 	}
 
-	nv_wr32(priv, 0x00b100, stat);
-	nv_wr32(priv, 0x00b230, 0x00000001);
+	nvkm_wr32(device, 0x00b100, stat);
+	nvkm_wr32(device, 0x00b230, 0x00000001);
 
 	if (show) {
-		nv_error(priv,
-			 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			 chid, inst << 4, nvkm_client_name(engctx), stat,
-			 type, mthd, data);
+		nvkm_error(subdev, "ch %d [%08x %s] %08x %08x %08x %08x\n",
+			   chan ? chan->fifo->chid : -1, inst << 4,
+			   chan ? chan->object.client->name : "unknown",
+			   stat, type, mthd, data);
 	}
 
-	nvkm_engctx_put(engctx);
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
 }
 
-static void
-nv44_mpeg_me_intr(struct nvkm_subdev *subdev)
-{
-	struct nv44_mpeg_priv *priv = (void *)subdev;
-	u32 stat;
-
-	if ((stat = nv_rd32(priv, 0x00b100)))
-		nv44_mpeg_intr(subdev);
-
-	if ((stat = nv_rd32(priv, 0x00b800))) {
-		nv_error(priv, "PMSRCH 0x%08x\n", stat);
-		nv_wr32(priv, 0x00b800, stat);
+static const struct nvkm_engine_func
+nv44_mpeg = {
+	.init = nv31_mpeg_init,
+	.intr = nv44_mpeg_intr,
+	.tile = nv31_mpeg_tile,
+	.fifo.cclass = nv44_mpeg_chan_new,
+	.sclass = {
+		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
+		{}
 	}
-}
+};
 
-static int
-nv44_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
 {
-	struct nv44_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv44_mpeg_me_intr;
-	nv_engine(priv)->cclass = &nv44_mpeg_cclass;
-	nv_engine(priv)->sclass = nv40_mpeg_sclass;
-	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
-	return 0;
-}
+	struct nv44_mpeg *mpeg;
 
-struct nvkm_oclass
-nv44_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x44),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv31_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
-};
+	if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_LIST_HEAD(&mpeg->chan);
+	*pmpeg = &mpeg->engine;
+
+	return nvkm_engine_ctor(&nv44_mpeg, device, index, 0x00000002,
+				true, &mpeg->engine);
+}
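
nv44 cannot rely on a single active-channel pointer the way nv31 does, so channels add themselves to mpeg->chan on creation and remove themselves in the destructor, both under engine.lock. The interrupt handler then matches the instance address latched in 0x00b318 and rotates the hit to the front of the list. The same lookup, factored out as a sketch (helper name invented; the caller must hold engine.lock):

	/* Sketch of the lookup-and-promote step from nv44_mpeg_intr();
	 * list_move() is equivalent to the list_del()/list_add() pair
	 * used above.
	 */
	static struct nv44_mpeg_chan *
	nv44_mpeg_chan_find(struct nv44_mpeg *mpeg, u32 inst)
	{
		struct nv44_mpeg_chan *chan;
		list_for_each_entry(chan, &mpeg->chan, head) {
			if (chan->inst >> 4 == inst) {
				list_move(&chan->head, &mpeg->chan);
				return chan;
			}
		}
		return NULL;
	}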
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index b3463f3739ce..c3a85dffc782 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -21,98 +21,35 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mpeg.h>
+#include "priv.h"
 
-#include <subdev/bar.h>
+#include <core/gpuobj.h>
 #include <subdev/timer.h>
 
-struct nv50_mpeg_priv {
-	struct nvkm_mpeg base;
-};
-
-struct nv50_mpeg_chan {
-	struct nvkm_mpeg_chan base;
-};
-
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
-
-static int
-nv50_mpeg_object_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 size,
-		      struct nvkm_object **pobject)
-{
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
-}
-
-struct nvkm_ofuncs
-nv50_mpeg_ofuncs = {
-	.ctor = nv50_mpeg_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv50_mpeg_sclass[] = {
-	{ 0x3174, &nv50_mpeg_ofuncs },
-	{}
-};
+#include <nvif/class.h>
 
 /*******************************************************************************
  * PMPEG context
  ******************************************************************************/
 
-int
-nv50_mpeg_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+static int
+nv50_mpeg_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		      int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_mpeg_chan *chan;
-	int ret;
-
-	ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
-				       0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv_wo32(chan, 0x0070, 0x00801ec1);
-	nv_wo32(chan, 0x007c, 0x0000037c);
-	bar->flush(bar);
-	return 0;
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 128 * 4,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x70, 0x00801ec1);
+		nvkm_wo32(*pgpuobj, 0x7c, 0x0000037c);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_oclass
+const struct nvkm_object_func
 nv50_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mpeg_context_ctor,
-		.dtor = _nvkm_mpeg_context_dtor,
-		.init = _nvkm_mpeg_context_init,
-		.fini = _nvkm_mpeg_context_fini,
-		.rd32 = _nvkm_mpeg_context_rd32,
-		.wr32 = _nvkm_mpeg_context_wr32,
-	},
+	.bind = nv50_mpeg_cclass_bind,
 };
 
 /*******************************************************************************
@@ -120,106 +57,79 @@ nv50_mpeg_cclass = {
  ******************************************************************************/
 
 void
-nv50_mpeg_intr(struct nvkm_subdev *subdev)
+nv50_mpeg_intr(struct nvkm_engine *mpeg)
 {
-	struct nv50_mpeg_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x00b100);
-	u32 type = nv_rd32(priv, 0x00b230);
-	u32 mthd = nv_rd32(priv, 0x00b234);
-	u32 data = nv_rd32(priv, 0x00b238);
+	struct nvkm_subdev *subdev = &mpeg->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00b100);
+	u32 type = nvkm_rd32(device, 0x00b230);
+	u32 mthd = nvkm_rd32(device, 0x00b234);
+	u32 data = nvkm_rd32(device, 0x00b238);
 	u32 show = stat;
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
 		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_wr32(priv, 0x00b308, 0x00000100);
+			nvkm_wr32(device, 0x00b308, 0x00000100);
 			show &= ~0x01000000;
 		}
 	}
 
 	if (show) {
-		nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
-			stat, type, mthd, data);
-	}
-
-	nv_wr32(priv, 0x00b100, stat);
-	nv_wr32(priv, 0x00b230, 0x00000001);
-}
-
-static void
-nv50_vpe_intr(struct nvkm_subdev *subdev)
-{
-	struct nv50_mpeg_priv *priv = (void *)subdev;
-
-	if (nv_rd32(priv, 0x00b100))
-		nv50_mpeg_intr(subdev);
-
-	if (nv_rd32(priv, 0x00b800)) {
-		u32 stat = nv_rd32(priv, 0x00b800);
-		nv_info(priv, "PMSRCH: 0x%08x\n", stat);
-		nv_wr32(priv, 0xb800, stat);
+		nvkm_info(subdev, "%08x %08x %08x %08x\n",
+			  stat, type, mthd, data);
 	}
-}
 
-static int
-nv50_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv50_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00400002;
-	nv_subdev(priv)->intr = nv50_vpe_intr;
-	nv_engine(priv)->cclass = &nv50_mpeg_cclass;
-	nv_engine(priv)->sclass = nv50_mpeg_sclass;
-	return 0;
+	nvkm_wr32(device, 0x00b100, stat);
+	nvkm_wr32(device, 0x00b230, 0x00000001);
 }
 
 int
-nv50_mpeg_init(struct nvkm_object *object)
+nv50_mpeg_init(struct nvkm_engine *mpeg)
 {
-	struct nv50_mpeg_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_mpeg_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x00b32c, 0x00000000);
-	nv_wr32(priv, 0x00b314, 0x00000100);
-	nv_wr32(priv, 0x00b0e0, 0x0000001a);
-
-	nv_wr32(priv, 0x00b220, 0x00000044);
-	nv_wr32(priv, 0x00b300, 0x00801ec1);
-	nv_wr32(priv, 0x00b390, 0x00000000);
-	nv_wr32(priv, 0x00b394, 0x00000000);
-	nv_wr32(priv, 0x00b398, 0x00000000);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
-
-	nv_wr32(priv, 0x00b100, 0xffffffff);
-	nv_wr32(priv, 0x00b140, 0xffffffff);
-
-	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
-		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+	struct nvkm_subdev *subdev = &mpeg->subdev;
+	struct nvkm_device *device = subdev->device;
+
+	nvkm_wr32(device, 0x00b32c, 0x00000000);
+	nvkm_wr32(device, 0x00b314, 0x00000100);
+	nvkm_wr32(device, 0x00b0e0, 0x0000001a);
+
+	nvkm_wr32(device, 0x00b220, 0x00000044);
+	nvkm_wr32(device, 0x00b300, 0x00801ec1);
+	nvkm_wr32(device, 0x00b390, 0x00000000);
+	nvkm_wr32(device, 0x00b394, 0x00000000);
+	nvkm_wr32(device, 0x00b398, 0x00000000);
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
+
+	nvkm_wr32(device, 0x00b100, 0xffffffff);
+	nvkm_wr32(device, 0x00b140, 0xffffffff);
+
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "timeout %08x\n",
+			   nvkm_rd32(device, 0x00b200));
 		return -EBUSY;
 	}
 
 	return 0;
 }
 
-struct nvkm_oclass
-nv50_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv50_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
+static const struct nvkm_engine_func
+nv50_mpeg = {
+	.init = nv50_mpeg_init,
+	.intr = nv50_mpeg_intr,
+	.cclass = &nv50_mpeg_cclass,
+	.sclass = {
+		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
+		{}
+	}
 };
+
+int
+nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+{
+	return nvkm_engine_new_(&nv50_mpeg, device, index, 0x00400002,
+				true, pmpeg);
+}
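
The new bind() hooks allocate the engine context with nvkm_gpuobj_new() and seed it inside an nvkm_kmap()/nvkm_done() bracket, which opens the object for CPU writes and completes the access afterwards; this replaces the old context ctor plus its explicit bar->flush() call. The shape of the idiom, sized as in nv50_mpeg_cclass_bind() above:

	/* Sketch: allocate a 128*4-byte context and seed two words. */
	int ret = nvkm_gpuobj_new(device, 128 * 4, align, true, parent,
				  pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x70, 0x00801ec1);
		nvkm_wo32(*pgpuobj, 0x7c, 0x0000037c);
		nvkm_done(*pgpuobj);
	}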
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
new file mode 100644
index 000000000000..d5753103ff63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
@@ -0,0 +1,16 @@
+#ifndef __NVKM_MPEG_PRIV_H__
+#define __NVKM_MPEG_PRIV_H__
+#include <engine/mpeg.h>
+struct nvkm_fifo_chan;
+
+int nv31_mpeg_init(struct nvkm_engine *);
+void nv31_mpeg_tile(struct nvkm_engine *, int, struct nvkm_fb_tile *);
+extern const struct nvkm_object_func nv31_mpeg_object;
+
+bool nv40_mpeg_mthd_dma(struct nvkm_device *, u32, u32);
+
+int nv50_mpeg_init(struct nvkm_engine *);
+void nv50_mpeg_intr(struct nvkm_engine *);
+
+extern const struct nvkm_object_func nv50_mpeg_cclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
index c59c83a67315..1a7151146e9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
@@ -1,3 +1,5 @@
+nvkm-y += nvkm/engine/mspdec/base.o
 nvkm-y += nvkm/engine/mspdec/g98.o
+nvkm-y += nvkm/engine/mspdec/gt215.o
 nvkm-y += nvkm/engine/mspdec/gf100.o
 nvkm-y += nvkm/engine/mspdec/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
new file mode 100644
index 000000000000..80211f76093b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_mspdec_new_(const struct nvkm_falcon_func *func,
+		 struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(func, device, index, true, 0x085000, pengine);
+}
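
With the falcon boilerplate gone, a chipset variant reduces to a const function table plus a one-line call into nvkm_mspdec_new_(), which fixes the falcon register base at 0x085000 for every MSPDEC. A hypothetical variant would look like this (names invented; compare g98.c below):

	static const struct nvkm_falcon_func
	gxxx_mspdec = {				/* hypothetical chipset */
		.pmc_enable = 0x01020000,	/* unit bit, as for g98 */
		.init = g98_mspdec_init,
	};

	int
	gxxx_mspdec_new(struct nvkm_device *device, int index,
			struct nvkm_engine **pengine)
	{
		return nvkm_mspdec_new_(&gxxx_mspdec, device, index, pengine);
	}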
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index 2174577793a4..1f1a99e927b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -21,89 +21,31 @@
  *
  * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
  */
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct g98_mspdec_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_mspdec_sclass[] = {
-	{ 0x88b2, &nvkm_object_ofuncs },
-	{ 0x85b2, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_mspdec_cclass = {
-	.handle = NV_ENGCTX(MSPDEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
-static int
-g98_mspdec_init(struct nvkm_object *object)
+void
+g98_mspdec_init(struct nvkm_falcon *mspdec)
 {
-	struct g98_mspdec_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x085010, 0x0000ffd2);
-	nv_wr32(priv, 0x08501c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = mspdec->engine.subdev.device;
+	nvkm_wr32(device, 0x085010, 0x0000ffd2);
+	nvkm_wr32(device, 0x08501c, 0x0000fff2);
 }
 
-static int
-g98_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct g98_mspdec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
-				 "PMSPDEC", "mspdec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_falcon_func
+g98_mspdec = {
+	.pmc_enable = 0x01020000,
+	.init = g98_mspdec_init,
+	.sclass = {
+		{ -1, -1, G98_MSPDEC },
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x01020000;
-	nv_engine(priv)->cclass = &g98_mspdec_cclass;
-	nv_engine(priv)->sclass = g98_mspdec_sclass;
-	return 0;
+int
+g98_mspdec_new(struct nvkm_device *device, int index,
+	       struct nvkm_engine **pengine)
+{
+	return nvkm_mspdec_new_(&g98_mspdec, device, index, pengine);
 }
-
-struct nvkm_oclass
-g98_mspdec_oclass = {
-	.handle = NV_ENGINE(MSPDEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_mspdec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = g98_mspdec_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
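
The magic class numbers from the removed sclass tables now live behind names from nvif/class.h, so an entry like { 0x88b2, &nvkm_object_ofuncs } becomes { -1, -1, G98_MSPDEC }. Judging purely from the deleted tables in this patch (an inference, not a quote from nvif/class.h), the mapping is:

	/* Values inferred from the removed sclass tables; the real
	 * definitions live in nvif/class.h (assumption):
	 */
	#define G98_MSPDEC	0x000088b2
	#define GT212_MSPDEC	0x000085b2
	#define GF100_MSPDEC	0x000090b2
	#define GK104_MSPDEC	0x000095b2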
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index c814a5f65eb0..371fd6c3c663 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -21,89 +21,31 @@
  *
  * Authors: Maarten Lankhorst
  */
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gf100_mspdec_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_mspdec_sclass[] = {
-	{ 0x90b2, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_mspdec_cclass = {
-	.handle = NV_ENGCTX(MSPDEC, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
-static int
-gf100_mspdec_init(struct nvkm_object *object)
+void
+gf100_mspdec_init(struct nvkm_falcon *mspdec)
 {
-	struct gf100_mspdec_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x085010, 0x0000fff2);
-	nv_wr32(priv, 0x08501c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = mspdec->engine.subdev.device;
+	nvkm_wr32(device, 0x085010, 0x0000fff2);
+	nvkm_wr32(device, 0x08501c, 0x0000fff2);
 }
 
-static int
-gf100_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct gf100_mspdec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
-				 "PMSPDEC", "mspdec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_falcon_func
+gf100_mspdec = {
+	.pmc_enable = 0x00020000,
+	.init = gf100_mspdec_init,
+	.sclass = {
+		{ -1, -1, GF100_MSPDEC },
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x00020000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gf100_mspdec_cclass;
-	nv_engine(priv)->sclass = gf100_mspdec_sclass;
-	return 0;
+int
+gf100_mspdec_new(struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)
+{
+	return nvkm_mspdec_new_(&gf100_mspdec, device, index, pengine);
 }
-
-struct nvkm_oclass
-gf100_mspdec_oclass = {
-	.handle = NV_ENGINE(MSPDEC, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_mspdec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_mspdec_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index 979920650dbd..de804a15bfd4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -21,89 +21,23 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
-
-struct gk104_mspdec_priv {
-	struct nvkm_falcon base;
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gk104_mspdec = {
+	.pmc_enable = 0x00020000,
+	.init = gf100_mspdec_init,
+	.sclass = {
+		{ -1, -1, GK104_MSPDEC },
+		{}
+	}
 };
 
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_mspdec_sclass[] = {
-	{ 0x95b2, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_mspdec_cclass = {
-	.handle = NV_ENGCTX(MSPDEC, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
-
-static int
-gk104_mspdec_init(struct nvkm_object *object)
+int
+gk104_mspdec_new(struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)
 {
-	struct gk104_mspdec_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x085010, 0x0000fff2);
-	nv_wr32(priv, 0x08501c, 0x0000fff2);
-	return 0;
-}
-
-static int
-gk104_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct gk104_mspdec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
-				 "PMSPDEC", "mspdec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00020000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gk104_mspdec_cclass;
-	nv_engine(priv)->sclass = gk104_mspdec_sclass;
-	return 0;
+	return nvkm_mspdec_new_(&gk104_mspdec, device, index, pengine);
 }
-
-struct nvkm_oclass
-gk104_mspdec_oclass = {
-	.handle = NV_ENGINE(MSPDEC, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_mspdec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gk104_mspdec_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
new file mode 100644
index 000000000000..835631713c95
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_mspdec = {
+	.pmc_enable = 0x01020000,
+	.init = g98_mspdec_init,
+	.sclass = {
+		{ -1, -1, GT212_MSPDEC },
+		{}
+	}
+};
+
+int
+gt215_mspdec_new(struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)
+{
+	return nvkm_mspdec_new_(&gt215_mspdec, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
new file mode 100644
index 000000000000..d518af4bc9de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_MSPDEC_PRIV_H__
+#define __NVKM_MSPDEC_PRIV_H__
+#include <engine/mspdec.h>
+
+int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		     int index, struct nvkm_engine **);
+
+void g98_mspdec_init(struct nvkm_falcon *);
+
+void gf100_mspdec_init(struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
index 4576a9eee39d..3ea7eafb408f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
@@ -1,2 +1,4 @@
+nvkm-y += nvkm/engine/msppp/base.o
 nvkm-y += nvkm/engine/msppp/g98.o
+nvkm-y += nvkm/engine/msppp/gt215.o
 nvkm-y += nvkm/engine/msppp/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
new file mode 100644
index 000000000000..bfae5e60e925
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_msppp_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+		int index, struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(func, device, index, true, 0x086000, pengine);
+}
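
msppp follows the same recipe as mspdec, only with the falcon block at 0x086000; msvld (below) uses 0x084000. Each engine's init hook programs <base>+0x010 and <base>+0x01c, so the per-chipset differences reduce to a register value or two. A parameterized reading aid (not code from the patch; the helper is invented):

	/* Sketch: the common init shape across the g98/gf100 mspdec,
	 * msppp and msvld hooks; v010 is 0x0000ffd2 on g98-era parts
	 * and 0x0000fff2 on gf100 and later.
	 */
	static void
	falcon_init_common(struct nvkm_device *device, u32 base, u32 v010)
	{
		nvkm_wr32(device, base + 0x010, v010);
		nvkm_wr32(device, base + 0x01c, 0x0000fff2);
	}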
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index 7a602a2dec94..73f633ae2ee7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -21,89 +21,31 @@
  *
  * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
  */
-#include <engine/msppp.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct g98_msppp_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSPPP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msppp_sclass[] = {
-	{ 0x88b3, &nvkm_object_ofuncs },
-	{ 0x85b3, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPPP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msppp_cclass = {
-	.handle = NV_ENGCTX(MSPPP, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPPP engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
-static int
-g98_msppp_init(struct nvkm_object *object)
+void
+g98_msppp_init(struct nvkm_falcon *msppp)
 {
-	struct g98_msppp_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x086010, 0x0000ffd2);
-	nv_wr32(priv, 0x08601c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msppp->engine.subdev.device;
+	nvkm_wr32(device, 0x086010, 0x0000ffd2);
+	nvkm_wr32(device, 0x08601c, 0x0000fff2);
 }
 
-static int
-g98_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct g98_msppp_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true,
-				 "PMSPPP", "msppp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_falcon_func
+g98_msppp = {
+	.pmc_enable = 0x00400002,
+	.init = g98_msppp_init,
+	.sclass = {
+		{ -1, -1, G98_MSPPP },
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x00400002;
-	nv_engine(priv)->cclass = &g98_msppp_cclass;
-	nv_engine(priv)->sclass = g98_msppp_sclass;
-	return 0;
+int
+g98_msppp_new(struct nvkm_device *device, int index,
+	      struct nvkm_engine **pengine)
+{
+	return nvkm_msppp_new_(&g98_msppp, device, index, pengine);
 }
-
-struct nvkm_oclass
-g98_msppp_oclass = {
-	.handle = NV_ENGINE(MSPPP, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_msppp_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = g98_msppp_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index 6047baee1f75..c42c0c07e2db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -21,89 +21,31 @@
  *
  * Authors: Maarten Lankhorst
  */
-#include <engine/msppp.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gf100_msppp_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSPPP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msppp_sclass[] = {
-	{ 0x90b3, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPPP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msppp_cclass = {
-	.handle = NV_ENGCTX(MSPPP, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPPP engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
-static int
-gf100_msppp_init(struct nvkm_object *object)
+static void
+gf100_msppp_init(struct nvkm_falcon *msppp)
 {
-	struct gf100_msppp_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x086010, 0x0000fff2);
-	nv_wr32(priv, 0x08601c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msppp->engine.subdev.device;
+	nvkm_wr32(device, 0x086010, 0x0000fff2);
+	nvkm_wr32(device, 0x08601c, 0x0000fff2);
 }
 
-static int
-gf100_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gf100_msppp_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true,
-				 "PMSPPP", "msppp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_falcon_func
+gf100_msppp = {
+	.pmc_enable = 0x00000002,
+	.init = gf100_msppp_init,
+	.sclass = {
+		{ -1, -1, GF100_MSPPP },
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gf100_msppp_cclass;
-	nv_engine(priv)->sclass = gf100_msppp_sclass;
-	return 0;
+int
+gf100_msppp_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msppp_new_(&gf100_msppp, device, index, pengine);
 }
-
-struct nvkm_oclass
-gf100_msppp_oclass = {
-	.handle = NV_ENGINE(MSPPP, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_msppp_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_msppp_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
new file mode 100644
index 000000000000..00e7795f1d51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_msppp = {
+	.pmc_enable = 0x00400002,
+	.init = g98_msppp_init,
+	.sclass = {
+		{ -1, -1, GT212_MSPPP },
+		{}
+	}
+};
+
+int
+gt215_msppp_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msppp_new_(&gt215_msppp, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
new file mode 100644
index 000000000000..37a91f9d9181
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -0,0 +1,9 @@
+#ifndef __NVKM_MSPPP_PRIV_H__
+#define __NVKM_MSPPP_PRIV_H__
+#include <engine/msppp.h>
+
+int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		    int index, struct nvkm_engine **);
+
+void g98_msppp_init(struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
index 0c9811009e28..28c8ecd27b6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
@@ -1,3 +1,6 @@
+nvkm-y += nvkm/engine/msvld/base.o
 nvkm-y += nvkm/engine/msvld/g98.o
+nvkm-y += nvkm/engine/msvld/gt215.o
+nvkm-y += nvkm/engine/msvld/mcp89.o
 nvkm-y += nvkm/engine/msvld/gf100.o
 nvkm-y += nvkm/engine/msvld/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
new file mode 100644
index 000000000000..745bbb653dc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_msvld_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+		int index, struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(func, device, index, true, 0x084000, pengine);
+}
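
The old g98 msvld sclass table exposed three classes (0x88b1, 0x85b1, 0x86b1); after the conversion g98.c advertises only G98_MSVLD, and the Kbuild hunk above adds gt215.o and mcp89.o, suggesting the other two classes move into per-chipset files on the gt215 pattern. A hypothetical mcp89 variant mirroring that pattern (class name and unit bit assumed, not shown in this section):

	static const struct nvkm_falcon_func
	mcp89_msvld = {
		.pmc_enable = 0x04008000,	/* assumed same unit bit as g98 */
		.init = g98_msvld_init,
		.sclass = {
			{ -1, -1, IGT21A_MSVLD },	/* assumed <- 0x86b1 */
			{}
		}
	};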
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index c8a6b4ef52a1..47e2929bfaf0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -21,90 +21,31 @@
  *
  * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
  */
-#include <engine/msvld.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct g98_msvld_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msvld_sclass[] = {
-	{ 0x88b1, &nvkm_object_ofuncs },
-	{ 0x85b1, &nvkm_object_ofuncs },
-	{ 0x86b1, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msvld_cclass = {
-	.handle = NV_ENGCTX(MSVLD, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
-static int
-g98_msvld_init(struct nvkm_object *object)
+void
+g98_msvld_init(struct nvkm_falcon *msvld)
 {
-	struct g98_msvld_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x084010, 0x0000ffd2);
-	nv_wr32(priv, 0x08401c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msvld->engine.subdev.device;
+	nvkm_wr32(device, 0x084010, 0x0000ffd2);
+	nvkm_wr32(device, 0x08401c, 0x0000fff2);
 }
 
-static int
-g98_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct g98_msvld_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
-				 "PMSVLD", "msvld", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_falcon_func
+g98_msvld = {
+	.pmc_enable = 0x04008000,
+	.init = g98_msvld_init,
+	.sclass = {
+		{ -1, -1, G98_MSVLD },
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x04008000;
-	nv_engine(priv)->cclass = &g98_msvld_cclass;
-	nv_engine(priv)->sclass = g98_msvld_sclass;
-	return 0;
+int
+g98_msvld_new(struct nvkm_device *device, int index,
+	      struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&g98_msvld, device, index, pengine);
 }
-
-struct nvkm_oclass
-g98_msvld_oclass = {
-	.handle = NV_ENGINE(MSVLD, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_msvld_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = g98_msvld_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index b8d1e0f521ef..1ac581ba9f96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -21,89 +21,31 @@
  *
  * Authors: Maarten Lankhorst
  */
-#include <engine/msvld.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gf100_msvld_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msvld_sclass[] = {
-	{ 0x90b1, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msvld_cclass = {
-	.handle = NV_ENGCTX(MSVLD, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
-static int
-gf100_msvld_init(struct nvkm_object *object)
+void
+gf100_msvld_init(struct nvkm_falcon *msvld)
 {
-	struct gf100_msvld_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x084010, 0x0000fff2);
-	nv_wr32(priv, 0x08401c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msvld->engine.subdev.device;
+	nvkm_wr32(device, 0x084010, 0x0000fff2);
+	nvkm_wr32(device, 0x08401c, 0x0000fff2);
 }
 
-static int
-gf100_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gf100_msvld_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
-				 "PMSVLD", "msvld", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_falcon_func
+gf100_msvld = {
+	.pmc_enable = 0x00008000,
+	.init = gf100_msvld_init,
+	.sclass = {
+		{ -1, -1, GF100_MSVLD },
+		{}
+	}
+};
 
-	nv_subdev(priv)->unit = 0x00008000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gf100_msvld_cclass;
-	nv_engine(priv)->sclass = gf100_msvld_sclass;
-	return 0;
+int
+gf100_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&gf100_msvld, device, index, pengine);
 }
-
-struct nvkm_oclass
-gf100_msvld_oclass = {
-	.handle = NV_ENGINE(MSVLD, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_msvld_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_msvld_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index a0b0927834df..4bba16e0f560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -21,89 +21,23 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/msvld.h>
-#include <engine/falcon.h>
-
-struct gk104_msvld_priv {
-	struct nvkm_falcon base;
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gk104_msvld = {
+	.pmc_enable = 0x00008000,
+	.init = gf100_msvld_init,
+	.sclass = {
+		{ -1, -1, GK104_MSVLD },
+		{}
+	}
 };
 
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_msvld_sclass[] = {
-	{ 0x95b1, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_msvld_cclass = {
-	.handle = NV_ENGCTX(MSVLD, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
-
-static int
-gk104_msvld_init(struct nvkm_object *object)
+int
+gk104_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
 {
-	struct gk104_msvld_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x084010, 0x0000fff2);
-	nv_wr32(priv, 0x08401c, 0x0000fff2);
-	return 0;
-}
-
-static int
-gk104_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gk104_msvld_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
-				 "PMSVLD", "msvld", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00008000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gk104_msvld_cclass;
-	nv_engine(priv)->sclass = gk104_msvld_sclass;
-	return 0;
+	return nvkm_msvld_new_(&gk104_msvld, device, index, pengine);
 }
-
-struct nvkm_oclass
-gk104_msvld_oclass = {
-	.handle = NV_ENGINE(MSVLD, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_msvld_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gk104_msvld_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
new file mode 100644
index 000000000000..e17cb5605b2d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_msvld = {
+	.pmc_enable = 0x04008000,
+	.init = g98_msvld_init,
+	.sclass = {
+		{ -1, -1, GT212_MSVLD },
+		{}
+	}
+};
+
+int
+gt215_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&gt215_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
new file mode 100644
index 000000000000..511800f6a43b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+mcp89_msvld = {
+	.pmc_enable = 0x04008000,
+	.init = g98_msvld_init,
+	.sclass = {
+		{ -1, -1, IGT21A_MSVLD },
+		{}
+	}
+};
+
+int
+mcp89_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&mcp89_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
new file mode 100644
index 000000000000..9dc1da67d929
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_MSVLD_PRIV_H__
+#define __NVKM_MSVLD_PRIV_H__
+#include <engine/msvld.h>
+
+int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		    int index, struct nvkm_engine **);
+
+void g98_msvld_init(struct nvkm_falcon *);
+
+void gf100_msvld_init(struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
index 413b6091e256..1614d385fb0c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
@@ -1,9 +1,10 @@
 nvkm-y += nvkm/engine/pm/base.o
-nvkm-y += nvkm/engine/pm/daemon.o
 nvkm-y += nvkm/engine/pm/nv40.o
 nvkm-y += nvkm/engine/pm/nv50.o
 nvkm-y += nvkm/engine/pm/g84.o
+nvkm-y += nvkm/engine/pm/gt200.o
 nvkm-y += nvkm/engine/pm/gt215.o
 nvkm-y += nvkm/engine/pm/gf100.o
+nvkm-y += nvkm/engine/pm/gf108.o
+nvkm-y += nvkm/engine/pm/gf117.o
 nvkm-y += nvkm/engine/pm/gk104.o
-nvkm-y += nvkm/engine/pm/gk110.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 4cf36a3aa814..0db9be202c42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -24,370 +24,751 @@
 #include "priv.h"
 
 #include <core/client.h>
-#include <core/device.h>
 #include <core/option.h>
 
 #include <nvif/class.h>
 #include <nvif/ioctl.h>
 #include <nvif/unpack.h>
 
-#define QUAD_MASK 0x0f
-#define QUAD_FREE 0x01
+static u8
+nvkm_pm_count_perfdom(struct nvkm_pm *pm)
+{
+	struct nvkm_perfdom *dom;
+	u8 domain_nr = 0;
 
-static struct nvkm_perfsig *
-nvkm_perfsig_find_(struct nvkm_perfdom *dom, const char *name, u32 size)
+	list_for_each_entry(dom, &pm->domains, head)
+		domain_nr++;
+	return domain_nr;
+}
+
+static u16
+nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
 {
-	char path[64];
+	u16 signal_nr = 0;
 	int i;
 
-	if (name[0] != '/') {
+	if (dom) {
 		for (i = 0; i < dom->signal_nr; i++) {
-			if ( dom->signal[i].name &&
-			    !strncmp(name, dom->signal[i].name, size))
-				return &dom->signal[i];
-		}
-	} else {
-		for (i = 0; i < dom->signal_nr; i++) {
-			snprintf(path, sizeof(path), "/%s/%02x", dom->name, i);
-			if (!strncmp(name, path, size))
-				return &dom->signal[i];
+			if (dom->signal[i].name)
+				signal_nr++;
 		}
 	}
+	return signal_nr;
+}
 
+static struct nvkm_perfdom *
+nvkm_perfdom_find(struct nvkm_pm *pm, int di)
+{
+	struct nvkm_perfdom *dom;
+	int tmp = 0;
+
+	list_for_each_entry(dom, &pm->domains, head) {
+		if (tmp++ == di)
+			return dom;
+	}
 	return NULL;
 }
 
 struct nvkm_perfsig *
-nvkm_perfsig_find(struct nvkm_pm *ppm, const char *name, u32 size,
-		  struct nvkm_perfdom **pdom)
+nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
 {
 	struct nvkm_perfdom *dom = *pdom;
-	struct nvkm_perfsig *sig;
 
 	if (dom == NULL) {
-		list_for_each_entry(dom, &ppm->domains, head) {
-			sig = nvkm_perfsig_find_(dom, name, size);
-			if (sig) {
-				*pdom = dom;
-				return sig;
-			}
-		}
+		dom = nvkm_perfdom_find(pm, di);
+		if (dom == NULL)
+			return NULL;
+		*pdom = dom;
+	}
 
+	if (!dom->signal[si].name)
 		return NULL;
-	}
+	return &dom->signal[si];
+}
 
-	return nvkm_perfsig_find_(dom, name, size);
+static u8
+nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
+{
+	u8 source_nr = 0, i;
+
+	for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
+		if (sig->source[i])
+			source_nr++;
+	}
+	return source_nr;
 }
 
-struct nvkm_perfctr *
-nvkm_perfsig_wrap(struct nvkm_pm *ppm, const char *name,
-		  struct nvkm_perfdom **pdom)
+static struct nvkm_perfsrc *
+nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
 {
-	struct nvkm_perfsig *sig;
-	struct nvkm_perfctr *ctr;
+	struct nvkm_perfsrc *src;
+	bool found = false;
+	int tmp = 1; /* source IDs start from 1 */
+	u8 i;
+
+	for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
+		if (sig->source[i] == si) {
+			found = true;
+			break;
+		}
+	}
 
-	sig = nvkm_perfsig_find(ppm, name, strlen(name), pdom);
-	if (!sig)
-		return NULL;
+	if (found) {
+		list_for_each_entry(src, &pm->sources, head) {
+			if (tmp++ == si)
+				return src;
+		}
+	}
 
-	ctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
-	if (ctr) {
-		ctr->signal[0] = sig;
-		ctr->logic_op = 0xaaaa;
+	return NULL;
+}
+
+static int
+nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
+{
+	struct nvkm_subdev *subdev = &pm->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_perfdom *dom = NULL;
+	struct nvkm_perfsig *sig;
+	struct nvkm_perfsrc *src;
+	u32 mask, value;
+	int i, j;
+
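+	/* For each of the counter's four signals, locate every source routed
+	 * to it and program the corresponding mux. */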
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
+			sig = nvkm_perfsig_find(pm, ctr->domain,
+						ctr->signal[i], &dom);
+			if (!sig)
+				return -EINVAL;
+
+			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
+			if (!src)
+				return -EINVAL;
+
+			/* set enable bit if needed */
+			mask = value = 0x00000000;
+			if (src->enable)
+				mask = value = 0x80000000;
+			mask  |= (src->mask << src->shift);
+			value |= ((ctr->source[i][j] >> 32) << src->shift);
+
+			/* enable the source */
+			nvkm_mask(device, src->addr, mask, value);
+			nvkm_debug(subdev,
+				   "enabled source %08x %08x %08x\n",
+				   src->addr, mask, value);
+		}
 	}
+	return 0;
+}
 
-	return ctr;
+static int
+nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
+{
+	struct nvkm_subdev *subdev = &pm->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_perfdom *dom = NULL;
+	struct nvkm_perfsig *sig;
+	struct nvkm_perfsrc *src;
+	u32 mask;
+	int i, j;
+
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
+			sig = nvkm_perfsig_find(pm, ctr->domain,
+						ctr->signal[i], &dom);
+			if (!sig)
+				return -EINVAL;
+
+			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
+			if (!src)
+				return -EINVAL;
+
+			/* unset enable bit if needed */
+			mask = 0x00000000;
+			if (src->enable)
+				mask = 0x80000000;
+			mask |= (src->mask << src->shift);
+
+			/* disable the source */
+			nvkm_mask(device, src->addr, mask, 0);
+			nvkm_debug(subdev, "disabled source %08x %08x\n",
+				   src->addr, mask);
+		}
+	}
+	return 0;
 }
 
 /*******************************************************************************
- * Perfmon object classes
+ * Perfdom object classes
  ******************************************************************************/
 static int
-nvkm_perfctr_query(struct nvkm_object *object, void *data, u32 size)
+nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
 {
 	union {
-		struct nvif_perfctr_query_v0 v0;
+		struct nvif_perfdom_init none;
 	} *args = data;
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_pm *ppm = (void *)object->engine;
-	struct nvkm_perfdom *dom = NULL, *chk;
-	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
-	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
-	const char *name;
-	int tmp = 0, di, si;
+	struct nvkm_object *object = &dom->object;
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int ret, i;
+
+	nvif_ioctl(object, "perfdom init size %d\n", size);
+	if (nvif_unvers(args->none)) {
+		nvif_ioctl(object, "perfdom init\n");
+	} else
+		return ret;
+
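+	/* program and enable every counter attached to this domain */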
+	for (i = 0; i < 4; i++) {
+		if (dom->ctr[i]) {
+			dom->func->init(pm, dom, dom->ctr[i]);
+
+			/* enable sources */
+			nvkm_perfsrc_enable(pm, dom->ctr[i]);
+		}
+	}
+
+	/* start next batch of counters for sampling */
+	dom->func->next(pm, dom);
+	return 0;
+}
+
+static int
+nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
+{
+	union {
+		struct nvif_perfdom_sample none;
+	} *args = data;
+	struct nvkm_object *object = &dom->object;
+	struct nvkm_pm *pm = dom->perfmon->pm;
 	int ret;
 
-	nv_ioctl(object, "perfctr query size %d\n", size);
+	nvif_ioctl(object, "perfdom sample size %d\n", size);
+	if (nvif_unvers(args->none)) {
+		nvif_ioctl(object, "perfdom sample\n");
+	} else
+		return ret;
+	pm->sequence++;
+
+	/* sample previous batch of counters */
+	list_for_each_entry(dom, &pm->domains, head)
+		dom->func->next(pm, dom);
+
+	return 0;
+}
+
+static int
+nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
+{
+	union {
+		struct nvif_perfdom_read_v0 v0;
+	} *args = data;
+	struct nvkm_object *object = &dom->object;
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int ret, i;
+
+	nvif_ioctl(object, "perfdom read size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "perfctr query vers %d iter %08x\n",
-			 args->v0.version, args->v0.iter);
-		di = (args->v0.iter & 0xff000000) >> 24;
-		si = (args->v0.iter & 0x00ffffff) - 1;
+		nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
 	} else
 		return ret;
 
-	list_for_each_entry(chk, &ppm->domains, head) {
-		if (tmp++ == di) {
-			dom = chk;
-			break;
-		}
+	for (i = 0; i < 4; i++) {
+		if (dom->ctr[i])
+			dom->func->read(pm, dom, dom->ctr[i]);
 	}
 
-	if (dom == NULL || si >= (int)dom->signal_nr)
-		return -EINVAL;
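+	/* no clock count yet: sampling hasn't completed, let the caller retry */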
+	if (!dom->clk)
+		return -EAGAIN;
 
-	if (si >= 0) {
-		if (raw || !(name = dom->signal[si].name)) {
-			snprintf(args->v0.name, sizeof(args->v0.name),
-				 "/%s/%02x", dom->name, si);
-		} else {
-			strncpy(args->v0.name, name, sizeof(args->v0.name));
+	for (i = 0; i < 4; i++)
+		if (dom->ctr[i])
+			args->v0.ctr[i] = dom->ctr[i]->ctr;
+	args->v0.clk = dom->clk;
+	return 0;
+}
+
+static int
+nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nvkm_perfdom *dom = nvkm_perfdom(object);
+	switch (mthd) {
+	case NVIF_PERFDOM_V0_INIT:
+		return nvkm_perfdom_init(dom, data, size);
+	case NVIF_PERFDOM_V0_SAMPLE:
+		return nvkm_perfdom_sample(dom, data, size);
+	case NVIF_PERFDOM_V0_READ:
+		return nvkm_perfdom_read(dom, data, size);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static void *
+nvkm_perfdom_dtor(struct nvkm_object *object)
+{
+	struct nvkm_perfdom *dom = nvkm_perfdom(object);
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int i;
+
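+	/* disable sources and free any counters still attached */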
+	for (i = 0; i < 4; i++) {
+		struct nvkm_perfctr *ctr = dom->ctr[i];
+		if (ctr) {
+			nvkm_perfsrc_disable(pm, ctr);
+			if (ctr->head.next)
+				list_del(&ctr->head);
 		}
+		kfree(ctr);
 	}
 
-	do {
-		while (++si < dom->signal_nr) {
-			if (all || dom->signal[si].name) {
-				args->v0.iter = (di << 24) | ++si;
-				return 0;
-			}
+	return dom;
+}
+
+static int
+nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
+		 struct nvkm_perfsig *signal[4], u64 source[4][8],
+		 u16 logic_op, struct nvkm_perfctr **pctr)
+{
+	struct nvkm_perfctr *ctr;
+	int i, j;
+
+	if (!dom)
+		return -EINVAL;
+
+	ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
+	if (!ctr)
+		return -ENOMEM;
+
+	ctr->domain   = domain;
+	ctr->logic_op = logic_op;
+	ctr->slot     = slot;
+	for (i = 0; i < 4; i++) {
+		if (signal[i]) {
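+			/* store the signal's index within the domain,
+			 * rather than its pointer */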
+			ctr->signal[i] = signal[i] - dom->signal;
+			for (j = 0; j < 8; j++)
+				ctr->source[i][j] = source[i][j];
 		}
-		si = -1;
-		di = di + 1;
-		dom = list_entry(dom->head.next, typeof(*dom), head);
-	} while (&dom->head != &ppm->domains);
+	}
+	list_add_tail(&ctr->head, &dom->list);
 
-	args->v0.iter = 0xffffffff;
 	return 0;
 }
 
+static const struct nvkm_object_func
+nvkm_perfdom = {
+	.dtor = nvkm_perfdom_dtor,
+	.mthd = nvkm_perfdom_mthd,
+};
+
 static int
-nvkm_perfctr_sample(struct nvkm_object *object, void *data, u32 size)
+nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
+		  const struct nvkm_oclass *oclass, void *data, u32 size,
+		  struct nvkm_object **pobject)
 {
 	union {
-		struct nvif_perfctr_sample none;
+		struct nvif_perfdom_v0 v0;
 	} *args = data;
-	struct nvkm_pm *ppm = (void *)object->engine;
-	struct nvkm_perfctr *ctr, *tmp;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_object *parent = oclass->parent;
+	struct nvkm_perfdom *sdom = NULL;
+	struct nvkm_perfctr *ctr[4] = {};
 	struct nvkm_perfdom *dom;
+	int c, s, m;
 	int ret;
 
-	nv_ioctl(object, "perfctr sample size %d\n", size);
-	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "perfctr sample\n");
+	nvif_ioctl(parent, "create perfdom size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
+			   args->v0.version, args->v0.domain, args->v0.mode);
 	} else
 		return ret;
-	ppm->sequence++;
-
-	list_for_each_entry(dom, &ppm->domains, head) {
-		/* sample previous batch of counters */
-		if (dom->quad != QUAD_MASK) {
-			dom->func->next(ppm, dom);
-			tmp = NULL;
-			while (!list_empty(&dom->list)) {
-				ctr = list_first_entry(&dom->list,
-						       typeof(*ctr), head);
-				if (ctr->slot < 0) break;
-				if ( tmp && tmp == ctr) break;
-				if (!tmp) tmp = ctr;
-				dom->func->read(ppm, dom, ctr);
-				ctr->slot  = -1;
-				list_move_tail(&ctr->head, &dom->list);
-			}
-		}
 
-		dom->quad = QUAD_MASK;
-
-		/* setup next batch of counters for sampling */
-		list_for_each_entry(ctr, &dom->list, head) {
-			ctr->slot = ffs(dom->quad) - 1;
-			if (ctr->slot < 0)
-				break;
-			dom->quad &= ~(QUAD_FREE << ctr->slot);
-			dom->func->init(ppm, dom, ctr);
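+	/* Resolve and validate the requested signals/sources, then allocate
+	 * each counter. */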
+	for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
+		struct nvkm_perfsig *sig[4] = {};
+		u64 src[4][8] = {};
+
+		for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
+			sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
+						   args->v0.ctr[c].signal[s],
+						   &sdom);
+			if (args->v0.ctr[c].signal[s] && !sig[s])
+				return -EINVAL;
+
+			for (m = 0; m < 8; m++) {
+				src[s][m] = args->v0.ctr[c].source[s][m];
+				if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
+								    src[s][m]))
+					return -EINVAL;
+			}
 		}
 
-		if (dom->quad != QUAD_MASK)
-			dom->func->next(ppm, dom);
+		ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
+				       args->v0.ctr[c].logic_op, &ctr[c]);
+		if (ret)
+			return ret;
 	}
 
+	if (!sdom)
+		return -EINVAL;
+
+	if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
+	dom->perfmon = perfmon;
+	*pobject = &dom->object;
+
+	dom->func = sdom->func;
+	dom->addr = sdom->addr;
+	dom->mode = args->v0.mode;
+	for (c = 0; c < ARRAY_SIZE(ctr); c++)
+		dom->ctr[c] = ctr[c];
 	return 0;
 }
 
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
 static int
-nvkm_perfctr_read(struct nvkm_object *object, void *data, u32 size)
+nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
+			       void *data, u32 size)
 {
 	union {
-		struct nvif_perfctr_read_v0 v0;
+		struct nvif_perfmon_query_domain_v0 v0;
 	} *args = data;
-	struct nvkm_perfctr *ctr = (void *)object;
-	int ret;
+	struct nvkm_object *object = &perfmon->object;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_perfdom *dom;
+	u8 domain_nr;
+	int di, ret;
 
-	nv_ioctl(object, "perfctr read size %d\n", size);
+	nvif_ioctl(object, "perfmon query domain size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "perfctr read vers %d\n", args->v0.version);
+		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
+			   args->v0.version, args->v0.iter);
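+		/* iter encodes the next domain index biased by one;
+		 * zero starts the iteration */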
+		di = (args->v0.iter & 0xff) - 1;
 	} else
 		return ret;
 
-	if (!ctr->clk)
-		return -EAGAIN;
+	domain_nr = nvkm_pm_count_perfdom(pm);
+	if (di >= (int)domain_nr)
+		return -EINVAL;
+
+	if (di >= 0) {
+		dom = nvkm_perfdom_find(pm, di);
+		if (dom == NULL)
+			return -EINVAL;
+
+		args->v0.id         = di;
+		args->v0.signal_nr  = nvkm_perfdom_count_perfsig(dom);
+		strncpy(args->v0.name, dom->name, sizeof(args->v0.name));
+
+		/* Currently only global counters (PCOUNTER) are implemented,
+		 * but this will be different for local counters (MP). */
+		args->v0.counter_nr = 4;
+	}
 
-	args->v0.clk = ctr->clk;
-	args->v0.ctr = ctr->ctr;
+	if (++di < domain_nr) {
+		args->v0.iter = ++di;
+		return 0;
+	}
+
+	args->v0.iter = 0xff;
 	return 0;
 }
 
 static int
-nvkm_perfctr_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
+			       void *data, u32 size)
 {
-	switch (mthd) {
-	case NVIF_PERFCTR_V0_QUERY:
-		return nvkm_perfctr_query(object, data, size);
-	case NVIF_PERFCTR_V0_SAMPLE:
-		return nvkm_perfctr_sample(object, data, size);
-	case NVIF_PERFCTR_V0_READ:
-		return nvkm_perfctr_read(object, data, size);
-	default:
-		break;
+	union {
+		struct nvif_perfmon_query_signal_v0 v0;
+	} *args = data;
+	struct nvkm_object *object = &perfmon->object;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_device *device = pm->engine.subdev.device;
+	struct nvkm_perfdom *dom;
+	struct nvkm_perfsig *sig;
+	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
+	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
+	int ret, si;
+
+	nvif_ioctl(object, "perfmon query signal size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object,
+			   "perfmon query signal vers %d dom %d iter %04x\n",
+			   args->v0.version, args->v0.domain, args->v0.iter);
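+		/* same biased-by-one encoding as the domain iterator */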
+		si = (args->v0.iter & 0xffff) - 1;
+	} else
+		return ret;
+
+	dom = nvkm_perfdom_find(pm, args->v0.domain);
+	if (dom == NULL || si >= (int)dom->signal_nr)
+		return -EINVAL;
+
+	if (si >= 0) {
+		sig = &dom->signal[si];
+		if (raw || !sig->name) {
+			snprintf(args->v0.name, sizeof(args->v0.name),
+				 "/%s/%02x", dom->name, si);
+		} else {
+			strncpy(args->v0.name, sig->name,
+				sizeof(args->v0.name));
+		}
+
+		args->v0.signal = si;
+		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
 	}
-	return -EINVAL;
-}
 
-static void
-nvkm_perfctr_dtor(struct nvkm_object *object)
-{
-	struct nvkm_perfctr *ctr = (void *)object;
-	if (ctr->head.next)
-		list_del(&ctr->head);
-	nvkm_object_destroy(&ctr->base);
+	while (++si < dom->signal_nr) {
+		if (all || dom->signal[si].name) {
+			args->v0.iter = ++si;
+			return 0;
+		}
+	}
+
+	args->v0.iter = 0xffff;
+	return 0;
 }
 
 static int
-nvkm_perfctr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
+			       void *data, u32 size)
 {
 	union {
-		struct nvif_perfctr_v0 v0;
+		struct nvif_perfmon_query_source_v0 v0;
 	} *args = data;
-	struct nvkm_pm *ppm = (void *)engine;
+	struct nvkm_object *object = &perfmon->object;
+	struct nvkm_pm *pm = perfmon->pm;
 	struct nvkm_perfdom *dom = NULL;
-	struct nvkm_perfsig *sig[4] = {};
-	struct nvkm_perfctr *ctr;
-	int ret, i;
+	struct nvkm_perfsig *sig;
+	struct nvkm_perfsrc *src;
+	u8 source_nr = 0;
+	int si, ret;
 
-	nv_ioctl(parent, "create perfctr size %d\n", size);
+	nvif_ioctl(object, "perfmon query source size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create perfctr vers %d logic_op %04x\n",
-			 args->v0.version, args->v0.logic_op);
+		nvif_ioctl(object,
+			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
+			   args->v0.version, args->v0.domain, args->v0.signal,
+			   args->v0.iter);
+		si = (args->v0.iter & 0xff) - 1;
 	} else
 		return ret;
 
-	for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) {
-		sig[i] = nvkm_perfsig_find(ppm, args->v0.name[i],
-					   strnlen(args->v0.name[i],
-						   sizeof(args->v0.name[i])),
-					   &dom);
-		if (!sig[i])
+	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
+	if (!sig)
+		return -EINVAL;
+
+	source_nr = nvkm_perfsig_count_perfsrc(sig);
+	if (si >= (int)source_nr)
+		return -EINVAL;
+
+	if (si >= 0) {
+		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
+		if (!src)
 			return -EINVAL;
+
+		args->v0.source = sig->source[si];
+		args->v0.mask   = src->mask;
+		strncpy(args->v0.name, src->name, sizeof(args->v0.name));
 	}
 
-	ret = nvkm_object_create(parent, engine, oclass, 0, &ctr);
-	*pobject = nv_object(ctr);
-	if (ret)
-		return ret;
+	if (++si < source_nr) {
+		args->v0.iter = ++si;
+		return 0;
+	}
 
-	ctr->slot = -1;
-	ctr->logic_op = args->v0.logic_op;
-	ctr->signal[0] = sig[0];
-	ctr->signal[1] = sig[1];
-	ctr->signal[2] = sig[2];
-	ctr->signal[3] = sig[3];
-	if (dom)
-		list_add_tail(&ctr->head, &dom->list);
+	args->v0.iter = 0xff;
 	return 0;
 }
 
-static struct nvkm_ofuncs
-nvkm_perfctr_ofuncs = {
-	.ctor = nvkm_perfctr_ctor,
-	.dtor = nvkm_perfctr_dtor,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
-	.mthd = nvkm_perfctr_mthd,
-};
+static int
+nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
+	switch (mthd) {
+	case NVIF_PERFMON_V0_QUERY_DOMAIN:
+		return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
+	case NVIF_PERFMON_V0_QUERY_SIGNAL:
+		return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
+	case NVIF_PERFMON_V0_QUERY_SOURCE:
+		return nvkm_perfmon_mthd_query_source(perfmon, data, size);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int
+nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **pobject)
+{
+	struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
+	return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
+}
+
+static int
+nvkm_perfmon_child_get(struct nvkm_object *object, int index,
+		       struct nvkm_oclass *oclass)
+{
+	if (index == 0) {
+		oclass->base.oclass = NVIF_IOCTL_NEW_V0_PERFDOM;
+		oclass->base.minver = 0;
+		oclass->base.maxver = 0;
+		oclass->ctor = nvkm_perfmon_child_new;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void *
+nvkm_perfmon_dtor(struct nvkm_object *object)
+{
+	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
+	struct nvkm_pm *pm = perfmon->pm;
+	mutex_lock(&pm->engine.subdev.mutex);
+	if (pm->perfmon == &perfmon->object)
+		pm->perfmon = NULL;
+	mutex_unlock(&pm->engine.subdev.mutex);
+	return perfmon;
+}
 
-struct nvkm_oclass
-nvkm_pm_sclass[] = {
-	{ .handle = NVIF_IOCTL_NEW_V0_PERFCTR,
-	  .ofuncs = &nvkm_perfctr_ofuncs,
-	},
-	{},
+static const struct nvkm_object_func
+nvkm_perfmon = {
+	.dtor = nvkm_perfmon_dtor,
+	.mthd = nvkm_perfmon_mthd,
+	.sclass = nvkm_perfmon_child_get,
 };
 
-/*******************************************************************************
- * PPM context
- ******************************************************************************/
-static void
-nvkm_perfctx_dtor(struct nvkm_object *object)
+static int
+nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_object **pobject)
 {
-	struct nvkm_pm *ppm = (void *)object->engine;
-	struct nvkm_perfctx *ctx = (void *)object;
-
-	mutex_lock(&nv_subdev(ppm)->mutex);
-	nvkm_engctx_destroy(&ctx->base);
-	if (ppm->context == ctx)
-		ppm->context = NULL;
-	mutex_unlock(&nv_subdev(ppm)->mutex);
+	struct nvkm_perfmon *perfmon;
+
+	if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
+	perfmon->pm = pm;
+	*pobject = &perfmon->object;
+	return 0;
 }
 
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
 static int
-nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+		   void *data, u32 size, struct nvkm_object **pobject)
 {
-	struct nvkm_pm *ppm = (void *)engine;
-	struct nvkm_perfctx *ctx;
+	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
 	int ret;
 
-	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0, 0, 0, &ctx);
-	*pobject = nv_object(ctx);
+	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
 	if (ret)
 		return ret;
 
-	mutex_lock(&nv_subdev(ppm)->mutex);
-	if (ppm->context == NULL)
-		ppm->context = ctx;
-	if (ctx != ppm->context)
-		ret = -EBUSY;
-	mutex_unlock(&nv_subdev(ppm)->mutex);
-
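+	/* only one perfmon client may be active at a time */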
+	mutex_lock(&pm->engine.subdev.mutex);
+	if (pm->perfmon == NULL)
+		pm->perfmon = *pobject;
+	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
+	mutex_unlock(&pm->engine.subdev.mutex);
 	return ret;
 }
 
-struct nvkm_oclass
-nvkm_pm_cclass = {
-	.handle = NV_ENGCTX(PM, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nvkm_perfctx_ctor,
-		.dtor = nvkm_perfctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-	},
+static const struct nvkm_device_oclass
+nvkm_pm_oclass = {
+	.base.oclass = NVIF_IOCTL_NEW_V0_PERFMON,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nvkm_pm_oclass_new,
 };
 
-/*******************************************************************************
- * PPM engine/subdev functions
- ******************************************************************************/
+static int
+nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
+		   const struct nvkm_device_oclass **class)
+{
+	if (index == 0) {
+		oclass->base = nvkm_pm_oclass.base;
+		*class = &nvkm_pm_oclass;
+		return index;
+	}
+	return 1;
+}
+
+int
+nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
+		 const struct nvkm_specsrc *spec)
+{
+	const struct nvkm_specsrc *ssrc;
+	const struct nvkm_specmux *smux;
+	struct nvkm_perfsrc *src;
+	u8 source_nr = 0;
+
+	if (!spec) {
+		/* No sources are defined for this signal. */
+		return 0;
+	}
+
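+	/* Walk each source register and its muxes; a given register/shift
+	 * pair is created once and shared via the global pm->sources list. */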
+	ssrc = spec;
+	while (ssrc->name) {
+		smux = ssrc->mux;
+		while (smux->name) {
+			bool found = false;
+			u8 source_id = 0;
+			u32 len;
+
+			list_for_each_entry(src, &pm->sources, head) {
+				if (src->addr == ssrc->addr &&
+				    src->shift == smux->shift) {
+					found = true;
+					break;
+				}
+				source_id++;
+			}
+
+			if (!found) {
+				src = kzalloc(sizeof(*src), GFP_KERNEL);
+				if (!src)
+					return -ENOMEM;
+
+				src->addr   = ssrc->addr;
+				src->mask   = smux->mask;
+				src->shift  = smux->shift;
+				src->enable = smux->enable;
+
+				len = strlen(ssrc->name) +
+				      strlen(smux->name) + 2;
+				src->name = kzalloc(len, GFP_KERNEL);
+				if (!src->name) {
+					kfree(src);
+					return -ENOMEM;
+				}
+				snprintf(src->name, len, "%s_%s", ssrc->name,
+					 smux->name);
+
+				list_add_tail(&src->head, &pm->sources);
+			}
+
+			sig->source[source_nr++] = source_id + 1;
+			smux++;
+		}
+		ssrc++;
+	}
+
+	return 0;
+}
+
 int
-nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
+nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
 		 u32 base, u32 size_unit, u32 size_domain,
 		 const struct nvkm_specdom *spec)
 {
 	const struct nvkm_specdom *sdom;
 	const struct nvkm_specsig *ssig;
 	struct nvkm_perfdom *dom;
-	int i;
+	int ret, i;
 
 	for (i = 0; i == 0 || mask; i++) {
 		u32 addr = base + (i * size_unit);
@@ -410,16 +791,20 @@ nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
 					 "%s/%02x", name, (int)(sdom - spec));
 			}
 
-			list_add_tail(&dom->head, &ppm->domains);
+			list_add_tail(&dom->head, &pm->domains);
 			INIT_LIST_HEAD(&dom->list);
 			dom->func = sdom->func;
 			dom->addr = addr;
-			dom->quad = QUAD_MASK;
 			dom->signal_nr = sdom->signal_nr;
 
 			ssig = (sdom++)->signal;
 			while (ssig->name) {
-				dom->signal[ssig->signal].name = ssig->name;
+				struct nvkm_perfsig *sig =
+					&dom->signal[ssig->signal];
+				sig->name = ssig->name;
+				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
+				if (ret)
+					return ret;
 				ssig++;
 			}
 
@@ -432,47 +817,49 @@ nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
 	return 0;
 }
 
-int
-_nvkm_pm_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_pm *ppm = (void *)object;
-	return nvkm_engine_fini(&ppm->base, suspend);
-}
-
-int
-_nvkm_pm_init(struct nvkm_object *object)
+static int
+nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
 {
-	struct nvkm_pm *ppm = (void *)object;
-	return nvkm_engine_init(&ppm->base);
+	struct nvkm_pm *pm = nvkm_pm(engine);
+	if (pm->func->fini)
+		pm->func->fini(pm);
+	return 0;
 }
 
-void
-_nvkm_pm_dtor(struct nvkm_object *object)
+static void *
+nvkm_pm_dtor(struct nvkm_engine *engine)
 {
-	struct nvkm_pm *ppm = (void *)object;
-	struct nvkm_perfdom *dom, *tmp;
+	struct nvkm_pm *pm = nvkm_pm(engine);
+	struct nvkm_perfdom *dom, *next_dom;
+	struct nvkm_perfsrc *src, *next_src;
 
-	list_for_each_entry_safe(dom, tmp, &ppm->domains, head) {
+	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
 		list_del(&dom->head);
 		kfree(dom);
 	}
 
-	nvkm_engine_destroy(&ppm->base);
+	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
+		list_del(&src->head);
+		kfree(src->name);
+		kfree(src);
+	}
+
+	return pm;
 }
 
+static const struct nvkm_engine_func
+nvkm_pm = {
+	.dtor = nvkm_pm_dtor,
+	.fini = nvkm_pm_fini,
+	.base.sclass = nvkm_pm_oclass_get,
+};
+
 int
-nvkm_pm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_pm *pm)
 {
-	struct nvkm_pm *ppm;
-	int ret;
-
-	ret = nvkm_engine_create_(parent, engine, oclass, true, "PPM",
-				  "pm", length, pobject);
-	ppm = *pobject;
-	if (ret)
-		return ret;
-
-	INIT_LIST_HEAD(&ppm->domains);
-	return 0;
+	pm->func = func;
+	INIT_LIST_HEAD(&pm->domains);
+	INIT_LIST_HEAD(&pm->sources);
+	return nvkm_engine_ctor(&nvkm_pm, device, index, 0, true, &pm->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
deleted file mode 100644
index a7a5f3a3c91b..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-static void
-pwr_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
-		 struct nvkm_perfctr *ctr)
-{
-	u32 mask = 0x00000000;
-	u32 ctrl = 0x00000001;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ctr->signal) && ctr->signal[i]; i++)
-		mask |= 1 << (ctr->signal[i] - dom->signal);
-
-	nv_wr32(ppm, 0x10a504 + (ctr->slot * 0x10), mask);
-	nv_wr32(ppm, 0x10a50c + (ctr->slot * 0x10), ctrl);
-	nv_wr32(ppm, 0x10a50c + (ppm->last * 0x10), 0x00000003);
-}
-
-static void
-pwr_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
-		 struct nvkm_perfctr *ctr)
-{
-	ctr->ctr = ppm->pwr[ctr->slot];
-	ctr->clk = ppm->pwr[ppm->last];
-}
-
-static void
-pwr_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
-{
-	int i;
-
-	for (i = 0; i <= ppm->last; i++) {
-		ppm->pwr[i] = nv_rd32(ppm, 0x10a508 + (i * 0x10));
-		nv_wr32(ppm, 0x10a508 + (i * 0x10), 0x80000000);
-	}
-}
-
-static const struct nvkm_funcdom
-pwr_perfctr_func = {
-	.init = pwr_perfctr_init,
-	.read = pwr_perfctr_read,
-	.next = pwr_perfctr_next,
-};
-
-const struct nvkm_specdom
-gt215_pm_pwr[] = {
-	{ 0x20, (const struct nvkm_specsig[]) {
-			{ 0x00, "pwr_gr_idle" },
-			{ 0x04, "pwr_bsp_idle" },
-			{ 0x05, "pwr_vp_idle" },
-			{ 0x06, "pwr_ppp_idle" },
-			{ 0x13, "pwr_ce0_idle" },
-			{}
-		}, &pwr_perfctr_func },
-	{}
-};
-
-const struct nvkm_specdom
-gf100_pm_pwr[] = {
-	{ 0x20, (const struct nvkm_specsig[]) {
-			{ 0x00, "pwr_gr_idle" },
-			{ 0x04, "pwr_bsp_idle" },
-			{ 0x05, "pwr_vp_idle" },
-			{ 0x06, "pwr_ppp_idle" },
-			{ 0x13, "pwr_ce0_idle" },
-			{ 0x14, "pwr_ce1_idle" },
-			{}
-		}, &pwr_perfctr_func },
-	{}
-};
-
-const struct nvkm_specdom
-gk104_pm_pwr[] = {
-	{ 0x20, (const struct nvkm_specsig[]) {
-			{ 0x00, "pwr_gr_idle" },
-			{ 0x04, "pwr_bsp_idle" },
-			{ 0x05, "pwr_vp_idle" },
-			{ 0x06, "pwr_ppp_idle" },
-			{ 0x13, "pwr_ce0_idle" },
-			{ 0x14, "pwr_ce1_idle" },
-			{ 0x15, "pwr_ce2_idle" },
-			{}
-		}, &pwr_perfctr_func },
-	{}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
index d54c6705ba17..6e441ddafd86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
@@ -23,15 +23,121 @@
  */
 #include "nv40.h"
 
+const struct nvkm_specsrc
+g84_vfetch_sources[] = {
+	{ 0x400c0c, (const struct nvkm_specmux[]) {
+			{ 0x3, 0, "unk0" },
+			{}
+		}, "pgraph_vfetch_unk0c" },
+	{}
+};
+
+static const struct nvkm_specsrc
+g84_prop_sources[] = {
+	{ 0x408e50, (const struct nvkm_specmux[]) {
+			{ 0x1f, 0, "sel", true },
+			{}
+		}, "pgraph_tpc0_prop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+g84_crop_sources[] = {
+	{ 0x407008, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x7, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_crop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+g84_tex_sources[] = {
+	{ 0x408808, (const struct nvkm_specmux[]) {
+			{ 0xfffff, 0, "unk0" },
+			{}
+		}, "pgraph_tpc0_tex_unk08" },
+	{}
+};
+
 static const struct nvkm_specdom
 g84_pm[] = {
 	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xbd, "pc01_gr_idle" },
+			{ 0x5e, "pc01_strmout_00" },
+			{ 0x5f, "pc01_strmout_01" },
+			{ 0xd2, "pc01_trast_00" },
+			{ 0xd3, "pc01_trast_01" },
+			{ 0xd4, "pc01_trast_02" },
+			{ 0xd5, "pc01_trast_03" },
+			{ 0xd8, "pc01_trast_04" },
+			{ 0xd9, "pc01_trast_05" },
+			{ 0x5c, "pc01_vattr_00" },
+			{ 0x5d, "pc01_vattr_01" },
+			{ 0x66, "pc01_vfetch_00", g84_vfetch_sources },
+			{ 0x67, "pc01_vfetch_01", g84_vfetch_sources },
+			{ 0x68, "pc01_vfetch_02", g84_vfetch_sources },
+			{ 0x69, "pc01_vfetch_03", g84_vfetch_sources },
+			{ 0x6a, "pc01_vfetch_04", g84_vfetch_sources },
+			{ 0x6b, "pc01_vfetch_05", g84_vfetch_sources },
+			{ 0x6c, "pc01_vfetch_06", g84_vfetch_sources },
+			{ 0x6d, "pc01_vfetch_07", g84_vfetch_sources },
+			{ 0x6e, "pc01_vfetch_08", g84_vfetch_sources },
+			{ 0x6f, "pc01_vfetch_09", g84_vfetch_sources },
+			{ 0x70, "pc01_vfetch_0a", g84_vfetch_sources },
+			{ 0x71, "pc01_vfetch_0b", g84_vfetch_sources },
+			{ 0x72, "pc01_vfetch_0c", g84_vfetch_sources },
+			{ 0x73, "pc01_vfetch_0d", g84_vfetch_sources },
+			{ 0x74, "pc01_vfetch_0e", g84_vfetch_sources },
+			{ 0x75, "pc01_vfetch_0f", g84_vfetch_sources },
+			{ 0x76, "pc01_vfetch_10", g84_vfetch_sources },
+			{ 0x77, "pc01_vfetch_11", g84_vfetch_sources },
+			{ 0x78, "pc01_vfetch_12", g84_vfetch_sources },
+			{ 0x79, "pc01_vfetch_13", g84_vfetch_sources },
+			{ 0x7a, "pc01_vfetch_14", g84_vfetch_sources },
+			{ 0x7b, "pc01_vfetch_15", g84_vfetch_sources },
+			{ 0x7c, "pc01_vfetch_16", g84_vfetch_sources },
+			{ 0x7d, "pc01_vfetch_17", g84_vfetch_sources },
+			{ 0x7e, "pc01_vfetch_18", g84_vfetch_sources },
+			{ 0x7f, "pc01_vfetch_19", g84_vfetch_sources },
+			{ 0x07, "pc01_zcull_00", nv50_zcull_sources },
+			{ 0x08, "pc01_zcull_01", nv50_zcull_sources },
+			{ 0x09, "pc01_zcull_02", nv50_zcull_sources },
+			{ 0x0a, "pc01_zcull_03", nv50_zcull_sources },
+			{ 0x0b, "pc01_zcull_04", nv50_zcull_sources },
+			{ 0x0c, "pc01_zcull_05", nv50_zcull_sources },
+			{ 0xa4, "pc01_unk00" },
+			{ 0xec, "pc01_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xa0, (const struct nvkm_specsig[]) {
+			{ 0x30, "pc02_crop_00", g84_crop_sources },
+			{ 0x31, "pc02_crop_01", g84_crop_sources },
+			{ 0x32, "pc02_crop_02", g84_crop_sources },
+			{ 0x33, "pc02_crop_03", g84_crop_sources },
+			{ 0x00, "pc02_prop_00", g84_prop_sources },
+			{ 0x01, "pc02_prop_01", g84_prop_sources },
+			{ 0x02, "pc02_prop_02", g84_prop_sources },
+			{ 0x03, "pc02_prop_03", g84_prop_sources },
+			{ 0x04, "pc02_prop_04", g84_prop_sources },
+			{ 0x05, "pc02_prop_05", g84_prop_sources },
+			{ 0x06, "pc02_prop_06", g84_prop_sources },
+			{ 0x07, "pc02_prop_07", g84_prop_sources },
+			{ 0x48, "pc02_tex_00", g84_tex_sources },
+			{ 0x49, "pc02_tex_01", g84_tex_sources },
+			{ 0x4a, "pc02_tex_02", g84_tex_sources },
+			{ 0x4b, "pc02_tex_03", g84_tex_sources },
+			{ 0x1a, "pc02_tex_04", g84_tex_sources },
+			{ 0x1b, "pc02_tex_05", g84_tex_sources },
+			{ 0x1c, "pc02_tex_06", g84_tex_sources },
+			{ 0x44, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x45, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x46, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x47, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0x8c, "pc02_trailer" },
 			{}
 		}, &nv40_perfctr_func },
 	{ 0x20, (const struct nvkm_specsig[]) {
@@ -52,14 +158,8 @@ g84_pm[] = {
 	{}
 };
 
-struct nvkm_oclass *
-g84_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0x84),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = g84_pm,
-}.base;
+int
+g84_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return nv40_pm_new_(g84_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index 008fed73dd82..d2901e9a7808 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -23,62 +23,146 @@
  */
 #include "gf100.h"
 
+const struct nvkm_specsrc
+gf100_pbfb_sources[] = {
+	{ 0x10f100, (const struct nvkm_specmux[]) {
+			{ 0x1, 0, "unk0" },
+			{ 0x3f, 4, "unk4" },
+			{}
+		}, "pbfb_broadcast_pm_unk100" },
+	{}
+};
+
+const struct nvkm_specsrc
+gf100_pmfb_sources[] = {
+	{ 0x140028, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{ 0x7, 16, "unk16" },
+			{ 0x3, 24, "unk24" },
+			{ 0x2, 29, "unk29" },
+			{}
+		}, "pmfb0_pm_unk28" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gf100_l1_sources[] = {
+	{ 0x5044a8, (const struct nvkm_specmux[]) {
+			{ 0x3f, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_l1_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gf100_tex_sources[] = {
+	{ 0x5042c0, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x7, 8, "sel1", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gf100_unk400_sources[] = {
+	{ 0x50440c, (const struct nvkm_specmux[]) {
+			{ 0x3f, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_unk400_pm_mux" },
+	{}
+};
+
 static const struct nvkm_specdom
 gf100_pm_hub[] = {
 	{}
 };
 
-static const struct nvkm_specdom
+const struct nvkm_specdom
 gf100_pm_gpc[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x00, "gpc00_l1_00", gf100_l1_sources },
+			{ 0x01, "gpc00_l1_01", gf100_l1_sources },
+			{ 0x02, "gpc00_l1_02", gf100_l1_sources },
+			{ 0x03, "gpc00_l1_03", gf100_l1_sources },
+			{ 0x05, "gpc00_l1_04", gf100_l1_sources },
+			{ 0x06, "gpc00_l1_05", gf100_l1_sources },
+			{ 0x0a, "gpc00_tex_00", gf100_tex_sources },
+			{ 0x0b, "gpc00_tex_01", gf100_tex_sources },
+			{ 0x0c, "gpc00_tex_02", gf100_tex_sources },
+			{ 0x0d, "gpc00_tex_03", gf100_tex_sources },
+			{ 0x0e, "gpc00_tex_04", gf100_tex_sources },
+			{ 0x0f, "gpc00_tex_05", gf100_tex_sources },
+			{ 0x10, "gpc00_tex_06", gf100_tex_sources },
+			{ 0x11, "gpc00_tex_07", gf100_tex_sources },
+			{ 0x12, "gpc00_tex_08", gf100_tex_sources },
+			{ 0x26, "gpc00_unk400_00", gf100_unk400_sources },
+			{}
+		}, &gf100_perfctr_func },
 	{}
 };
 
-static const struct nvkm_specdom
+const struct nvkm_specdom
 gf100_pm_part[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x10, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x21, "part00_pmfb_00", gf100_pmfb_sources },
+			{ 0x04, "part00_pmfb_01", gf100_pmfb_sources },
+			{ 0x00, "part00_pmfb_02", gf100_pmfb_sources },
+			{ 0x02, "part00_pmfb_03", gf100_pmfb_sources },
+			{ 0x01, "part00_pmfb_04", gf100_pmfb_sources },
+			{ 0x2e, "part00_pmfb_05", gf100_pmfb_sources },
+			{ 0x2f, "part00_pmfb_06", gf100_pmfb_sources },
+			{ 0x1b, "part00_pmfb_07", gf100_pmfb_sources },
+			{ 0x1c, "part00_pmfb_08", gf100_pmfb_sources },
+			{ 0x1d, "part00_pmfb_09", gf100_pmfb_sources },
+			{ 0x1e, "part00_pmfb_0a", gf100_pmfb_sources },
+			{ 0x1f, "part00_pmfb_0b", gf100_pmfb_sources },
+			{}
+		}, &gf100_perfctr_func },
 	{}
 };
 
 static void
-gf100_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+gf100_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		   struct nvkm_perfctr *ctr)
 {
-	struct gf100_pm_priv *priv = (void *)ppm;
-	struct gf100_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 	u32 log = ctr->logic_op;
 	u32 src = 0x00000000;
 	int i;
 
-	for (i = 0; i < 4 && ctr->signal[i]; i++)
-		src |= (ctr->signal[i] - dom->signal) << (i * 8);
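+	/* pack the four signal indices into a single source-select word */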
+	for (i = 0; i < 4; i++)
+		src |= ctr->signal[i] << (i * 8);
 
-	nv_wr32(priv, dom->addr + 0x09c, 0x00040002);
-	nv_wr32(priv, dom->addr + 0x100, 0x00000000);
-	nv_wr32(priv, dom->addr + 0x040 + (cntr->base.slot * 0x08), src);
-	nv_wr32(priv, dom->addr + 0x044 + (cntr->base.slot * 0x08), log);
+	nvkm_wr32(device, dom->addr + 0x09c, 0x00040002 | (dom->mode << 3));
+	nvkm_wr32(device, dom->addr + 0x100, 0x00000000);
+	nvkm_wr32(device, dom->addr + 0x040 + (ctr->slot * 0x08), src);
+	nvkm_wr32(device, dom->addr + 0x044 + (ctr->slot * 0x08), log);
 }
 
 static void
-gf100_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+gf100_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		   struct nvkm_perfctr *ctr)
 {
-	struct gf100_pm_priv *priv = (void *)ppm;
-	struct gf100_pm_cntr *cntr = (void *)ctr;
-
-	switch (cntr->base.slot) {
-	case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break;
-	case 1: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x088); break;
-	case 2: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x080); break;
-	case 3: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x090); break;
+	struct nvkm_device *device = pm->engine.subdev.device;
+
+	switch (ctr->slot) {
+	case 0: ctr->ctr = nvkm_rd32(device, dom->addr + 0x08c); break;
+	case 1: ctr->ctr = nvkm_rd32(device, dom->addr + 0x088); break;
+	case 2: ctr->ctr = nvkm_rd32(device, dom->addr + 0x080); break;
+	case 3: ctr->ctr = nvkm_rd32(device, dom->addr + 0x090); break;
 	}
-	cntr->base.clk = nv_rd32(priv, dom->addr + 0x070);
+	dom->clk = nvkm_rd32(device, dom->addr + 0x070);
 }
 
 static void
-gf100_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
+gf100_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
 {
-	struct gf100_pm_priv *priv = (void *)ppm;
-	nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
-	nv_wr32(priv, dom->addr + 0x0ec, 0x00000011);
+	struct nvkm_device *device = pm->engine.subdev.device;
+	nvkm_wr32(device, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
+	nvkm_wr32(device, dom->addr + 0x0ec, 0x00000011);
 }
 
 const struct nvkm_funcdom
@@ -88,72 +172,72 @@ gf100_perfctr_func = {
 	.next = gf100_perfctr_next,
 };
 
-int
-gf100_pm_fini(struct nvkm_object *object, bool suspend)
+static void
+gf100_pm_fini(struct nvkm_pm *pm)
 {
-	struct gf100_pm_priv *priv = (void *)object;
-	nv_mask(priv, 0x000200, 0x10000000, 0x00000000);
-	nv_mask(priv, 0x000200, 0x10000000, 0x10000000);
-	return nvkm_pm_fini(&priv->base, suspend);
+	struct nvkm_device *device = pm->engine.subdev.device;
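+	/* reset the engine by toggling its bit in PMC_ENABLE (0x200) */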
+	nvkm_mask(device, 0x000200, 0x10000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x10000000, 0x10000000);
 }
 
-static int
-gf100_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+static const struct nvkm_pm_func
+gf100_pm_ = {
+	.fini = gf100_pm_fini,
+};
+
+int
+gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_pm **ppm)
 {
-	struct gf100_pm_priv *priv;
+	struct nvkm_pm *pm;
 	u32 mask;
 	int ret;
 
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gf100_pm_pwr);
+	ret = nvkm_pm_ctor(&gf100_pm_, device, index, pm);
 	if (ret)
 		return ret;
 
 	/* HUB */
-	ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
-			       gf100_pm_hub);
+	ret = nvkm_perfdom_new(pm, "hub", 0, 0x1b0000, 0, 0x200,
+			       func->doms_hub);
 	if (ret)
 		return ret;
 
 	/* GPC */
-	mask  = (1 << nv_rd32(priv, 0x022430)) - 1;
-	mask &= ~nv_rd32(priv, 0x022504);
-	mask &= ~nv_rd32(priv, 0x022584);
+	mask  = (1 << nvkm_rd32(device, 0x022430)) - 1;
+	mask &= ~nvkm_rd32(device, 0x022504);
+	mask &= ~nvkm_rd32(device, 0x022584);
 
-	ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
-			       0x1000, 0x200, gf100_pm_gpc);
+	ret = nvkm_perfdom_new(pm, "gpc", mask, 0x180000,
+			       0x1000, 0x200, func->doms_gpc);
 	if (ret)
 		return ret;
 
 	/* PART */
-	mask  = (1 << nv_rd32(priv, 0x022438)) - 1;
-	mask &= ~nv_rd32(priv, 0x022548);
-	mask &= ~nv_rd32(priv, 0x0225c8);
+	mask  = (1 << nvkm_rd32(device, 0x022438)) - 1;
+	mask &= ~nvkm_rd32(device, 0x022548);
+	mask &= ~nvkm_rd32(device, 0x0225c8);
 
-	ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
-			       0x1000, 0x200, gf100_pm_part);
+	ret = nvkm_perfdom_new(pm, "part", mask, 0x1a0000,
+			       0x1000, 0x200, func->doms_part);
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	priv->base.last = 7;
 	return 0;
 }
 
-struct nvkm_oclass
-gf100_pm_oclass = {
-	.handle = NV_ENGINE(PM, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = gf100_pm_fini,
-	},
+static const struct gf100_pm_func
+gf100_pm = {
+	.doms_gpc = gf100_pm_gpc,
+	.doms_hub = gf100_pm_hub,
+	.doms_part = gf100_pm_part,
 };
+
+int
+gf100_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gf100_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index 6a01fc7fec6f..56d0344853ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -2,14 +2,18 @@
 #define __NVKM_PM_NVC0_H__
 #include "priv.h"
 
-struct gf100_pm_priv {
-	struct nvkm_pm base;
+struct gf100_pm_func {
+	const struct nvkm_specdom *doms_hub;
+	const struct nvkm_specdom *doms_gpc;
+	const struct nvkm_specdom *doms_part;
 };
 
-struct gf100_pm_cntr {
-	struct nvkm_perfctr base;
-};
+int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *,
+		  int index, struct nvkm_pm **);
 
 extern const struct nvkm_funcdom gf100_perfctr_func;
-int gf100_pm_fini(struct nvkm_object *, bool);
+extern const struct nvkm_specdom gf100_pm_gpc[];
+
+extern const struct nvkm_specsrc gf100_pbfb_sources[];
+extern const struct nvkm_specsrc gf100_pmfb_sources[];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
new file mode 100644
index 000000000000..49b24c98a7f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2015 Samuel Pitoiset
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "gf100.h"
+
+static const struct nvkm_specdom
+gf108_pm_hub[] = {
+	{}
+};
+
+static const struct nvkm_specdom
+gf108_pm_part[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x14, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x15, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x20, "part00_pbfb_02", gf100_pbfb_sources },
+			{ 0x21, "part00_pbfb_03", gf100_pbfb_sources },
+			{ 0x01, "part00_pmfb_00", gf100_pmfb_sources },
+			{ 0x04, "part00_pmfb_01", gf100_pmfb_sources },
+			{ 0x05, "part00_pmfb_02", gf100_pmfb_sources},
+			{ 0x07, "part00_pmfb_03", gf100_pmfb_sources },
+			{ 0x0d, "part00_pmfb_04", gf100_pmfb_sources },
+			{ 0x12, "part00_pmfb_05", gf100_pmfb_sources },
+			{ 0x13, "part00_pmfb_06", gf100_pmfb_sources },
+			{ 0x2c, "part00_pmfb_07", gf100_pmfb_sources },
+			{ 0x2d, "part00_pmfb_08", gf100_pmfb_sources },
+			{ 0x2e, "part00_pmfb_09", gf100_pmfb_sources },
+			{ 0x2f, "part00_pmfb_0a", gf100_pmfb_sources },
+			{ 0x30, "part00_pmfb_0b", gf100_pmfb_sources },
+			{}
+		}, &gf100_perfctr_func },
+	{}
+};
+
+static const struct gf100_pm_func
+gf108_pm = {
+	.doms_gpc = gf100_pm_gpc,
+	.doms_hub = gf108_pm_hub,
+	.doms_part = gf108_pm_part,
+};
+
+int
+gf108_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gf108_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
new file mode 100644
index 000000000000..9170025fc988
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 Samuel Pitoiset
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "gf100.h"
+
+static const struct nvkm_specsrc
+gf117_pmfb_sources[] = {
+	{ 0x140028, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{ 0x7, 16, "unk16" },
+			{ 0x3, 24, "unk24" },
+			{ 0x2, 28, "unk28" },
+			{}
+		}, "pmfb0_pm_unk28" },
+	{ 0x14125c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp0_pm_unk25c" },
+	{}
+};
+
+static const struct nvkm_specdom
+gf117_pm_hub[] = {
+	{}
+};
+
+static const struct nvkm_specdom
+gf117_pm_part[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x00, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x01, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x12, "part00_pmfb_00", gf117_pmfb_sources },
+			{ 0x15, "part00_pmfb_01", gf117_pmfb_sources },
+			{ 0x16, "part00_pmfb_02", gf117_pmfb_sources },
+			{ 0x18, "part00_pmfb_03", gf117_pmfb_sources },
+			{ 0x1e, "part00_pmfb_04", gf117_pmfb_sources },
+			{ 0x23, "part00_pmfb_05", gf117_pmfb_sources },
+			{ 0x24, "part00_pmfb_06", gf117_pmfb_sources },
+			{ 0x0c, "part00_pmfb_07", gf117_pmfb_sources },
+			{ 0x0d, "part00_pmfb_08", gf117_pmfb_sources },
+			{ 0x0e, "part00_pmfb_09", gf117_pmfb_sources },
+			{ 0x0f, "part00_pmfb_0a", gf117_pmfb_sources },
+			{ 0x10, "part00_pmfb_0b", gf117_pmfb_sources },
+			{}
+		}, &gf100_perfctr_func },
+	{}
+};
+
+static const struct gf100_pm_func
+gf117_pm = {
+	.doms_gpc = gf100_pm_gpc,
+	.doms_hub = gf117_pm_hub,
+	.doms_part = gf117_pm_part,
+};
+
+int
+gf117_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gf117_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
index 75b9ff3d1a2c..07f946d26ac6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
@@ -23,6 +23,52 @@
  */
 #include "gf100.h"
 
+static const struct nvkm_specsrc
+gk104_pmfb_sources[] = {
+	{ 0x140028, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{ 0x7, 16, "unk16" },
+			{ 0x3, 24, "unk24" },
+			{ 0x2, 28, "unk28" },
+			{}
+		}, "pmfb0_pm_unk28" },
+	{ 0x14125c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp0_pm_unk25c" },
+	{ 0x14165c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp1_pm_unk25c" },
+	{ 0x141a5c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp2_pm_unk25c" },
+	{ 0x141e5c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp3_pm_unk25c" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gk104_tex_sources[] = {
+	{ 0x5042c0, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x7, 8, "sel1", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
+	{ 0x5042c8, (const struct nvkm_specmux[]) {
+			{ 0x1f, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_unkc8" },
+	{ 0x5042b8, (const struct nvkm_specmux[]) {
+			{ 0xff, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_unkb8" },
+	{}
+};
+
 static const struct nvkm_specdom
 gk104_pm_hub[] = {
 	{ 0x60, (const struct nvkm_specsig[]) {
@@ -69,12 +115,51 @@ gk104_pm_gpc[] = {
 			{ 0xc7, "gpc00_user_0" },
 			{}
 		}, &gf100_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &gf100_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{ 0x00, "gpc02_tex_00", gk104_tex_sources },
+			{ 0x01, "gpc02_tex_01", gk104_tex_sources },
+			{ 0x02, "gpc02_tex_02", gk104_tex_sources },
+			{ 0x03, "gpc02_tex_03", gk104_tex_sources },
+			{ 0x04, "gpc02_tex_04", gk104_tex_sources },
+			{ 0x05, "gpc02_tex_05", gk104_tex_sources },
+			{ 0x06, "gpc02_tex_06", gk104_tex_sources },
+			{ 0x07, "gpc02_tex_07", gk104_tex_sources },
+			{ 0x08, "gpc02_tex_08", gk104_tex_sources },
+			{ 0x0a, "gpc02_tex_0a", gk104_tex_sources },
+			{ 0x0b, "gpc02_tex_0b", gk104_tex_sources },
+			{ 0x0d, "gpc02_tex_0c", gk104_tex_sources },
+			{ 0x0c, "gpc02_tex_0d", gk104_tex_sources },
+			{ 0x0e, "gpc02_tex_0e", gk104_tex_sources },
+			{ 0x0f, "gpc02_tex_0f", gk104_tex_sources },
+			{ 0x10, "gpc02_tex_10", gk104_tex_sources },
+			{ 0x11, "gpc02_tex_11", gk104_tex_sources },
+			{ 0x12, "gpc02_tex_12", gk104_tex_sources },
+			{}
+		}, &gf100_perfctr_func },
 	{}
 };
 
 static const struct nvkm_specdom
 gk104_pm_part[] = {
 	{ 0x60, (const struct nvkm_specsig[]) {
+			{ 0x00, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x01, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x0c, "part00_pmfb_00", gk104_pmfb_sources },
+			{ 0x0d, "part00_pmfb_01", gk104_pmfb_sources },
+			{ 0x0e, "part00_pmfb_02", gk104_pmfb_sources },
+			{ 0x0f, "part00_pmfb_03", gk104_pmfb_sources },
+			{ 0x10, "part00_pmfb_04", gk104_pmfb_sources },
+			{ 0x12, "part00_pmfb_05", gk104_pmfb_sources },
+			{ 0x15, "part00_pmfb_06", gk104_pmfb_sources },
+			{ 0x16, "part00_pmfb_07", gk104_pmfb_sources },
+			{ 0x18, "part00_pmfb_08", gk104_pmfb_sources },
+			{ 0x21, "part00_pmfb_09", gk104_pmfb_sources },
+			{ 0x25, "part00_pmfb_0a", gk104_pmfb_sources },
+			{ 0x26, "part00_pmfb_0b", gk104_pmfb_sources },
+			{ 0x27, "part00_pmfb_0c", gk104_pmfb_sources },
 			{ 0x47, "part00_user_0" },
 			{}
 		}, &gf100_perfctr_func },
@@ -85,64 +170,15 @@ gk104_pm_part[] = {
 	{}
 };
 
-static int
-gk104_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct gf100_pm_priv *priv;
-	u32 mask;
-	int ret;
-
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	/* PDAEMON */
-	ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
-	if (ret)
-		return ret;
-
-	/* HUB */
-	ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
-			       gk104_pm_hub);
-	if (ret)
-		return ret;
-
-	/* GPC */
-	mask  = (1 << nv_rd32(priv, 0x022430)) - 1;
-	mask &= ~nv_rd32(priv, 0x022504);
-	mask &= ~nv_rd32(priv, 0x022584);
-
-	ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
-			       0x1000, 0x200, gk104_pm_gpc);
-	if (ret)
-		return ret;
-
-	/* PART */
-	mask  = (1 << nv_rd32(priv, 0x022438)) - 1;
-	mask &= ~nv_rd32(priv, 0x022548);
-	mask &= ~nv_rd32(priv, 0x0225c8);
-
-	ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
-			       0x1000, 0x200, gk104_pm_part);
-	if (ret)
-		return ret;
+static const struct gf100_pm_func
+gk104_pm = {
+	.doms_gpc = gk104_pm_gpc,
+	.doms_hub = gk104_pm_hub,
+	.doms_part = gk104_pm_part,
+};
 
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	priv->base.last = 7;
-	return 0;
+int
+gk104_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gk104_pm, device, index, ppm);
 }
-
-struct nvkm_oclass
-gk104_pm_oclass = {
-	.handle = NV_ENGINE(PM, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = gf100_pm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
new file mode 100644
index 000000000000..5cf5dd536fd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2015 Nouveau project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "nv40.h"
+
+const struct nvkm_specsrc
+gt200_crop_sources[] = {
+	{ 0x407008, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x1f, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_crop_pm_mux" },
+	{}
+};
+
+const struct nvkm_specsrc
+gt200_prop_sources[] = {
+	{ 0x408750, (const struct nvkm_specmux[]) {
+			{ 0x3f, 0, "sel", true },
+			{}
+		}, "pgraph_tpc0_prop_pm_mux" },
+	{}
+};
+
+const struct nvkm_specsrc
+gt200_tex_sources[] = {
+	{ 0x408508, (const struct nvkm_specmux[]) {
+			{ 0xfffff, 0, "unk0" },
+			{}
+		}, "pgraph_tpc0_tex_unk08" },
+	{}
+};
+
+static const struct nvkm_specdom
+gt200_pm[] = {
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xc9, "pc01_gr_idle" },
+			{ 0x84, "pc01_strmout_00" },
+			{ 0x85, "pc01_strmout_01" },
+			{ 0xde, "pc01_trast_00" },
+			{ 0xdf, "pc01_trast_01" },
+			{ 0xe0, "pc01_trast_02" },
+			{ 0xe1, "pc01_trast_03" },
+			{ 0xe4, "pc01_trast_04" },
+			{ 0xe5, "pc01_trast_05" },
+			{ 0x82, "pc01_vattr_00" },
+			{ 0x83, "pc01_vattr_01" },
+			{ 0x46, "pc01_vfetch_00", g84_vfetch_sources },
+			{ 0x47, "pc01_vfetch_01", g84_vfetch_sources },
+			{ 0x48, "pc01_vfetch_02", g84_vfetch_sources },
+			{ 0x49, "pc01_vfetch_03", g84_vfetch_sources },
+			{ 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
+			{ 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
+			{ 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
+			{ 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
+			{ 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
+			{ 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
+			{ 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
+			{ 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
+			{ 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
+			{ 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
+			{ 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
+			{ 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
+			{ 0x56, "pc01_vfetch_10", g84_vfetch_sources },
+			{ 0x57, "pc01_vfetch_11", g84_vfetch_sources },
+			{ 0x58, "pc01_vfetch_12", g84_vfetch_sources },
+			{ 0x59, "pc01_vfetch_13", g84_vfetch_sources },
+			{ 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
+			{ 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
+			{ 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
+			{ 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
+			{ 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
+			{ 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
+			{ 0x07, "pc01_zcull_00", nv50_zcull_sources },
+			{ 0x08, "pc01_zcull_01", nv50_zcull_sources },
+			{ 0x09, "pc01_zcull_02", nv50_zcull_sources },
+			{ 0x0a, "pc01_zcull_03", nv50_zcull_sources },
+			{ 0x0b, "pc01_zcull_04", nv50_zcull_sources },
+			{ 0x0c, "pc01_zcull_05", nv50_zcull_sources },
+
+			{ 0xb0, "pc01_unk00" },
+			{ 0xec, "pc01_trailer" },
+			{}
+		}, &nv40_perfctr_func },
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0x55, "pc02_crop_00", gt200_crop_sources },
+			{ 0x56, "pc02_crop_01", gt200_crop_sources },
+			{ 0x57, "pc02_crop_02", gt200_crop_sources },
+			{ 0x58, "pc02_crop_03", gt200_crop_sources },
+			{ 0x00, "pc02_prop_00", gt200_prop_sources },
+			{ 0x01, "pc02_prop_01", gt200_prop_sources },
+			{ 0x02, "pc02_prop_02", gt200_prop_sources },
+			{ 0x03, "pc02_prop_03", gt200_prop_sources },
+			{ 0x04, "pc02_prop_04", gt200_prop_sources },
+			{ 0x05, "pc02_prop_05", gt200_prop_sources },
+			{ 0x06, "pc02_prop_06", gt200_prop_sources },
+			{ 0x07, "pc02_prop_07", gt200_prop_sources },
+			{ 0x78, "pc02_tex_00", gt200_tex_sources },
+			{ 0x79, "pc02_tex_01", gt200_tex_sources },
+			{ 0x7a, "pc02_tex_02", gt200_tex_sources },
+			{ 0x7b, "pc02_tex_03", gt200_tex_sources },
+			{ 0x32, "pc02_tex_04", gt200_tex_sources },
+			{ 0x33, "pc02_tex_05", gt200_tex_sources },
+			{ 0x34, "pc02_tex_06", gt200_tex_sources },
+			{ 0x74, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x75, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x76, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x77, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0xec, "pc02_trailer" },
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+int
+gt200_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return nv40_pm_new_(gt200_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
index d065bfc59bbf..c9227ad41b04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
@@ -23,15 +23,94 @@
  */
 #include "nv40.h"
 
+static const struct nvkm_specsrc
+gt215_zcull_sources[] = {
+	{ 0x402ca4, (const struct nvkm_specmux[]) {
+			{ 0x7fff, 0, "unk0" },
+			{ 0xff, 24, "unk24" },
+			{}
+		}, "pgraph_zcull_pm_unka4" },
+	{}
+};
+
 static const struct nvkm_specdom
 gt215_pm[] = {
 	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xcb, "pc01_gr_idle" },
+			{ 0x86, "pc01_strmout_00" },
+			{ 0x87, "pc01_strmout_01" },
+			{ 0xe0, "pc01_trast_00" },
+			{ 0xe1, "pc01_trast_01" },
+			{ 0xe2, "pc01_trast_02" },
+			{ 0xe3, "pc01_trast_03" },
+			{ 0xe6, "pc01_trast_04" },
+			{ 0xe7, "pc01_trast_05" },
+			{ 0x84, "pc01_vattr_00" },
+			{ 0x85, "pc01_vattr_01" },
+			{ 0x46, "pc01_vfetch_00", g84_vfetch_sources },
+			{ 0x47, "pc01_vfetch_01", g84_vfetch_sources },
+			{ 0x48, "pc01_vfetch_02", g84_vfetch_sources },
+			{ 0x49, "pc01_vfetch_03", g84_vfetch_sources },
+			{ 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
+			{ 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
+			{ 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
+			{ 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
+			{ 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
+			{ 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
+			{ 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
+			{ 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
+			{ 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
+			{ 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
+			{ 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
+			{ 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
+			{ 0x56, "pc01_vfetch_10", g84_vfetch_sources },
+			{ 0x57, "pc01_vfetch_11", g84_vfetch_sources },
+			{ 0x58, "pc01_vfetch_12", g84_vfetch_sources },
+			{ 0x59, "pc01_vfetch_13", g84_vfetch_sources },
+			{ 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
+			{ 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
+			{ 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
+			{ 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
+			{ 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
+			{ 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
+			{ 0x07, "pc01_zcull_00", gt215_zcull_sources },
+			{ 0x08, "pc01_zcull_01", gt215_zcull_sources },
+			{ 0x09, "pc01_zcull_02", gt215_zcull_sources },
+			{ 0x0a, "pc01_zcull_03", gt215_zcull_sources },
+			{ 0x0b, "pc01_zcull_04", gt215_zcull_sources },
+			{ 0x0c, "pc01_zcull_05", gt215_zcull_sources },
+			{ 0xb2, "pc01_unk00" },
+			{ 0xec, "pc01_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x64, "pc02_crop_00", gt200_crop_sources },
+			{ 0x65, "pc02_crop_01", gt200_crop_sources },
+			{ 0x66, "pc02_crop_02", gt200_crop_sources },
+			{ 0x67, "pc02_crop_03", gt200_crop_sources },
+			{ 0x00, "pc02_prop_00", gt200_prop_sources },
+			{ 0x01, "pc02_prop_01", gt200_prop_sources },
+			{ 0x02, "pc02_prop_02", gt200_prop_sources },
+			{ 0x03, "pc02_prop_03", gt200_prop_sources },
+			{ 0x04, "pc02_prop_04", gt200_prop_sources },
+			{ 0x05, "pc02_prop_05", gt200_prop_sources },
+			{ 0x06, "pc02_prop_06", gt200_prop_sources },
+			{ 0x07, "pc02_prop_07", gt200_prop_sources },
+			{ 0x80, "pc02_tex_00", gt200_tex_sources },
+			{ 0x81, "pc02_tex_01", gt200_tex_sources },
+			{ 0x82, "pc02_tex_02", gt200_tex_sources },
+			{ 0x83, "pc02_tex_03", gt200_tex_sources },
+			{ 0x3a, "pc02_tex_04", gt200_tex_sources },
+			{ 0x3b, "pc02_tex_05", gt200_tex_sources },
+			{ 0x3c, "pc02_tex_06", gt200_tex_sources },
+			{ 0x7c, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x7d, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x7e, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x7f, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0xcc, "pc02_trailer" },
 			{}
 		}, &nv40_perfctr_func },
 	{ 0x20, (const struct nvkm_specsig[]) {
@@ -52,32 +131,8 @@ gt215_pm[] = {
 	{}
 };
 
-static int
-gt215_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **object)
+int
+gt215_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
 {
-	int ret = nv40_pm_ctor(parent, engine, oclass, data, size, object);
-	if (ret == 0) {
-		struct nv40_pm_priv *priv = (void *)*object;
-		ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
-				       gt215_pm_pwr);
-		if (ret)
-			return ret;
-
-		priv->base.last = 3;
-	}
-	return ret;
+	return nv40_pm_new_(gt215_pm, device, index, ppm);
 }
-
-struct nvkm_oclass *
-gt215_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0xa3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = gt215_pm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index ff22f06b22b8..4bef72a9d106 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -24,46 +24,44 @@
 #include "nv40.h"
 
 static void
-nv40_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+nv40_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		  struct nvkm_perfctr *ctr)
 {
-	struct nv40_pm_priv *priv = (void *)ppm;
-	struct nv40_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 	u32 log = ctr->logic_op;
 	u32 src = 0x00000000;
 	int i;
 
-	for (i = 0; i < 4 && ctr->signal[i]; i++)
-		src |= (ctr->signal[i] - dom->signal) << (i * 8);
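+	/* Pack the four 8-bit signal selects for this slot into one source word. */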
+	for (i = 0; i < 4; i++)
+		src |= ctr->signal[i] << (i * 8);
 
-	nv_wr32(priv, 0x00a7c0 + dom->addr, 0x00000001);
-	nv_wr32(priv, 0x00a400 + dom->addr + (cntr->base.slot * 0x40), src);
-	nv_wr32(priv, 0x00a420 + dom->addr + (cntr->base.slot * 0x40), log);
+	nvkm_wr32(device, 0x00a7c0 + dom->addr, 0x00000001 | (dom->mode << 4));
+	nvkm_wr32(device, 0x00a400 + dom->addr + (ctr->slot * 0x40), src);
+	nvkm_wr32(device, 0x00a420 + dom->addr + (ctr->slot * 0x40), log);
 }
 
 static void
-nv40_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+nv40_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		  struct nvkm_perfctr *ctr)
 {
-	struct nv40_pm_priv *priv = (void *)ppm;
-	struct nv40_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 
-	switch (cntr->base.slot) {
-	case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break;
-	case 1: cntr->base.ctr = nv_rd32(priv, 0x00a6c0 + dom->addr); break;
-	case 2: cntr->base.ctr = nv_rd32(priv, 0x00a680 + dom->addr); break;
-	case 3: cntr->base.ctr = nv_rd32(priv, 0x00a740 + dom->addr); break;
+	switch (ctr->slot) {
+	case 0: ctr->ctr = nvkm_rd32(device, 0x00a700 + dom->addr); break;
+	case 1: ctr->ctr = nvkm_rd32(device, 0x00a6c0 + dom->addr); break;
+	case 2: ctr->ctr = nvkm_rd32(device, 0x00a680 + dom->addr); break;
+	case 3: ctr->ctr = nvkm_rd32(device, 0x00a740 + dom->addr); break;
 	}
-	cntr->base.clk = nv_rd32(priv, 0x00a600 + dom->addr);
+	dom->clk = nvkm_rd32(device, 0x00a600 + dom->addr);
 }
 
 static void
-nv40_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
+nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
 {
-	struct nv40_pm_priv *priv = (void *)ppm;
-	if (priv->sequence != ppm->sequence) {
-		nv_wr32(priv, 0x400084, 0x00000020);
-		priv->sequence = ppm->sequence;
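+	/* Kick 0x400084 at most once per sampling sequence. */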
+	struct nvkm_device *device = pm->engine.subdev.device;
+	struct nv40_pm *nv40pm = nv40_pm(pm);
+
+	if (nv40pm->sequence != pm->sequence) {
+		nvkm_wr32(device, 0x400084, 0x00000020);
+		nv40pm->sequence = pm->sequence;
 	}
 }
 
@@ -74,6 +72,28 @@ nv40_perfctr_func = {
 	.next = nv40_perfctr_next,
 };
 
+static const struct nvkm_pm_func
+nv40_pm_ = {
+};
+
+int
+nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
+	     int index, struct nvkm_pm **ppm)
+{
+	struct nv40_pm *pm;
+	int ret;
+
+	if (!(pm = kzalloc(sizeof(*pm), GFP_KERNEL)))
+		return -ENOMEM;
+	*ppm = &pm->base;
+
+	ret = nvkm_pm_ctor(&nv40_pm_, device, index, &pm->base);
+	if (ret)
+		return ret;
+
+	return nvkm_perfdom_new(&pm->base, "pc", 0, 0, 0, 4, doms);
+}
+
 static const struct nvkm_specdom
 nv40_pm[] = {
 	{ 0x20, (const struct nvkm_specsig[]) {
@@ -95,36 +115,7 @@ nv40_pm[] = {
 };
 
 int
-nv40_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv40_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
 {
-	struct nv40_pm_oclass *mclass = (void *)oclass;
-	struct nv40_pm_priv *priv;
-	int ret;
-
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	return 0;
+	return nv40_pm_new_(nv40_pm, device, index, ppm);
 }
-
-struct nvkm_oclass *
-nv40_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0x40),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = nv40_pm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index 2338e150420e..da481abe8f7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -1,24 +1,14 @@
 #ifndef __NVKM_PM_NV40_H__
 #define __NVKM_PM_NV40_H__
+#define nv40_pm(p) container_of((p), struct nv40_pm, base)
 #include "priv.h"
 
-struct nv40_pm_oclass {
-	struct nvkm_oclass base;
-	const struct nvkm_specdom *doms;
-};
-
-struct nv40_pm_priv {
+struct nv40_pm {
 	struct nvkm_pm base;
 	u32 sequence;
 };
 
-int nv40_pm_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *data, u32 size,
-		      struct nvkm_object **pobject);
-
-struct nv40_pm_cntr {
-	struct nvkm_perfctr base;
-};
-
+int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *,
+		 int index, struct nvkm_pm **);
 extern const struct nvkm_funcdom nv40_perfctr_func;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
index 6af83b5d1b11..cc5a41d4c6f2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
@@ -23,35 +23,153 @@
  */
 #include "nv40.h"
 
+const struct nvkm_specsrc
+nv50_zcull_sources[] = {
+	{ 0x402ca4, (const struct nvkm_specmux[]) {
+			{ 0x7fff, 0, "unk0" },
+			{}
+		}, "pgraph_zcull_pm_unka4" },
+	{}
+};
+
+const struct nvkm_specsrc
+nv50_zrop_sources[] = {
+	{ 0x40708c, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0xf, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_zrop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_prop_sources[] = {
+	{ 0x40be50, (const struct nvkm_specmux[]) {
+			{ 0x1f, 0, "sel", true },
+			{}
+		}, "pgraph_tpc3_prop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_crop_sources[] = {
+	{ 0x407008, (const struct nvkm_specmux[]) {
+			{ 0x7, 0, "sel0", true },
+			{ 0x7, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_crop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_tex_sources[] = {
+	{ 0x40b808, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pgraph_tpc3_tex_unk08" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_vfetch_sources[] = {
+	{ 0x400c0c, (const struct nvkm_specmux[]) {
+			{ 0x1, 0, "unk0" },
+			{}
+		}, "pgraph_vfetch_unk0c" },
+	{}
+};
+
 static const struct nvkm_specdom
 nv50_pm[] = {
-	{ 0x040, (const struct nvkm_specsig[]) {
+	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x100, (const struct nvkm_specsig[]) {
-			{ 0xc8, "gr_idle" },
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xc8, "pc01_gr_idle" },
+			{ 0x7f, "pc01_strmout_00" },
+			{ 0x80, "pc01_strmout_01" },
+			{ 0xdc, "pc01_trast_00" },
+			{ 0xdd, "pc01_trast_01" },
+			{ 0xde, "pc01_trast_02" },
+			{ 0xdf, "pc01_trast_03" },
+			{ 0xe2, "pc01_trast_04" },
+			{ 0xe3, "pc01_trast_05" },
+			{ 0x7c, "pc01_vattr_00" },
+			{ 0x7d, "pc01_vattr_01" },
+			{ 0x26, "pc01_vfetch_00", nv50_vfetch_sources },
+			{ 0x27, "pc01_vfetch_01", nv50_vfetch_sources },
+			{ 0x28, "pc01_vfetch_02", nv50_vfetch_sources },
+			{ 0x29, "pc01_vfetch_03", nv50_vfetch_sources },
+			{ 0x2a, "pc01_vfetch_04", nv50_vfetch_sources },
+			{ 0x2b, "pc01_vfetch_05", nv50_vfetch_sources },
+			{ 0x2c, "pc01_vfetch_06", nv50_vfetch_sources },
+			{ 0x2d, "pc01_vfetch_07", nv50_vfetch_sources },
+			{ 0x2e, "pc01_vfetch_08", nv50_vfetch_sources },
+			{ 0x2f, "pc01_vfetch_09", nv50_vfetch_sources },
+			{ 0x30, "pc01_vfetch_0a", nv50_vfetch_sources },
+			{ 0x31, "pc01_vfetch_0b", nv50_vfetch_sources },
+			{ 0x32, "pc01_vfetch_0c", nv50_vfetch_sources },
+			{ 0x33, "pc01_vfetch_0d", nv50_vfetch_sources },
+			{ 0x34, "pc01_vfetch_0e", nv50_vfetch_sources },
+			{ 0x35, "pc01_vfetch_0f", nv50_vfetch_sources },
+			{ 0x36, "pc01_vfetch_10", nv50_vfetch_sources },
+			{ 0x37, "pc01_vfetch_11", nv50_vfetch_sources },
+			{ 0x38, "pc01_vfetch_12", nv50_vfetch_sources },
+			{ 0x39, "pc01_vfetch_13", nv50_vfetch_sources },
+			{ 0x3a, "pc01_vfetch_14", nv50_vfetch_sources },
+			{ 0x3b, "pc01_vfetch_15", nv50_vfetch_sources },
+			{ 0x3c, "pc01_vfetch_16", nv50_vfetch_sources },
+			{ 0x3d, "pc01_vfetch_17", nv50_vfetch_sources },
+			{ 0x3e, "pc01_vfetch_18", nv50_vfetch_sources },
+			{ 0x3f, "pc01_vfetch_19", nv50_vfetch_sources },
+			{ 0x20, "pc01_zcull_00", nv50_zcull_sources },
+			{ 0x21, "pc01_zcull_01", nv50_zcull_sources },
+			{ 0x22, "pc01_zcull_02", nv50_zcull_sources },
+			{ 0x23, "pc01_zcull_03", nv50_zcull_sources },
+			{ 0x24, "pc01_zcull_04", nv50_zcull_sources },
+			{ 0x25, "pc01_zcull_05", nv50_zcull_sources },
+			{ 0xae, "pc01_unk00" },
+			{ 0xee, "pc01_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x100, (const struct nvkm_specsig[]) {
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0x52, "pc02_crop_00", nv50_crop_sources },
+			{ 0x53, "pc02_crop_01", nv50_crop_sources },
+			{ 0x54, "pc02_crop_02", nv50_crop_sources },
+			{ 0x55, "pc02_crop_03", nv50_crop_sources },
+			{ 0x00, "pc02_prop_00", nv50_prop_sources },
+			{ 0x01, "pc02_prop_01", nv50_prop_sources },
+			{ 0x02, "pc02_prop_02", nv50_prop_sources },
+			{ 0x03, "pc02_prop_03", nv50_prop_sources },
+			{ 0x04, "pc02_prop_04", nv50_prop_sources },
+			{ 0x05, "pc02_prop_05", nv50_prop_sources },
+			{ 0x06, "pc02_prop_06", nv50_prop_sources },
+			{ 0x07, "pc02_prop_07", nv50_prop_sources },
+			{ 0x70, "pc02_tex_00", nv50_tex_sources },
+			{ 0x71, "pc02_tex_01", nv50_tex_sources },
+			{ 0x72, "pc02_tex_02", nv50_tex_sources },
+			{ 0x73, "pc02_tex_03", nv50_tex_sources },
+			{ 0x40, "pc02_tex_04", nv50_tex_sources },
+			{ 0x41, "pc02_tex_05", nv50_tex_sources },
+			{ 0x42, "pc02_tex_06", nv50_tex_sources },
+			{ 0x6c, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x6d, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x6e, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x6f, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0xee, "pc02_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x020, (const struct nvkm_specsig[]) {
+	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x040, (const struct nvkm_specsig[]) {
+	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
 	{}
 };
 
-struct nvkm_oclass *
-nv50_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = nv50_pm,
-}.base;
+int
+nv50_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return nv40_pm_new_(nv50_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index 1e6eff2a6d79..d7b81cbf82b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -1,58 +1,85 @@
 #ifndef __NVKM_PM_PRIV_H__
 #define __NVKM_PM_PRIV_H__
+#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
 #include <engine/pm.h>
 
+int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *,
+		 int index, struct nvkm_pm *);
+
+struct nvkm_pm_func {
+	void (*fini)(struct nvkm_pm *);
+};
+
 struct nvkm_perfctr {
-	struct nvkm_object base;
 	struct list_head head;
-	struct nvkm_perfsig *signal[4];
+	u8 domain;
+	u8  signal[4];
+	u64 source[4][8];
 	int slot;
 	u32 logic_op;
-	u32 clk;
 	u32 ctr;
 };
 
-extern struct nvkm_oclass nvkm_pm_sclass[];
+struct nvkm_specmux {
+	u32 mask;
+	u8 shift;
+	const char *name;
+	bool enable;
+};
 
-#include <core/engctx.h>
+struct nvkm_specsrc {
+	u32 addr;
+	const struct nvkm_specmux *mux;
+	const char *name;
+};
 
-struct nvkm_perfctx {
-	struct nvkm_engctx base;
+struct nvkm_perfsrc {
+	struct list_head head;
+	char *name;
+	u32 addr;
+	u32 mask;
+	u8 shift;
+	bool enable;
 };
 
-extern struct nvkm_oclass nvkm_pm_cclass;
+extern const struct nvkm_specsrc nv50_zcull_sources[];
+extern const struct nvkm_specsrc nv50_zrop_sources[];
+extern const struct nvkm_specsrc g84_vfetch_sources[];
+extern const struct nvkm_specsrc gt200_crop_sources[];
+extern const struct nvkm_specsrc gt200_prop_sources[];
+extern const struct nvkm_specsrc gt200_tex_sources[];
 
 struct nvkm_specsig {
 	u8 signal;
 	const char *name;
+	const struct nvkm_specsrc *source;
 };
 
 struct nvkm_perfsig {
 	const char *name;
+	u8 source[8];
 };
 
-struct nvkm_perfdom;
-struct nvkm_perfctr *
-nvkm_perfsig_wrap(struct nvkm_pm *, const char *, struct nvkm_perfdom **);
-
 struct nvkm_specdom {
 	u16 signal_nr;
 	const struct nvkm_specsig *signal;
 	const struct nvkm_funcdom *func;
 };
 
-extern const struct nvkm_specdom gt215_pm_pwr[];
-extern const struct nvkm_specdom gf100_pm_pwr[];
-extern const struct nvkm_specdom gk104_pm_pwr[];
+#define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
 
 struct nvkm_perfdom {
+	struct nvkm_object object;
+	struct nvkm_perfmon *perfmon;
 	struct list_head head;
 	struct list_head list;
 	const struct nvkm_funcdom *func;
+	struct nvkm_perfctr *ctr[4];
 	char name[32];
 	u32 addr;
-	u8  quad;
-	u32 signal_nr;
+	u8  mode;
+	u32 clk;
+	u16 signal_nr;
 	struct nvkm_perfsig signal[];
 };
 
@@ -67,24 +94,10 @@ struct nvkm_funcdom {
 int nvkm_perfdom_new(struct nvkm_pm *, const char *, u32, u32, u32, u32,
 		     const struct nvkm_specdom *);
 
-#define nvkm_pm_create(p,e,o,d)                                        \
-	nvkm_pm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_pm_dtor(p) ({                                             \
-	struct nvkm_pm *c = (p);                                       \
-	_nvkm_pm_dtor(nv_object(c));                                   \
-})
-#define nvkm_pm_init(p) ({                                             \
-	struct nvkm_pm *c = (p);                                       \
-	_nvkm_pm_init(nv_object(c));                                   \
-})
-#define nvkm_pm_fini(p,s) ({                                           \
-	struct nvkm_pm *c = (p);                                       \
-	_nvkm_pm_fini(nv_object(c), (s));                              \
-})
-
-int nvkm_pm_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, int, void **);
-void _nvkm_pm_dtor(struct nvkm_object *);
-int  _nvkm_pm_init(struct nvkm_object *);
-int  _nvkm_pm_fini(struct nvkm_object *, bool);
+#define nvkm_perfmon(p) container_of((p), struct nvkm_perfmon, object)
+
+struct nvkm_perfmon {
+	struct nvkm_object object;
+	struct nvkm_pm *pm;
+};
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
index 06ee06071104..66b147bd58eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
@@ -1,5 +1,5 @@
 /*
- *  fuc microcode for g98 psec engine
+ *  fuc microcode for g98 sec engine
  *  Copyright (C) 2010  Marcin Kościelnicki
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -17,7 +17,7 @@
  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
-.section #g98_psec_data
+.section #g98_sec_data
 
 ctx_dma:
 ctx_dma_query:		.b32 0
@@ -94,7 +94,7 @@ sec_dtable:
 
 .align 0x100
 
-.section #g98_psec_code
+.section #g98_sec_code
 
 	// $r0 is always set to 0 in our code - this allows some space savings.
 	clear b32 $r0
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index 5d65c4fbb087..eca62221f299 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
-uint32_t g98_psec_data[] = {
+uint32_t g98_sec_data[] = {
 /* 0x0000: ctx_dma */
 /* 0x0000: ctx_dma_query */
 	0x00000000,
@@ -150,7 +150,7 @@ uint32_t g98_psec_data[] = {
 	0x00000000,
 };
 
-uint32_t g98_psec_code[] = {
+uint32_t g98_sec_code[] = {
 	0x17f004bd,
 	0x0010fe35,
 	0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 9d5c1b8b1f8c..995c2c5ec150 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -22,47 +22,14 @@
  * Authors: Ben Skeggs
  */
 #include <engine/sec.h>
-#include <engine/falcon.h>
+#include <engine/fifo.h>
 #include "fuc/g98.fuc0s.h"
 
 #include <core/client.h>
 #include <core/enum.h>
-#include <engine/fifo.h>
-
-struct g98_sec_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * Crypt object classes
- ******************************************************************************/
+#include <core/gpuobj.h>
 
-static struct nvkm_oclass
-g98_sec_sclass[] = {
-	{ 0x88b4, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PSEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_sec_cclass = {
-	.handle = NV_ENGCTX(SEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PSEC engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
 static const struct nvkm_enum g98_sec_isr_error_name[] = {
 	{ 0x0000, "ILLEGAL_MTHD" },
@@ -73,77 +40,44 @@ static const struct nvkm_enum g98_sec_isr_error_name[] = {
 };
 
 static void
-g98_sec_intr(struct nvkm_subdev *subdev)
+g98_sec_intr(struct nvkm_falcon *sec, struct nvkm_fifo_chan *chan)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct g98_sec_priv *priv = (void *)subdev;
-	u32 disp = nv_rd32(priv, 0x08701c);
-	u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
-	u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
-	u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
-	u32 addr = nv_rd32(priv, 0x087040) >> 16;
+	struct nvkm_subdev *subdev = &sec->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 ssta = nvkm_rd32(device, 0x087040) & 0x0000ffff;
+	u32 addr = nvkm_rd32(device, 0x087040) >> 16;
 	u32 mthd = (addr & 0x07ff) << 2;
 	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_rd32(priv, 0x087044);
-	int chid;
-
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
-	if (stat & 0x00000040) {
-		nv_error(priv, "DISPATCH_ERROR [");
-		nvkm_enum_print(g98_sec_isr_error_name, ssta);
-		pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
-		       chid, (u64)inst << 12, nvkm_client_name(engctx),
-		       subc, mthd, data);
-		nv_wr32(priv, 0x087004, 0x00000040);
-		stat &= ~0x00000040;
-	}
+	u32 data = nvkm_rd32(device, 0x087044);
+	const struct nvkm_enum *en =
+		nvkm_enum_find(g98_sec_isr_error_name, ssta);
+
+	nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
+			   "subc %d mthd %04x data %08x\n", ssta,
+		   en ? en->name : "UNKNOWN", chan ? chan->chid : -1,
+		   chan ? chan->inst->addr : 0,
+		   chan ? chan->object.client->name : "unknown",
+		   subc, mthd, data);
+}
 
-	if (stat) {
-		nv_error(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x087004, stat);
+static const struct nvkm_falcon_func
+g98_sec = {
+	.code.data = g98_sec_code,
+	.code.size = sizeof(g98_sec_code),
+	.data.data = g98_sec_data,
+	.data.size = sizeof(g98_sec_data),
+	.pmc_enable = 0x00004000,
+	.intr = g98_sec_intr,
+	.sclass = {
+		{ -1, -1, G98_SEC },
+		{}
 	}
+};
 
-	nvkm_engctx_put(engctx);
-}
-
-static int
-g98_sec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+g98_sec_new(struct nvkm_device *device, int index,
+	    struct nvkm_engine **pengine)
 {
-	struct g98_sec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x087000, true,
-				 "PSEC", "sec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00004000;
-	nv_subdev(priv)->intr = g98_sec_intr;
-	nv_engine(priv)->cclass = &g98_sec_cclass;
-	nv_engine(priv)->sclass = g98_sec_sclass;
-	nv_falcon(priv)->code.data = g98_psec_code;
-	nv_falcon(priv)->code.size = sizeof(g98_psec_code);
-	nv_falcon(priv)->data.data = g98_psec_data;
-	nv_falcon(priv)->data.size = sizeof(g98_psec_data);
-	return 0;
+	return nvkm_falcon_new_(&g98_sec, device, index,
+				true, 0x087000, pengine);
 }
-
-struct nvkm_oclass
-g98_sec_oclass = {
-	.handle = NV_ENGINE(SEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_sec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = _nvkm_falcon_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
index bdc3a05907d5..1c291e6fcf96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
@@ -1,4 +1,9 @@
+nvkm-y += nvkm/engine/sw/base.o
 nvkm-y += nvkm/engine/sw/nv04.o
 nvkm-y += nvkm/engine/sw/nv10.o
 nvkm-y += nvkm/engine/sw/nv50.o
 nvkm-y += nvkm/engine/sw/gf100.o
+
+nvkm-y += nvkm/engine/sw/chan.o
+
+nvkm-y += nvkm/engine/sw/nvsw.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
new file mode 100644
index 000000000000..53c1f7e75b54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+#include "chan.h"
+
+#include <engine/fifo.h>
+
+bool
+nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data)
+{
+	struct nvkm_sw_chan *chan;
+	bool handled = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sw->engine.lock, flags);
+	list_for_each_entry(chan, &sw->chan, head) {
+		if (chan->fifo->chid == chid) {
+			handled = nvkm_sw_chan_mthd(chan, subc, mthd, data);
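+			/* Move the matched channel to the list head (MRU) so
+			 * repeated lookups stay cheap. */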
+			list_del(&chan->head);
+			list_add(&chan->head, &sw->chan);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&sw->engine.lock, flags);
+	return handled;
+}
+
+static int
+nvkm_sw_oclass_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	struct nvkm_sw_chan *chan = nvkm_sw_chan(oclass->parent);
+	const struct nvkm_sw_chan_sclass *sclass = oclass->engn;
+	return sclass->ctor(chan, oclass, data, size, pobject);
+}
+
+static int
+nvkm_sw_oclass_get(struct nvkm_oclass *oclass, int index)
+{
+	struct nvkm_sw *sw = nvkm_sw(oclass->engine);
+	int c = 0;
+
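+	/* Return the class at 'index' if it exists; otherwise the class count. */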
+	while (sw->func->sclass[c].ctor) {
+		if (c++ == index) {
+			oclass->engn = &sw->func->sclass[index];
+			oclass->base =  sw->func->sclass[index].base;
+			oclass->base.ctor = nvkm_sw_oclass_new;
+			return index;
+		}
+	}
+
+	return c;
+}
+
+static int
+nvkm_sw_cclass_get(struct nvkm_fifo_chan *fifoch,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nvkm_sw *sw = nvkm_sw(oclass->engine);
+	return sw->func->chan_new(sw, fifoch, oclass, pobject);
+}
+
+static void *
+nvkm_sw_dtor(struct nvkm_engine *engine)
+{
+	return nvkm_sw(engine);
+}
+
+static const struct nvkm_engine_func
+nvkm_sw = {
+	.dtor = nvkm_sw_dtor,
+	.fifo.cclass = nvkm_sw_cclass_get,
+	.fifo.sclass = nvkm_sw_oclass_get,
+};
+
+int
+nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_sw **psw)
+{
+	struct nvkm_sw *sw;
+
+	if (!(sw = *psw = kzalloc(sizeof(*sw), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_LIST_HEAD(&sw->chan);
+	sw->func = func;
+
+	return nvkm_engine_ctor(&nvkm_sw, device, index, 0, true, &sw->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
new file mode 100644
index 000000000000..d082f4f73a80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "chan.h"
+
+#include <core/notify.h>
+#include <engine/fifo.h>
+
+#include <nvif/event.h>
+#include <nvif/unpack.h>
+
+bool
+nvkm_sw_chan_mthd(struct nvkm_sw_chan *chan, int subc, u32 mthd, u32 data)
+{
+	switch (mthd) {
+	case 0x0000:
+		return true;
+	case 0x0500:
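+		/* software page-flip method: signal completion to listeners */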
+		nvkm_event_send(&chan->event, 1, 0, NULL, 0);
+		return true;
+	default:
+		if (chan->func->mthd)
+			return chan->func->mthd(chan, subc, mthd, data);
+		break;
+	}
+	return false;
+}
+
+static int
+nvkm_sw_chan_event_ctor(struct nvkm_object *object, void *data, u32 size,
+			struct nvkm_notify *notify)
+{
+	union {
+		struct nvif_notify_uevent_req none;
+	} *req = data;
+	int ret;
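+	/* note: the nvif_unvers() macro assigns 'ret' as a side effect */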
+
+	if (nvif_unvers(req->none)) {
+		notify->size  = sizeof(struct nvif_notify_uevent_rep);
+		notify->types = 1;
+		notify->index = 0;
+	}
+
+	return ret;
+}
+
+static const struct nvkm_event_func
+nvkm_sw_chan_event = {
+	.ctor = nvkm_sw_chan_event_ctor,
+};
+
+static void *
+nvkm_sw_chan_dtor(struct nvkm_object *object)
+{
+	struct nvkm_sw_chan *chan = nvkm_sw_chan(object);
+	struct nvkm_sw *sw = chan->sw;
+	unsigned long flags;
+	void *data = chan;
+
+	if (chan->func->dtor)
+		data = chan->func->dtor(chan);
+	nvkm_event_fini(&chan->event);
+
+	spin_lock_irqsave(&sw->engine.lock, flags);
+	list_del(&chan->head);
+	spin_unlock_irqrestore(&sw->engine.lock, flags);
+	return data;
+}
+
+static const struct nvkm_object_func
+nvkm_sw_chan = {
+	.dtor = nvkm_sw_chan_dtor,
+};
+
+int
+nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
+		  struct nvkm_fifo_chan *fifo, const struct nvkm_oclass *oclass,
+		  struct nvkm_sw_chan *chan)
+{
+	unsigned long flags;
+
+	nvkm_object_ctor(&nvkm_sw_chan, oclass, &chan->object);
+	chan->func = func;
+	chan->sw = sw;
+	chan->fifo = fifo;
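+	/* Add to the engine's channel list so nvkm_sw_mthd() can find us by chid. */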
+	spin_lock_irqsave(&sw->engine.lock, flags);
+	list_add(&chan->head, &sw->chan);
+	spin_unlock_irqrestore(&sw->engine.lock, flags);
+
+	return nvkm_event_init(&nvkm_sw_chan_event, 1, 1, &chan->event);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
new file mode 100644
index 000000000000..6608bf6c6842
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_SW_CHAN_H__
+#define __NVKM_SW_CHAN_H__
+#define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
+#include "priv.h"
+#include <core/event.h>
+
+struct nvkm_sw_chan {
+	const struct nvkm_sw_chan_func *func;
+	struct nvkm_object object;
+	struct nvkm_sw *sw;
+	struct nvkm_fifo_chan *fifo;
+	struct list_head head;
+
+	struct nvkm_event event;
+};
+
+struct nvkm_sw_chan_func {
+	void *(*dtor)(struct nvkm_sw_chan *);
+	bool (*mthd)(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
+};
+
+int nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *, struct nvkm_sw *,
+		      struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+		      struct nvkm_sw_chan *);
+bool nvkm_sw_chan_mthd(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index 533d5d8ed363..b01ef7eca906 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -23,119 +23,133 @@
  */
 #include "nv50.h"
 
+#include <core/gpuobj.h>
 #include <subdev/bar.h>
+#include <engine/disp.h>
+#include <engine/fifo.h>
+
+#include <nvif/event.h>
+#include <nvif/ioctl.h>
 
 /*******************************************************************************
- * software object classes
+ * software context
  ******************************************************************************/
 
 static int
-gf100_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
+gf100_sw_chan_vblsem_release(struct nvkm_notify *notify)
 {
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	u64 data = *(u32 *)args;
-	if (mthd == 0x0400) {
-		chan->vblank.offset &= 0x00ffffffffULL;
-		chan->vblank.offset |= data << 32;
-	} else {
-		chan->vblank.offset &= 0xff00000000ULL;
-		chan->vblank.offset |= data;
-	}
-	return 0;
+	struct nv50_sw_chan *chan =
+		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
+	struct nvkm_sw *sw = chan->base.sw;
+	struct nvkm_device *device = sw->engine.subdev.device;
+	u32 inst = chan->base.fifo->inst->addr >> 12;
+
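+	/* Select this channel's instance, flush posted writes, then release the
+	 * vblank semaphore (offset hi/lo, value). */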
+	nvkm_wr32(device, 0x001718, 0x80000000 | inst);
+	nvkm_bar_flush(device->bar);
+	nvkm_wr32(device, 0x06000c, upper_32_bits(chan->vblank.offset));
+	nvkm_wr32(device, 0x060010, lower_32_bits(chan->vblank.offset));
+	nvkm_wr32(device, 0x060014, chan->vblank.value);
+
+	return NVKM_NOTIFY_DROP;
 }
 
-static int
-gf100_sw_mthd_mp_control(struct nvkm_object *object, u32 mthd,
-			 void *args, u32 size)
+static bool
+gf100_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
 {
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
-	u32 data = *(u32 *)args;
-
+	struct nv50_sw_chan *chan = nv50_sw_chan(base);
+	struct nvkm_engine *engine = chan->base.object.engine;
+	struct nvkm_device *device = engine->subdev.device;
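+	/* 0x0400/0x0404 set the vblank semaphore offset (hi/lo), 0x0408 its
+	 * value, and 0x040c arms a release on head 'data'. */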
 	switch (mthd) {
-	case 0x600:
-		nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */
-		break;
-	case 0x644:
-		if (data & ~0x1ffffe)
-			return -EINVAL;
-		nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */
+	case 0x0400:
+		chan->vblank.offset &= 0x00ffffffffULL;
+		chan->vblank.offset |= (u64)data << 32;
+		return true;
+	case 0x0404:
+		chan->vblank.offset &= 0xff00000000ULL;
+		chan->vblank.offset |= data;
+		return true;
+	case 0x0408:
+		chan->vblank.value = data;
+		return true;
+	case 0x040c:
+		if (data < device->disp->vblank.index_nr) {
+			nvkm_notify_get(&chan->vblank.notify[data]);
+			return true;
+		}
 		break;
-	case 0x6ac:
-		nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */
+	case 0x600: /* MP.PM_UNK000 */
+		nvkm_wr32(device, 0x419e00, data);
+		return true;
+	case 0x644: /* MP.TRAP_WARP_ERROR_EN */
+		if (!(data & ~0x001ffffe)) {
+			nvkm_wr32(device, 0x419e44, data);
+			return true;
+		}
 		break;
+	case 0x6ac: /* MP.PM_UNK0AC */
+		nvkm_wr32(device, 0x419eac, data);
+		return true;
 	default:
-		return -EINVAL;
+		break;
 	}
-	return 0;
+	return false;
 }
 
-static struct nvkm_omthds
-gf100_sw_omthds[] = {
-	{ 0x0400, 0x0400, gf100_sw_mthd_vblsem_offset },
-	{ 0x0404, 0x0404, gf100_sw_mthd_vblsem_offset },
-	{ 0x0408, 0x0408, nv50_sw_mthd_vblsem_value },
-	{ 0x040c, 0x040c, nv50_sw_mthd_vblsem_release },
-	{ 0x0500, 0x0500, nv50_sw_mthd_flip },
-	{ 0x0600, 0x0600, gf100_sw_mthd_mp_control },
-	{ 0x0644, 0x0644, gf100_sw_mthd_mp_control },
-	{ 0x06ac, 0x06ac, gf100_sw_mthd_mp_control },
-	{}
+static const struct nvkm_sw_chan_func
+gf100_sw_chan = {
+	.dtor = nv50_sw_chan_dtor,
+	.mthd = gf100_sw_chan_mthd,
 };
 
-static struct nvkm_oclass
-gf100_sw_sclass[] = {
-	{ 0x906e, &nvkm_object_ofuncs, gf100_sw_omthds },
-	{}
-};
-
-/*******************************************************************************
- * software context
- ******************************************************************************/
-
 static int
-gf100_sw_vblsem_release(struct nvkm_notify *notify)
+gf100_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+		  const struct nvkm_oclass *oclass,
+		  struct nvkm_object **pobject)
 {
-	struct nv50_sw_chan *chan =
-		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
-	struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
-	struct nvkm_bar *bar = nvkm_bar(priv);
+	struct nvkm_disp *disp = sw->engine.subdev.device->disp;
+	struct nv50_sw_chan *chan;
+	int ret, i;
 
-	nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
-	bar->flush(bar);
-	nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
-	nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
-	nv_wr32(priv, 0x060014, chan->vblank.value);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
 
-	return NVKM_NOTIFY_DROP;
-}
+	ret = nvkm_sw_chan_ctor(&gf100_sw_chan, sw, fifoch, oclass,
+				&chan->base);
+	if (ret)
+		return ret;
 
-static struct nv50_sw_cclass
-gf100_sw_cclass = {
-	.base.handle = NV_ENGCTX(SW, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_context_ctor,
-		.dtor = nv50_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
-	.vblank = gf100_sw_vblsem_release,
-};
+	for (i = 0; disp && i < disp->vblank.index_nr; i++) {
+		ret = nvkm_notify_init(NULL, &disp->vblank,
+				       gf100_sw_chan_vblsem_release, false,
+				       &(struct nvif_notify_head_req_v0) {
+					.head = i,
+				       },
+				       sizeof(struct nvif_notify_head_req_v0),
+				       sizeof(struct nvif_notify_head_rep_v0),
+				       &chan->vblank.notify[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
 
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf100_sw_oclass = &(struct nv50_sw_oclass) {
-	.base.handle = NV_ENGINE(SW, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
-	.cclass = &gf100_sw_cclass.base,
-	.sclass =  gf100_sw_sclass,
-}.base;
+static const struct nvkm_sw_func
+gf100_sw = {
+	.chan_new = gf100_sw_chan_new,
+	.sclass = {
+		{ nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_GF100 } },
+		{}
+	}
+};
+
+int
+gf100_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+	return nvkm_sw_new_(&gf100_sw, device, index, psw);
+}
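
The gf100 rework above collapses the per-method nvkm_omthds table into one switch-based handler that returns a bool: true when the method was consumed, false so the caller can report an invalid method itself. A minimal userspace sketch of that handled/not-handled dispatch (struct and field names are invented; the method numbers mirror the vblank-semaphore methods above, but nothing here is kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct chan {
        uint64_t vblank_offset;
        uint32_t vblank_value;
    };

    static bool
    chan_mthd(struct chan *chan, uint32_t mthd, uint32_t data)
    {
        switch (mthd) {
        case 0x0400:                 /* semaphore offset, high 32 bits */
            chan->vblank_offset &= 0x00ffffffffULL;
            chan->vblank_offset |= (uint64_t)data << 32;
            return true;
        case 0x0404:                 /* semaphore offset, low 32 bits */
            chan->vblank_offset &= 0xff00000000ULL;
            chan->vblank_offset |= data;
            return true;
        case 0x0408:                 /* semaphore release value */
            chan->vblank_value = data;
            return true;
        default:
            return false;            /* unhandled: caller reports the error */
        }
    }

    int main(void)
    {
        struct chan chan = {0};
        chan_mthd(&chan, 0x0400, 0x1);
        chan_mthd(&chan, 0x0404, 0x2000);
        printf("handled=%d offset=0x%llx\n",
               chan_mthd(&chan, 0x0700, 0),  /* unknown method */
               (unsigned long long)chan.vblank_offset);
        return 0;
    }

Note the widening cast on the high write: without (u64)data << 32 the shift would truncate, which is exactly the fix visible in the 0x0400 case of gf100_sw_chan_mthd() above.
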
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index 897024421d36..445217ffa791 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -21,15 +21,18 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/sw.h>
-#include <engine/fifo.h>
+#define nv04_sw_chan(p) container_of((p), struct nv04_sw_chan, base)
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
 
-struct nv04_sw_priv {
-	struct nvkm_sw base;
-};
+#include <nvif/class.h>
+#include <nvif/ioctl.h>
+#include <nvif/unpack.h>
 
 struct nv04_sw_chan {
 	struct nvkm_sw_chan base;
+	atomic_t ref;
 };
 
 /*******************************************************************************
@@ -37,103 +40,99 @@ struct nv04_sw_chan {
  ******************************************************************************/
 
 static int
-nv04_sw_set_ref(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+nv04_nvsw_mthd_get_ref(struct nvkm_nvsw *nvsw, void *data, u32 size)
 {
-	struct nvkm_object *channel = (void *)nv_engctx(object->parent);
-	struct nvkm_fifo_chan *fifo = (void *)channel->parent;
-	atomic_set(&fifo->refcnt, *(u32*)data);
-	return 0;
+	struct nv04_sw_chan *chan = nv04_sw_chan(nvsw->chan);
+	union {
+		struct nv04_nvsw_get_ref_v0 v0;
+	} *args = data;
+	int ret;
+
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		args->v0.ref = atomic_read(&chan->ref);
+	}
+
+	return ret;
 }
 
 static int
-nv04_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
+nv04_nvsw_mthd(struct nvkm_nvsw *nvsw, u32 mthd, void *data, u32 size)
 {
-	struct nv04_sw_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
+	switch (mthd) {
+	case NV04_NVSW_GET_REF:
+		return nv04_nvsw_mthd_get_ref(nvsw, data, size);
+	default:
+		break;
+	}
 	return -EINVAL;
 }
 
-static struct nvkm_omthds
-nv04_sw_omthds[] = {
-	{ 0x0150, 0x0150, nv04_sw_set_ref },
-	{ 0x0500, 0x0500, nv04_sw_flip },
-	{}
+static const struct nvkm_nvsw_func
+nv04_nvsw = {
+	.mthd = nv04_nvsw_mthd,
 };
 
-static struct nvkm_oclass
-nv04_sw_sclass[] = {
-	{ 0x006e, &nvkm_object_ofuncs, nv04_sw_omthds },
-	{}
-};
+static int
+nv04_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
+	      void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nvkm_nvsw_new_(&nv04_nvsw, chan, oclass, data, size, pobject);
+}
 
 /*******************************************************************************
  * software context
  ******************************************************************************/
 
-static int
-nv04_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+static bool
+nv04_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
 {
-	struct nv04_sw_chan *chan;
-	int ret;
+	struct nv04_sw_chan *chan = nv04_sw_chan(base);
 
-	ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	switch (mthd) {
+	case 0x0150:
+		atomic_set(&chan->ref, data);
+		return true;
+	default:
+		break;
+	}
 
-	return 0;
+	return false;
 }
 
-static struct nvkm_oclass
-nv04_sw_cclass = {
-	.handle = NV_ENGCTX(SW, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_sw_context_ctor,
-		.dtor = _nvkm_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
+static const struct nvkm_sw_chan_func
+nv04_sw_chan = {
+	.mthd = nv04_sw_chan_mthd,
 };
 
-/*******************************************************************************
- * software engine/subdev functions
- ******************************************************************************/
-
-void
-nv04_sw_intr(struct nvkm_subdev *subdev)
-{
-	nv_mask(subdev, 0x000100, 0x80000000, 0x00000000);
-}
-
 static int
-nv04_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
-	struct nv04_sw_priv *priv;
-	int ret;
+	struct nv04_sw_chan *chan;
 
-	ret = nvkm_sw_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	atomic_set(&chan->ref, 0);
+	*pobject = &chan->base.object;
 
-	nv_engine(priv)->cclass = &nv04_sw_cclass;
-	nv_engine(priv)->sclass = nv04_sw_sclass;
-	nv_subdev(priv)->intr = nv04_sw_intr;
-	return 0;
+	return nvkm_sw_chan_ctor(&nv04_sw_chan, sw, fifo, oclass, &chan->base);
 }
 
-struct nvkm_oclass *
-nv04_sw_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(SW, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static const struct nvkm_sw_func
+nv04_sw = {
+	.chan_new = nv04_sw_chan_new,
+	.sclass = {
+		{ nv04_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV04 } },
+		{}
+	}
 };
+
+int
+nv04_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+	return nvkm_sw_new_(&nv04_sw, device, index, psw);
+}
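
The #define nv04_sw_chan(p) container_of(...) at the top of nv04.c is the standard downcast idiom: callbacks receive a pointer to the embedded nvkm_sw_chan base and recover the enclosing nv04_sw_chan from it. A self-contained sketch of the idiom, with made-up struct names:

    #include <stddef.h>
    #include <stdio.h>

    /* Same shape as the kernel macro, minus type checking. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base { int id; };

    struct derived {
        int ref;
        struct base base;            /* embedded base, not a pointer */
    };

    int main(void)
    {
        struct derived d = { .ref = 42, .base = { .id = 7 } };
        struct base *b = &d.base;    /* what a callback receives */
        struct derived *back = container_of(b, struct derived, base);

        printf("ref=%d id=%d\n", back->ref, back->base.id);
        return 0;
    }
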
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index c61153a3fb8b..adf70d92b244 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -21,102 +21,48 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/sw.h>
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
 
-struct nv10_sw_priv {
-	struct nvkm_sw base;
-};
-
-struct nv10_sw_chan {
-	struct nvkm_sw_chan base;
-};
+#include <nvif/ioctl.h>
 
 /*******************************************************************************
- * software object classes
+ * software context
  ******************************************************************************/
 
-static int
-nv10_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
-{
-	struct nv10_sw_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
-	return -EINVAL;
-}
-
-static struct nvkm_omthds
-nv10_sw_omthds[] = {
-	{ 0x0500, 0x0500, nv10_sw_flip },
-	{}
-};
-
-static struct nvkm_oclass
-nv10_sw_sclass[] = {
-	{ 0x016e, &nvkm_object_ofuncs, nv10_sw_omthds },
-	{}
+static const struct nvkm_sw_chan_func
+nv10_sw_chan = {
 };
 
-/*******************************************************************************
- * software context
- ******************************************************************************/
-
 static int
-nv10_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
-	struct nv10_sw_chan *chan;
-	int ret;
+	struct nvkm_sw_chan *chan;
 
-	ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->object;
 
-	return 0;
+	return nvkm_sw_chan_ctor(&nv10_sw_chan, sw, fifo, oclass, chan);
 }
 
-static struct nvkm_oclass
-nv10_sw_cclass = {
-	.handle = NV_ENGCTX(SW, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_sw_context_ctor,
-		.dtor = _nvkm_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
-};
-
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-static int
-nv10_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv10_sw_priv *priv;
-	int ret;
-
-	ret = nvkm_sw_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_sw_func
+nv10_sw = {
+	.chan_new = nv10_sw_chan_new,
+	.sclass = {
+		{ nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV10 } },
+		{}
+	}
+};
 
-	nv_engine(priv)->cclass = &nv10_sw_cclass;
-	nv_engine(priv)->sclass = nv10_sw_sclass;
-	nv_subdev(priv)->intr = nv04_sw_intr;
-	return 0;
+int
+nv10_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+	return nvkm_sw_new_(&nv10_sw, device, index, psw);
 }
-
-struct nvkm_oclass *
-nv10_sw_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(SW, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index 401fcd73086b..a381196af69d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -23,153 +23,98 @@
  */
 #include "nv50.h"
 
-#include <core/device.h>
-#include <core/handle.h>
-#include <core/namedb.h>
+#include <core/gpuobj.h>
 #include <engine/disp.h>
+#include <engine/fifo/chan.h>
 #include <subdev/bar.h>
 
 #include <nvif/event.h>
-
-/*******************************************************************************
- * software object classes
- ******************************************************************************/
-
-static int
-nv50_sw_mthd_dma_vblsem(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	struct nvkm_fifo_chan *fifo = (void *)nv_object(chan)->parent;
-	struct nvkm_handle *handle;
-	int ret = -EINVAL;
-
-	handle = nvkm_namedb_get(nv_namedb(fifo), *(u32 *)args);
-	if (!handle)
-		return -ENOENT;
-
-	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-		struct nvkm_gpuobj *gpuobj = nv_gpuobj(handle->object);
-		chan->vblank.ctxdma = gpuobj->node->offset >> 4;
-		ret = 0;
-	}
-	nvkm_namedb_put(handle);
-	return ret;
-}
-
-static int
-nv50_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	chan->vblank.offset = *(u32 *)args;
-	return 0;
-}
-
-int
-nv50_sw_mthd_vblsem_value(struct nvkm_object *object, u32 mthd,
-			  void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	chan->vblank.value = *(u32 *)args;
-	return 0;
-}
-
-int
-nv50_sw_mthd_vblsem_release(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	u32 head = *(u32 *)args;
-	if (head >= nvkm_disp(chan)->vblank.index_nr)
-		return -EINVAL;
-
-	nvkm_notify_get(&chan->vblank.notify[head]);
-	return 0;
-}
-
-int
-nv50_sw_mthd_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
-	return -EINVAL;
-}
-
-static struct nvkm_omthds
-nv50_sw_omthds[] = {
-	{ 0x018c, 0x018c, nv50_sw_mthd_dma_vblsem },
-	{ 0x0400, 0x0400, nv50_sw_mthd_vblsem_offset },
-	{ 0x0404, 0x0404, nv50_sw_mthd_vblsem_value },
-	{ 0x0408, 0x0408, nv50_sw_mthd_vblsem_release },
-	{ 0x0500, 0x0500, nv50_sw_mthd_flip },
-	{}
-};
-
-static struct nvkm_oclass
-nv50_sw_sclass[] = {
-	{ 0x506e, &nvkm_object_ofuncs, nv50_sw_omthds },
-	{}
-};
+#include <nvif/ioctl.h>
 
 /*******************************************************************************
  * software context
  ******************************************************************************/
 
 static int
-nv50_sw_vblsem_release(struct nvkm_notify *notify)
+nv50_sw_chan_vblsem_release(struct nvkm_notify *notify)
 {
 	struct nv50_sw_chan *chan =
 		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
-	struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
-	struct nvkm_bar *bar = nvkm_bar(priv);
+	struct nvkm_sw *sw = chan->base.sw;
+	struct nvkm_device *device = sw->engine.subdev.device;
 
-	nv_wr32(priv, 0x001704, chan->vblank.channel);
-	nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
-	bar->flush(bar);
+	nvkm_wr32(device, 0x001704, chan->base.fifo->inst->addr >> 12);
+	nvkm_wr32(device, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+	nvkm_bar_flush(device->bar);
 
-	if (nv_device(priv)->chipset == 0x50) {
-		nv_wr32(priv, 0x001570, chan->vblank.offset);
-		nv_wr32(priv, 0x001574, chan->vblank.value);
+	if (device->chipset == 0x50) {
+		nvkm_wr32(device, 0x001570, chan->vblank.offset);
+		nvkm_wr32(device, 0x001574, chan->vblank.value);
 	} else {
-		nv_wr32(priv, 0x060010, chan->vblank.offset);
-		nv_wr32(priv, 0x060014, chan->vblank.value);
+		nvkm_wr32(device, 0x060010, chan->vblank.offset);
+		nvkm_wr32(device, 0x060014, chan->vblank.value);
 	}
 
 	return NVKM_NOTIFY_DROP;
 }
 
-void
-nv50_sw_context_dtor(struct nvkm_object *object)
+static bool
+nv50_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
 {
-	struct nv50_sw_chan *chan = (void *)object;
-	int i;
+	struct nv50_sw_chan *chan = nv50_sw_chan(base);
+	struct nvkm_engine *engine = chan->base.object.engine;
+	struct nvkm_device *device = engine->subdev.device;
+	switch (mthd) {
+	case 0x018c: chan->vblank.ctxdma = data; return true;
+	case 0x0400: chan->vblank.offset = data; return true;
+	case 0x0404: chan->vblank.value  = data; return true;
+	case 0x0408:
+		if (data < device->disp->vblank.index_nr) {
+			nvkm_notify_get(&chan->vblank.notify[data]);
+			return true;
+		}
+		break;
+	default:
+		break;
+	}
+	return false;
+}
 
+void *
+nv50_sw_chan_dtor(struct nvkm_sw_chan *base)
+{
+	struct nv50_sw_chan *chan = nv50_sw_chan(base);
+	int i;
 	for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
 		nvkm_notify_fini(&chan->vblank.notify[i]);
-
-	nvkm_sw_context_destroy(&chan->base);
+	return chan;
 }
 
-int
-nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+static const struct nvkm_sw_chan_func
+nv50_sw_chan = {
+	.dtor = nv50_sw_chan_dtor,
+	.mthd = nv50_sw_chan_mthd,
+};
+
+static int
+nv50_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
-	struct nvkm_disp *pdisp = nvkm_disp(parent);
-	struct nv50_sw_cclass *pclass = (void *)oclass;
+	struct nvkm_disp *disp = sw->engine.subdev.device->disp;
 	struct nv50_sw_chan *chan;
 	int ret, i;
 
-	ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_sw_chan_ctor(&nv50_sw_chan, sw, fifoch, oclass, &chan->base);
 	if (ret)
 		return ret;
 
-	for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) {
-		ret = nvkm_notify_init(NULL, &pdisp->vblank, pclass->vblank,
-				       false,
+	for (i = 0; disp && i < disp->vblank.index_nr; i++) {
+		ret = nvkm_notify_init(NULL, &disp->vblank,
+				       nv50_sw_chan_vblsem_release, false,
 				       &(struct nvif_notify_head_req_v0) {
 					.head = i,
 				       },
@@ -180,55 +125,24 @@ nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 			return ret;
 	}
 
-	chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
 	return 0;
 }
 
-static struct nv50_sw_cclass
-nv50_sw_cclass = {
-	.base.handle = NV_ENGCTX(SW, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_context_ctor,
-		.dtor = nv50_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
-	.vblank = nv50_sw_vblsem_release,
-};
-
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
+static const struct nvkm_sw_func
+nv50_sw = {
+	.chan_new = nv50_sw_chan_new,
+	.sclass = {
+		{ nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV50 } },
+		{}
+	}
+};
+
 int
-nv50_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv50_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
 {
-	struct nv50_sw_oclass *pclass = (void *)oclass;
-	struct nv50_sw_priv *priv;
-	int ret;
-
-	ret = nvkm_sw_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = pclass->cclass;
-	nv_engine(priv)->sclass = pclass->sclass;
-	nv_subdev(priv)->intr = nv04_sw_intr;
-	return 0;
+	return nvkm_sw_new_(&nv50_sw, device, index, psw);
 }
-
-struct nvkm_oclass *
-nv50_sw_oclass = &(struct nv50_sw_oclass) {
-	.base.handle = NV_ENGINE(SW, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
-	.cclass = &nv50_sw_cclass.base,
-	.sclass =  nv50_sw_sclass,
-}.base;
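
Note that nv50_sw_chan_dtor() now returns a void * instead of freeing the channel: the subclass destructor tears down only its own state (the vblank notify slots) and hands the allocation back for the generic layer to free, so deallocation happens in exactly one place. A hedged userspace sketch of that convention (obj/obj_func are invented names; base is the first member, so a plain cast stands in for container_of()):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj;

    struct obj_func {
        void *(*dtor)(struct obj *); /* returns the pointer to free */
    };

    struct obj {
        const struct obj_func *func;
    };

    struct my_obj {
        struct obj base;             /* first member */
        char *buf;                   /* subclass-owned resource */
    };

    static void *
    my_obj_dtor(struct obj *base)
    {
        struct my_obj *obj = (struct my_obj *)base;
        free(obj->buf);              /* subclass cleanup only */
        return obj;                  /* caller frees the object */
    }

    static const struct obj_func my_obj_func = { .dtor = my_obj_dtor };

    /* Generic teardown: the single point of deallocation. */
    static void
    obj_del(struct obj *base)
    {
        void *data = base;
        if (base && base->func->dtor)
            data = base->func->dtor(base);
        free(data);
    }

    int main(void)
    {
        struct my_obj *obj = calloc(1, sizeof(*obj));
        if (!obj)
            return 1;
        obj->base.func = &my_obj_func;
        obj->buf = malloc(64);
        obj_del(&obj->base);
        puts("freed");
        return 0;
    }
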
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
index d8adc1108467..25cdfdef2d46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
@@ -1,45 +1,20 @@
 #ifndef __NVKM_SW_NV50_H__
 #define __NVKM_SW_NV50_H__
-#include <engine/sw.h>
+#define nv50_sw_chan(p) container_of((p), struct nv50_sw_chan, base)
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
 #include <core/notify.h>
 
-struct nv50_sw_oclass {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *cclass;
-	struct nvkm_oclass *sclass;
-};
-
-struct nv50_sw_priv {
-	struct nvkm_sw base;
-};
-
-int  nv50_sw_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-
-struct nv50_sw_cclass {
-	struct nvkm_oclass base;
-	int (*vblank)(struct nvkm_notify *);
-};
-
 struct nv50_sw_chan {
 	struct nvkm_sw_chan base;
 	struct {
 		struct nvkm_notify notify[4];
-		u32 channel;
 		u32 ctxdma;
 		u64 offset;
 		u32 value;
 	} vblank;
 };
 
-int  nv50_sw_context_ctor(struct nvkm_object *,
-				struct nvkm_object *,
-				struct nvkm_oclass *, void *, u32,
-				struct nvkm_object **);
-void nv50_sw_context_dtor(struct nvkm_object *);
-
-int nv50_sw_mthd_vblsem_value(struct nvkm_object *, u32, void *, u32);
-int nv50_sw_mthd_vblsem_release(struct nvkm_object *, u32, void *, u32);
-int nv50_sw_mthd_flip(struct nvkm_object *, u32, void *, u32);
+void *nv50_sw_chan_dtor(struct nvkm_sw_chan *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
new file mode 100644
index 000000000000..66cf986b9572
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nvsw.h"
+#include "chan.h"
+
+#include <nvif/class.h>
+
+static int
+nvkm_nvsw_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
+	if (nvsw->func->mthd)
+		return nvsw->func->mthd(nvsw, mthd, data, size);
+	return -ENODEV;
+}
+
+static int
+nvkm_nvsw_ntfy_(struct nvkm_object *object, u32 mthd,
+		struct nvkm_event **pevent)
+{
+	struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
+	switch (mthd) {
+	case NVSW_NTFY_UEVENT:
+		*pevent = &nvsw->chan->event;
+		return 0;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static const struct nvkm_object_func
+nvkm_nvsw_ = {
+	.mthd = nvkm_nvsw_mthd_,
+	.ntfy = nvkm_nvsw_ntfy_,
+};
+
+int
+nvkm_nvsw_new_(const struct nvkm_nvsw_func *func, struct nvkm_sw_chan *chan,
+	       const struct nvkm_oclass *oclass, void *data, u32 size,
+	       struct nvkm_object **pobject)
+{
+	struct nvkm_nvsw *nvsw;
+
+	if (!(nvsw = kzalloc(sizeof(*nvsw), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &nvsw->object;
+
+	nvkm_object_ctor(&nvkm_nvsw_, oclass, &nvsw->object);
+	nvsw->func = func;
+	nvsw->chan = chan;
+	return 0;
+}
+
+static const struct nvkm_nvsw_func
+nvkm_nvsw = {
+};
+
+int
+nvkm_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
+	      void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nvkm_nvsw_new_(&nvkm_nvsw, chan, oclass, data, size, pobject);
+}
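
nvkm_nvsw_ above forwards the generic object hooks to the per-chipset nvkm_nvsw_func and returns an error when a chipset leaves a hook unset, as the empty default table does. A small sketch of that optional-hook forwarding, with hypothetical names:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    struct dev;

    struct dev_func {
        int (*mthd)(struct dev *, uint32_t mthd, uint32_t data); /* optional */
    };

    struct dev {
        const struct dev_func *func;
    };

    /* Generic entry point: forward if implemented, error otherwise. */
    static int
    dev_mthd(struct dev *dev, uint32_t mthd, uint32_t data)
    {
        if (dev->func->mthd)
            return dev->func->mthd(dev, mthd, data);
        return -ENODEV;
    }

    static int
    nv04_like_mthd(struct dev *dev, uint32_t mthd, uint32_t data)
    {
        (void)dev; (void)data;
        return mthd == 0x0150 ? 0 : -EINVAL;
    }

    static const struct dev_func with_mthd = { .mthd = nv04_like_mthd };
    static const struct dev_func without_mthd = { .mthd = NULL };

    int main(void)
    {
        struct dev a = { &with_mthd }, b = { &without_mthd };
        printf("%d %d\n", dev_mthd(&a, 0x0150, 1), dev_mthd(&b, 0x0150, 1));
        return 0;
    }
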
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
new file mode 100644
index 000000000000..943ef4c10091
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_NVSW_H__
+#define __NVKM_NVSW_H__
+#define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
+#include "priv.h"
+
+struct nvkm_nvsw {
+	struct nvkm_object object;
+	const struct nvkm_nvsw_func *func;
+	struct nvkm_sw_chan *chan;
+};
+
+struct nvkm_nvsw_func {
+	int (*mthd)(struct nvkm_nvsw *, u32 mthd, void *data, u32 size);
+};
+
+int nvkm_nvsw_new_(const struct nvkm_nvsw_func *, struct nvkm_sw_chan *,
+		   const struct nvkm_oclass *, void *data, u32 size,
+		   struct nvkm_object **pobject);
+int nvkm_nvsw_new(struct nvkm_sw_chan *, const struct nvkm_oclass *,
+		  void *data, u32 size, struct nvkm_object **pobject);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
new file mode 100644
index 000000000000..0ef1318dc2fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_SW_PRIV_H__
+#define __NVKM_SW_PRIV_H__
+#define nvkm_sw(p) container_of((p), struct nvkm_sw, engine)
+#include <engine/sw.h>
+struct nvkm_sw_chan;
+
+int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *,
+		 int index, struct nvkm_sw **);
+
+struct nvkm_sw_chan_sclass {
+	int (*ctor)(struct nvkm_sw_chan *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
+};
+
+struct nvkm_sw_func {
+	int (*chan_new)(struct nvkm_sw *, struct nvkm_fifo_chan *,
+			const struct nvkm_oclass *, struct nvkm_object **);
+	const struct nvkm_sw_chan_sclass sclass[];
+};
+#endif
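
The sclass arrays declared in nvkm_sw_func are flexible arrays terminated by an empty entry; the core walks them by index until it reaches the sentinel (compare nvkm_xtensa_oclass_get() further down). A sketch of that lookup with invented class data:

    #include <stdio.h>

    struct sclass {
        int oclass;                  /* 0 terminates the list */
        const char *name;
    };

    static const struct sclass sclasses[] = {
        { 0x006e, "NV04 software" },
        { 0x016e, "NV10 software" },
        { 0, NULL }                  /* sentinel, like the empty {} above */
    };

    /* Return the index-th entry, or NULL once past the sentinel. */
    static const struct sclass *
    sclass_get(const struct sclass *list, int index)
    {
        int c = 0;
        while (list[c].oclass) {
            if (c++ == index)
                return &list[index];
        }
        return NULL;
    }

    int main(void)
    {
        const struct sclass *s;
        int i;
        for (i = 0; (s = sclass_get(sclasses, i)); i++)
            printf("%d: %04x %s\n", i, s->oclass, s->name);
        return 0;
    }
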
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index 45f4e186befc..4188c77ac927 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -22,72 +22,23 @@
  * Authors: Ben Skeggs, Ilia Mirkin
  */
 #include <engine/vp.h>
-#include <engine/xtensa.h>
 
-#include <core/engctx.h>
-
-/*******************************************************************************
- * VP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_vp_sclass[] = {
-	{ 0x7476, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PVP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_vp_cclass = {
-	.handle = NV_ENGCTX(VP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_xtensa_engctx_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
+#include <nvif/class.h>
+
+static const struct nvkm_xtensa_func
+g84_vp = {
+	.pmc_enable = 0x01020000,
+	.fifo_val = 0x111,
+	.unkd28 = 0x9c544,
+	.sclass = {
+		{ -1, -1, NV74_VP2 },
+		{}
+	}
 };
 
-/*******************************************************************************
- * PVP engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_vp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	    struct nvkm_oclass *oclass, void *data, u32 size,
-	    struct nvkm_object **pobject)
+int
+g84_vp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
-	struct nvkm_xtensa *priv;
-	int ret;
-
-	ret = nvkm_xtensa_create(parent, engine, oclass, 0xf000, true,
-				 "PVP", "vp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x01020000;
-	nv_engine(priv)->cclass = &g84_vp_cclass;
-	nv_engine(priv)->sclass = g84_vp_sclass;
-	priv->fifo_val = 0x111;
-	priv->unkd28 = 0x9c544;
-	return 0;
+	return nvkm_xtensa_new_(&g84_vp, device, index,
+				true, 0x00f000, pengine);
 }
-
-struct nvkm_oclass
-g84_vp_oclass = {
-	.handle = NV_ENGINE(VP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_vp_ctor,
-		.dtor = _nvkm_xtensa_dtor,
-		.init = _nvkm_xtensa_init,
-		.fini = _nvkm_xtensa_fini,
-		.rd32 = _nvkm_xtensa_rd32,
-		.wr32 = _nvkm_xtensa_wr32,
-	},
-};
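
After the conversion, g84.c is reduced to chip-specific data (the const nvkm_xtensa_func) plus a one-line constructor that delegates to the shared nvkm_xtensa_new_(). A sketch of this data-driven construction pattern, using hypothetical names:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* All per-chip variation lives in a const data table... */
    struct engine_func {
        uint32_t pmc_enable;
        uint32_t fifo_val;
    };

    /* ...consumed by one shared constructor. */
    struct engine {
        const struct engine_func *func;
        uint32_t addr;
    };

    static int
    engine_new_(const struct engine_func *func, uint32_t addr,
                struct engine **pengine)
    {
        struct engine *engine;

        if (!(engine = calloc(1, sizeof(*engine))))
            return -1;
        engine->func = func;
        engine->addr = addr;
        *pengine = engine;
        return 0;
    }

    static const struct engine_func g84_like = {
        .pmc_enable = 0x01020000,
        .fifo_val   = 0x111,
    };

    int main(void)
    {
        struct engine *engine;

        if (engine_new_(&g84_like, 0xf000, &engine) == 0) {
            printf("pmc=%08x fifo=%03x\n",
                   engine->func->pmc_enable, engine->func->fifo_val);
            free(engine);
        }
        return 0;
    }
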
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index cea90df533d9..a3d4f5bcec7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -20,153 +20,173 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include <engine/xtensa.h>
-#include <core/device.h>
 
-#include <core/engctx.h>
+#include <core/gpuobj.h>
+#include <engine/fifo.h>
 
-u32
-_nvkm_xtensa_rd32(struct nvkm_object *object, u64 addr)
+static int
+nvkm_xtensa_oclass_get(struct nvkm_oclass *oclass, int index)
 {
-	struct nvkm_xtensa *xtensa = (void *)object;
-	return nv_rd32(xtensa, xtensa->addr + addr);
-}
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(oclass->engine);
+	int c = 0;
 
-void
-_nvkm_xtensa_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_xtensa *xtensa = (void *)object;
-	nv_wr32(xtensa, xtensa->addr + addr, data);
+	while (xtensa->func->sclass[c].oclass) {
+		if (c++ == index) {
+			oclass->base = xtensa->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
 }
 
-int
-_nvkm_xtensa_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			 struct nvkm_oclass *oclass, void *data, u32 size,
-			 struct nvkm_object **pobject)
+static int
+nvkm_xtensa_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+			int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_engctx *engctx;
-	int ret;
-
-	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
-	*pobject = nv_object(engctx);
-	return ret;
+	return nvkm_gpuobj_new(object->engine->subdev.device, 0x10000, align,
+			       true, parent, pgpuobj);
 }
 
-void
-_nvkm_xtensa_intr(struct nvkm_subdev *subdev)
+static const struct nvkm_object_func
+nvkm_xtensa_cclass = {
+	.bind = nvkm_xtensa_cclass_bind,
+};
+
+static void
+nvkm_xtensa_intr(struct nvkm_engine *engine)
 {
-	struct nvkm_xtensa *xtensa = (void *)subdev;
-	u32 unk104 = nv_ro32(xtensa, 0xd04);
-	u32 intr = nv_ro32(xtensa, 0xc20);
-	u32 chan = nv_ro32(xtensa, 0xc28);
-	u32 unk10c = nv_ro32(xtensa, 0xd0c);
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+	struct nvkm_subdev *subdev = &xtensa->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = xtensa->addr;
+	u32 unk104 = nvkm_rd32(device, base + 0xd04);
+	u32 intr = nvkm_rd32(device, base + 0xc20);
+	u32 chan = nvkm_rd32(device, base + 0xc28);
+	u32 unk10c = nvkm_rd32(device, base + 0xd0c);
 
 	if (intr & 0x10)
-		nv_warn(xtensa, "Watchdog interrupt, engine hung.\n");
-	nv_wo32(xtensa, 0xc20, intr);
-	intr = nv_ro32(xtensa, 0xc20);
+		nvkm_warn(subdev, "Watchdog interrupt, engine hung.\n");
+	nvkm_wr32(device, base + 0xc20, intr);
+	intr = nvkm_rd32(device, base + 0xc20);
 	if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
-		nv_debug(xtensa, "Enabling FIFO_CTRL\n");
-		nv_mask(xtensa, xtensa->addr + 0xd94, 0, xtensa->fifo_val);
+		nvkm_debug(subdev, "Enabling FIFO_CTRL\n");
+		nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->func->fifo_val);
 	}
 }
 
-int
-nvkm_xtensa_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 addr, bool enable,
-		    const char *iname, const char *fname,
-		    int length, void **pobject)
+static int
+nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
 {
-	struct nvkm_xtensa *xtensa;
-	int ret;
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+	struct nvkm_device *device = xtensa->engine.subdev.device;
+	const u32 base = xtensa->addr;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
-				  fname, length, pobject);
-	xtensa = *pobject;
-	if (ret)
-		return ret;
+	nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
+	nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
 
-	nv_subdev(xtensa)->intr = _nvkm_xtensa_intr;
-	xtensa->addr = addr;
+	if (!suspend)
+		nvkm_memory_del(&xtensa->gpu_fw);
 	return 0;
 }
 
-int
-_nvkm_xtensa_init(struct nvkm_object *object)
+static int
+nvkm_xtensa_init(struct nvkm_engine *engine)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_xtensa *xtensa = (void *)object;
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+	struct nvkm_subdev *subdev = &xtensa->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = xtensa->addr;
 	const struct firmware *fw;
 	char name[32];
 	int i, ret;
+	u64 addr, size;
 	u32 tmp;
 
-	ret = nvkm_engine_init(&xtensa->base);
-	if (ret)
-		return ret;
-
 	if (!xtensa->gpu_fw) {
 		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
 			 xtensa->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
-			nv_warn(xtensa, "unable to load firmware %s\n", name);
+			nvkm_warn(subdev, "unable to load firmware %s\n", name);
 			return ret;
 		}
 
 		if (fw->size > 0x40000) {
-			nv_warn(xtensa, "firmware %s too large\n", name);
+			nvkm_warn(subdev, "firmware %s too large\n", name);
 			release_firmware(fw);
 			return -EINVAL;
 		}
 
-		ret = nvkm_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x40000, 0x1000, false,
 				      &xtensa->gpu_fw);
 		if (ret) {
 			release_firmware(fw);
 			return ret;
 		}
 
-		nv_debug(xtensa, "Loading firmware to address: 0x%llx\n",
-			 xtensa->gpu_fw->addr);
-
+		nvkm_kmap(xtensa->gpu_fw);
 		for (i = 0; i < fw->size / 4; i++)
-			nv_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
+			nvkm_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
+		nvkm_done(xtensa->gpu_fw);
 		release_firmware(fw);
 	}
 
-	nv_wo32(xtensa, 0xd10, 0x1fffffff); /* ?? */
-	nv_wo32(xtensa, 0xd08, 0x0fffffff); /* ?? */
+	addr = nvkm_memory_addr(xtensa->gpu_fw);
+	size = nvkm_memory_size(xtensa->gpu_fw);
 
-	nv_wo32(xtensa, 0xd28, xtensa->unkd28); /* ?? */
-	nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */
-	nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */
+	nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */
+	nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */
 
-	nv_wo32(xtensa, 0xcc0, xtensa->gpu_fw->addr >> 8); /* XT_REGION_BASE */
-	nv_wo32(xtensa, 0xcc4, 0x1c); /* XT_REGION_SETUP */
-	nv_wo32(xtensa, 0xcc8, xtensa->gpu_fw->size >> 8); /* XT_REGION_LIMIT */
+	nvkm_wr32(device, base + 0xd28, xtensa->func->unkd28); /* ?? */
+	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
+	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
 
-	tmp = nv_rd32(xtensa, 0x0);
-	nv_wo32(xtensa, 0xde0, tmp); /* SCRATCH_H2X */
+	nvkm_wr32(device, base + 0xcc0, addr >> 8); /* XT_REGION_BASE */
+	nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */
+	nvkm_wr32(device, base + 0xcc8, size >> 8); /* XT_REGION_LIMIT */
 
-	nv_wo32(xtensa, 0xce8, 0xf); /* XT_REGION_SETUP */
+	tmp = nvkm_rd32(device, 0x0);
+	nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */
 
-	nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */
-	nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */
+	nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */
+
+	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
+	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
 	return 0;
 }
 
-int
-_nvkm_xtensa_fini(struct nvkm_object *object, bool suspend)
+static void *
+nvkm_xtensa_dtor(struct nvkm_engine *engine)
 {
-	struct nvkm_xtensa *xtensa = (void *)object;
+	return nvkm_xtensa(engine);
+}
 
-	nv_wo32(xtensa, 0xd84, 0); /* INTR_EN */
-	nv_wo32(xtensa, 0xd94, 0); /* FIFO_CTRL */
+static const struct nvkm_engine_func
+nvkm_xtensa = {
+	.dtor = nvkm_xtensa_dtor,
+	.init = nvkm_xtensa_init,
+	.fini = nvkm_xtensa_fini,
+	.intr = nvkm_xtensa_intr,
+	.fifo.sclass = nvkm_xtensa_oclass_get,
+	.cclass = &nvkm_xtensa_cclass,
+};
 
-	if (!suspend)
-		nvkm_gpuobj_ref(NULL, &xtensa->gpu_fw);
+int
+nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
+		 struct nvkm_device *device, int index, bool enable,
+		 u32 addr, struct nvkm_engine **pengine)
+{
+	struct nvkm_xtensa *xtensa;
+
+	if (!(xtensa = kzalloc(sizeof(*xtensa), GFP_KERNEL)))
+		return -ENOMEM;
+	xtensa->func = func;
+	xtensa->addr = addr;
+	*pengine = &xtensa->engine;
 
-	return nvkm_engine_fini(&xtensa->base, suspend);
+	return nvkm_engine_ctor(&nvkm_xtensa, device, index, func->pmc_enable,
+				enable, &xtensa->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index a1bb3e48739c..ee2c38f50ef5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -13,6 +13,7 @@ include $(src)/nvkm/subdev/ltc/Kbuild
 include $(src)/nvkm/subdev/mc/Kbuild
 include $(src)/nvkm/subdev/mmu/Kbuild
 include $(src)/nvkm/subdev/mxm/Kbuild
+include $(src)/nvkm/subdev/pci/Kbuild
 include $(src)/nvkm/subdev/pmu/Kbuild
 include $(src)/nvkm/subdev/therm/Kbuild
 include $(src)/nvkm/subdev/timer/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 1ab554a0b5e0..1e138b337955 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -1,4 +1,5 @@
 nvkm-y += nvkm/subdev/bar/base.o
 nvkm-y += nvkm/subdev/bar/nv50.o
+nvkm-y += nvkm/subdev/bar/g84.o
 nvkm-y += nvkm/subdev/bar/gf100.o
 nvkm-y += nvkm/subdev/bar/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 3502d00122ef..a9433ad45b1e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -23,122 +23,61 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
-
-struct nvkm_barobj {
-	struct nvkm_object base;
-	struct nvkm_vma vma;
-	void __iomem *iomem;
-};
-
-static int
-nvkm_barobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
+void
+nvkm_bar_flush(struct nvkm_bar *bar)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_bar *bar = nvkm_bar(device);
-	struct nvkm_mem *mem = data;
-	struct nvkm_barobj *barobj;
-	int ret;
-
-	ret = nvkm_object_create(parent, engine, oclass, 0, &barobj);
-	*pobject = nv_object(barobj);
-	if (ret)
-		return ret;
-
-	ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
-	if (ret)
-		return ret;
-
-	barobj->iomem = ioremap(nv_device_resource_start(device, 3) +
-				(u32)barobj->vma.offset, mem->size << 12);
-	if (!barobj->iomem) {
-		nv_warn(bar, "PRAMIN ioremap failed\n");
-		return -ENOMEM;
-	}
-
-	return 0;
+	if (bar && bar->func->flush)
+		bar->func->flush(bar);
 }
 
-static void
-nvkm_barobj_dtor(struct nvkm_object *object)
+struct nvkm_vm *
+nvkm_bar_kmap(struct nvkm_bar *bar)
 {
-	struct nvkm_bar *bar = nvkm_bar(object);
-	struct nvkm_barobj *barobj = (void *)object;
-	if (barobj->vma.node) {
-		if (barobj->iomem)
-			iounmap(barobj->iomem);
-		bar->unmap(bar, &barobj->vma);
-	}
-	nvkm_object_destroy(&barobj->base);
+	/* disallow kmap() until after vm has been bootstrapped */
+	if (bar && bar->func->kmap && bar->subdev.oneinit)
+		return bar->func->kmap(bar);
+	return NULL;
 }
 
-static u32
-nvkm_barobj_rd32(struct nvkm_object *object, u64 addr)
+int
+nvkm_bar_umap(struct nvkm_bar *bar, u64 size, int type, struct nvkm_vma *vma)
 {
-	struct nvkm_barobj *barobj = (void *)object;
-	return ioread32_native(barobj->iomem + addr);
+	return bar->func->umap(bar, size, type, vma);
 }
 
-static void
-nvkm_barobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
+static int
+nvkm_bar_oneinit(struct nvkm_subdev *subdev)
 {
-	struct nvkm_barobj *barobj = (void *)object;
-	iowrite32_native(data, barobj->iomem + addr);
+	struct nvkm_bar *bar = nvkm_bar(subdev);
+	return bar->func->oneinit(bar);
 }
 
-static struct nvkm_oclass
-nvkm_barobj_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nvkm_barobj_ctor,
-		.dtor = nvkm_barobj_dtor,
-		.init = nvkm_object_init,
-		.fini = nvkm_object_fini,
-		.rd32 = nvkm_barobj_rd32,
-		.wr32 = nvkm_barobj_wr32,
-	},
-};
-
-int
-nvkm_bar_alloc(struct nvkm_bar *bar, struct nvkm_object *parent,
-	       struct nvkm_mem *mem, struct nvkm_object **pobject)
+static int
+nvkm_bar_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_object *gpuobj;
-	int ret = nvkm_object_ctor(parent, &parent->engine->subdev.object,
-				   &nvkm_barobj_oclass, mem, 0, &gpuobj);
-	if (ret == 0)
-		*pobject = gpuobj;
-	return ret;
+	struct nvkm_bar *bar = nvkm_bar(subdev);
+	return bar->func->init(bar);
 }
 
-int
-nvkm_bar_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+static void *
+nvkm_bar_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_bar *bar;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "BARCTL",
-				  "bar", length, pobject);
-	bar = *pobject;
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nvkm_bar *bar = nvkm_bar(subdev);
+	return bar->func->dtor(bar);
 }
 
-void
-nvkm_bar_destroy(struct nvkm_bar *bar)
-{
-	nvkm_subdev_destroy(&bar->base);
-}
+static const struct nvkm_subdev_func
+nvkm_bar = {
+	.dtor = nvkm_bar_dtor,
+	.oneinit = nvkm_bar_oneinit,
+	.init = nvkm_bar_init,
+};
 
 void
-_nvkm_bar_dtor(struct nvkm_object *object)
+nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_bar *bar)
 {
-	struct nvkm_bar *bar = (void *)object;
-	nvkm_bar_destroy(bar);
+	nvkm_subdev_ctor(&nvkm_bar, device, index, 0, &bar->subdev);
+	bar->func = func;
+	spin_lock_init(&bar->lock);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
new file mode 100644
index 000000000000..ef717136c838
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+
+#include <subdev/timer.h>
+
+void
+g84_bar_flush(struct nvkm_bar *bar)
+{
+	struct nvkm_device *device = bar->subdev.device;
+	unsigned long flags;
+	spin_lock_irqsave(&bar->lock, flags);
+	nvkm_wr32(device, 0x070000, 0x00000001);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x070000) & 0x00000002))
+			break;
+	);
+	spin_unlock_irqrestore(&bar->lock, flags);
+}
+
+static const struct nvkm_bar_func
+g84_bar_func = {
+	.dtor = nv50_bar_dtor,
+	.oneinit = nv50_bar_oneinit,
+	.init = nv50_bar_init,
+	.kmap = nv50_bar_kmap,
+	.umap = nv50_bar_umap,
+	.flush = g84_bar_flush,
+};
+
+int
+g84_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return nv50_bar_new_(&g84_bar_func, device, index, 0x200, pbar);
+}
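
g84_bar_flush() replaces the old nv_wait() helper with an nvkm_msec() poll: trigger the flush, then spin until the busy bit clears or the 2000ms budget expires. A rough userspace equivalent built on clock_gettime() (reg_rd32() and the bit layout are stand-ins, and the simulated register goes idle after a few polls):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint32_t fake_reg = 0x2;      /* busy bit set initially */

    static uint32_t
    reg_rd32(void)
    {
        static int polls;
        if (++polls > 3)                 /* "hardware" goes idle */
            fake_reg &= ~0x2u;
        return fake_reg;
    }

    static int64_t
    now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
    }

    /* Poll until the busy bit clears or the budget expires. */
    static int
    wait_idle(int64_t budget_ms)
    {
        int64_t deadline = now_ns() + budget_ms * 1000000;

        while (now_ns() < deadline) {
            if (!(reg_rd32() & 0x2))
                return 0;
        }
        return -1;                       /* timed out */
    }

    int main(void)
    {
        printf("idle=%d\n", wait_idle(2000));
        return 0;
    }
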
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 12a1aebd9a96..c794b2c2d21e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -21,101 +21,60 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "gf100.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 
-struct gf100_bar_priv_vm {
-	struct nvkm_gpuobj *mem;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
-
-struct gf100_bar_priv {
-	struct nvkm_bar base;
-	spinlock_t lock;
-	struct gf100_bar_priv_vm bar[2];
-};
-
-static int
-gf100_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	       struct nvkm_vma *vma)
-{
-	struct gf100_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
-}
-
-static int
-gf100_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	       struct nvkm_vma *vma)
+static struct nvkm_vm *
+gf100_bar_kmap(struct nvkm_bar *base)
 {
-	struct gf100_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar[1].vm, mem->size << 12,
-			  mem->page_shift, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	return gf100_bar(base)->bar[0].vm;
 }
 
-static void
-gf100_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
+int
+gf100_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
 {
-	nvkm_vm_unmap(vma);
-	nvkm_vm_put(vma);
+	struct gf100_bar *bar = gf100_bar(base);
+	return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
 }
 
 static int
-gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm,
-		  int bar_nr)
+gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
+		  struct lock_class_key *key, int bar_nr)
 {
-	struct nvkm_device *device = nv_device(&priv->base);
+	struct nvkm_device *device = bar->base.subdev.device;
 	struct nvkm_vm *vm;
 	resource_size_t bar_len;
 	int ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
 			      &bar_vm->mem);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
-			      &bar_vm->pgd);
+	ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
 	if (ret)
 		return ret;
 
-	bar_len = nv_device_resource_len(device, bar_nr);
+	bar_len = device->func->resource_size(device, bar_nr);
 
-	ret = nvkm_vm_new(device, 0, bar_len, 0, &vm);
+	ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
 	/*
 	 * Bootstrap page table lookup.
 	 */
 	if (bar_nr == 3) {
-		ret = nvkm_gpuobj_new(nv_object(priv), NULL,
-				      (bar_len >> 12) * 8, 0x1000,
-				      NVOBJ_FLAG_ZERO_ALLOC,
-				      &vm->pgt[0].obj[0]);
-		vm->pgt[0].refcount[0] = 1;
-		if (ret)
+		ret = nvkm_vm_boot(vm, bar_len);
+		if (ret) {
+			nvkm_vm_ref(NULL, &vm, NULL);
 			return ret;
+		}
 	}
 
 	ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
@@ -123,97 +82,101 @@ gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm,
 	if (ret)
 		return ret;
 
-	nv_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
-	nv_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
-	nv_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
-	nv_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
+	nvkm_kmap(bar_vm->mem);
+	nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
+	nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
+	nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
+	nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
+	nvkm_done(bar_vm->mem);
 	return 0;
 }
 
 int
-gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+gf100_bar_oneinit(struct nvkm_bar *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct gf100_bar_priv *priv;
-	bool has_bar3 = nv_device_resource_len(device, 3) != 0;
+	static struct lock_class_key bar1_lock;
+	static struct lock_class_key bar3_lock;
+	struct gf100_bar *bar = gf100_bar(base);
 	int ret;
 
-	ret = nvkm_bar_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
 	/* BAR3 */
-	if (has_bar3) {
-		ret = gf100_bar_ctor_vm(priv, &priv->bar[0], 3);
+	if (bar->base.func->kmap) {
+		ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
 		if (ret)
 			return ret;
 	}
 
 	/* BAR1 */
-	ret = gf100_bar_ctor_vm(priv, &priv->bar[1], 1);
+	ret = gf100_bar_ctor_vm(bar, &bar->bar[1], &bar1_lock, 1);
 	if (ret)
 		return ret;
 
-	if (has_bar3) {
-		priv->base.alloc = nvkm_bar_alloc;
-		priv->base.kmap = gf100_bar_kmap;
-	}
-	priv->base.umap = gf100_bar_umap;
-	priv->base.unmap = gf100_bar_unmap;
-	priv->base.flush = g84_bar_flush;
-	spin_lock_init(&priv->lock);
 	return 0;
 }
 
-void
-gf100_bar_dtor(struct nvkm_object *object)
+int
+gf100_bar_init(struct nvkm_bar *base)
 {
-	struct gf100_bar_priv *priv = (void *)object;
+	struct gf100_bar *bar = gf100_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	u32 addr;
+
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
 
-	nvkm_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar[1].pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar[1].mem);
+	addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
+	nvkm_wr32(device, 0x001704, 0x80000000 | addr);
 
-	if (priv->bar[0].vm) {
-		nvkm_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
-		nvkm_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
+	if (bar->bar[0].mem) {
+		addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
+		nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
 	}
-	nvkm_gpuobj_ref(NULL, &priv->bar[0].pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar[0].mem);
 
-	nvkm_bar_destroy(&priv->base);
+	return 0;
 }
 
-int
-gf100_bar_init(struct nvkm_object *object)
+void *
+gf100_bar_dtor(struct nvkm_bar *base)
 {
-	struct gf100_bar_priv *priv = (void *)object;
-	int ret;
+	struct gf100_bar *bar = gf100_bar(base);
 
-	ret = nvkm_bar_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
+	nvkm_gpuobj_del(&bar->bar[1].pgd);
+	nvkm_memory_del(&bar->bar[1].mem);
 
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+	if (bar->bar[0].vm) {
+		nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
+	}
+	nvkm_gpuobj_del(&bar->bar[0].pgd);
+	nvkm_memory_del(&bar->bar[0].mem);
+	return bar;
+}
 
-	nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
-	if (priv->bar[0].mem)
-		nv_wr32(priv, 0x001714,
-			0xc0000000 | priv->bar[0].mem->addr >> 12);
+int
+gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_bar **pbar)
+{
+	struct gf100_bar *bar;
+	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_bar_ctor(func, device, index, &bar->base);
+	*pbar = &bar->base;
 	return 0;
 }
 
-struct nvkm_oclass
-gf100_bar_oclass = {
-	.handle = NV_SUBDEV(BAR, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_bar_ctor,
-		.dtor = gf100_bar_dtor,
-		.init = gf100_bar_init,
-		.fini = _nvkm_bar_fini,
-	},
+static const struct nvkm_bar_func
+gf100_bar_func = {
+	.dtor = gf100_bar_dtor,
+	.oneinit = gf100_bar_oneinit,
+	.init = gf100_bar_init,
+	.kmap = gf100_bar_kmap,
+	.umap = gf100_bar_umap,
+	.flush = g84_bar_flush,
 };
+
+int
+gf100_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return gf100_bar_new_(&gf100_bar_func, device, index, pbar);
+}
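
gf100_bar_ctor_vm() brackets its instance-memory writes with nvkm_kmap()/nvkm_done() and splits the 64-bit PGD address and BAR limit across paired 32-bit writes. A toy sketch of that bracketed-accessor pattern (all names invented; the assert stands in for the kernel's implicit requirement that writes happen inside the bracket):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define lower_32_bits(n) ((uint32_t)(n))
    #define upper_32_bits(n) ((uint32_t)((n) >> 32))

    struct memory {
        uint32_t word[16];
        int mapped;                      /* tracks kmap()/done() pairing */
    };

    static void mem_kmap(struct memory *m) { m->mapped = 1; } /* map for CPU */
    static void mem_done(struct memory *m) { m->mapped = 0; } /* flush, unmap */

    static void
    mem_wo32(struct memory *m, uint32_t offset, uint32_t data)
    {
        assert(m->mapped);               /* writes only inside the bracket */
        m->word[offset / 4] = data;
    }

    int main(void)
    {
        struct memory inst = {0};
        uint64_t pgd_addr = 0x123456789ULL;
        uint64_t limit = 0x40000000ULL - 1;

        mem_kmap(&inst);
        mem_wo32(&inst, 0x0, lower_32_bits(pgd_addr));
        mem_wo32(&inst, 0x4, upper_32_bits(pgd_addr));
        mem_wo32(&inst, 0x8, lower_32_bits(limit));
        mem_wo32(&inst, 0xc, upper_32_bits(limit));
        mem_done(&inst);

        printf("%08x %08x\n", inst.word[0], inst.word[1]);
        return 0;
    }
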
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
new file mode 100644
index 000000000000..f7dea69640d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -0,0 +1,23 @@
+#ifndef __GF100_BAR_H__
+#define __GF100_BAR_H__
+#define gf100_bar(p) container_of((p), struct gf100_bar, base)
+#include "priv.h"
+
+struct gf100_bar_vm {
+	struct nvkm_memory *mem;
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *vm;
+};
+
+struct gf100_bar {
+	struct nvkm_bar base;
+	struct gf100_bar_vm bar[2];
+};
+
+int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+		   int, struct nvkm_bar **);
+void *gf100_bar_dtor(struct nvkm_bar *);
+int gf100_bar_oneinit(struct nvkm_bar *);
+int gf100_bar_init(struct nvkm_bar *);
+int gf100_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 148f739a276e..9232fab4274c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -19,32 +19,22 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include "priv.h"
+#include "gf100.h"
+
+static const struct nvkm_bar_func
+gk20a_bar_func = {
+	.dtor = gf100_bar_dtor,
+	.oneinit = gf100_bar_oneinit,
+	.init = gf100_bar_init,
+	.umap = gf100_bar_umap,
+	.flush = g84_bar_flush,
+};
 
 int
-gk20a_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+gk20a_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
 {
-	struct nvkm_bar *bar;
-	int ret;
-
-	ret = gf100_bar_ctor(parent, engine, oclass, data, size, pobject);
-	if (ret)
-		return ret;
-
-	bar = (struct nvkm_bar *)*pobject;
-	bar->iomap_uncached = true;
-	return 0;
+	int ret = gf100_bar_new_(&gk20a_bar_func, device, index, pbar);
+	if (ret == 0)
+		(*pbar)->iomap_uncached = true;
+	return ret;
 }
-
-struct nvkm_oclass
-gk20a_bar_oclass = {
-	.handle = NV_SUBDEV(BAR, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_bar_ctor,
-		.dtor = gf100_bar_dtor,
-		.init = gf100_bar_init,
-		.fini = _nvkm_bar_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 8548adb91dcc..370dcd8ff7b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -21,251 +21,196 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "nv50.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
 
-struct nv50_bar_priv {
-	struct nvkm_bar base;
-	spinlock_t lock;
-	struct nvkm_gpuobj *mem;
-	struct nvkm_gpuobj *pad;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *bar1_vm;
-	struct nvkm_gpuobj *bar1;
-	struct nvkm_vm *bar3_vm;
-	struct nvkm_gpuobj *bar3;
-};
-
-static int
-nv50_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	      struct nvkm_vma *vma)
-{
-	struct nv50_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
-}
-
-static int
-nv50_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	      struct nvkm_vma *vma)
+struct nvkm_vm *
+nv50_bar_kmap(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	return nv50_bar(base)->bar3_vm;
 }
 
-static void
-nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
+int
+nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
 {
-	nvkm_vm_unmap(vma);
-	nvkm_vm_put(vma);
+	struct nv50_bar *bar = nv50_bar(base);
+	return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
 }
 
 static void
-nv50_bar_flush(struct nvkm_bar *bar)
-{
-	struct nv50_bar_priv *priv = (void *)bar;
-	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_wr32(priv, 0x00330c, 0x00000001);
-	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
-		nv_warn(priv, "flush timeout\n");
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void
-g84_bar_flush(struct nvkm_bar *bar)
+nv50_bar_flush(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)bar;
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
 	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_wr32(bar, 0x070000, 0x00000001);
-	if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
-		nv_warn(priv, "flush timeout\n");
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&bar->base.lock, flags);
+	nvkm_wr32(device, 0x00330c, 0x00000001);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
+			break;
+	);
+	spin_unlock_irqrestore(&bar->base.lock, flags);
 }
 
-static int
-nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+nv50_bar_oneinit(struct nvkm_bar *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_object *heap;
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	static struct lock_class_key bar1_lock;
+	static struct lock_class_key bar3_lock;
 	struct nvkm_vm *vm;
-	struct nv50_bar_priv *priv;
 	u64 start, limit;
 	int ret;
 
-	ret = nvkm_bar_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_HEAP, &priv->mem);
-	heap = nv_object(priv->mem);
+	ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
+			      &bar->pad);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap,
-			      (device->chipset == 0x50) ? 0x1400 : 0x0200,
-			      0, 0, &priv->pad);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 0x4000, 0, 0, &priv->pgd);
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
 	if (ret)
 		return ret;
 
 	/* BAR3 */
 	start = 0x0100000000ULL;
-	limit = start + nv_device_resource_len(device, 3);
+	limit = start + device->func->resource_size(device, 3);
 
-	ret = nvkm_vm_new(device, start, limit, start, &vm);
+	ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap,
-			      ((limit-- - start) >> 12) * 8, 0x1000,
-			      NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
-	vm->pgt[0].refcount[0] = 1;
+	ret = nvkm_vm_boot(vm, limit-- - start);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_ref(vm, &priv->bar3_vm, priv->pgd);
+	ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
 	if (ret)
 		return ret;
 
-	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
-	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
-	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
-	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
-				  upper_32_bits(start));
-	nv_wo32(priv->bar3, 0x10, 0x00000000);
-	nv_wo32(priv->bar3, 0x14, 0x00000000);
+	nvkm_kmap(bar->bar3);
+	nvkm_wo32(bar->bar3, 0x00, 0x7fc00000);
+	nvkm_wo32(bar->bar3, 0x04, lower_32_bits(limit));
+	nvkm_wo32(bar->bar3, 0x08, lower_32_bits(start));
+	nvkm_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
+				   upper_32_bits(start));
+	nvkm_wo32(bar->bar3, 0x10, 0x00000000);
+	nvkm_wo32(bar->bar3, 0x14, 0x00000000);
+	nvkm_done(bar->bar3);
 
 	/* BAR1 */
 	start = 0x0000000000ULL;
-	limit = start + nv_device_resource_len(device, 1);
+	limit = start + device->func->resource_size(device, 1);
 
-	ret = nvkm_vm_new(device, start, limit--, start, &vm);
+	ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
-	ret = nvkm_vm_ref(vm, &priv->bar1_vm, priv->pgd);
+	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
 	if (ret)
 		return ret;
 
-	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
-	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
-	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
-	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
-				  upper_32_bits(start));
-	nv_wo32(priv->bar1, 0x10, 0x00000000);
-	nv_wo32(priv->bar1, 0x14, 0x00000000);
-
-	priv->base.alloc = nvkm_bar_alloc;
-	priv->base.kmap = nv50_bar_kmap;
-	priv->base.umap = nv50_bar_umap;
-	priv->base.unmap = nv50_bar_unmap;
-	if (device->chipset == 0x50)
-		priv->base.flush = nv50_bar_flush;
-	else
-		priv->base.flush = g84_bar_flush;
-	spin_lock_init(&priv->lock);
+	nvkm_kmap(bar->bar1);
+	nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
+	nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
+	nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
+	nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
+				   upper_32_bits(start));
+	nvkm_wo32(bar->bar1, 0x10, 0x00000000);
+	nvkm_wo32(bar->bar1, 0x14, 0x00000000);
+	nvkm_done(bar->bar1);
 	return 0;
 }
 
-static void
-nv50_bar_dtor(struct nvkm_object *object)
+int
+nv50_bar_init(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->bar1);
-	nvkm_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar3);
-	if (priv->bar3_vm) {
-		nvkm_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
-		nvkm_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
-	}
-	nvkm_gpuobj_ref(NULL, &priv->pgd);
-	nvkm_gpuobj_ref(NULL, &priv->pad);
-	nvkm_gpuobj_ref(NULL, &priv->mem);
-	nvkm_bar_destroy(&priv->base);
-}
-
-static int
-nv50_bar_init(struct nvkm_object *object)
-{
-	struct nv50_bar_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nvkm_bar_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x100c80, 0x00060001);
-	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
-		nv_error(priv, "vm flush timeout\n");
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	int i;
+
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x100c80, 0x00060001);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+			break;
+	) < 0)
 		return -EBUSY;
-	}
 
-	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
-	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
-	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
-	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
+	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
+	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
+	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
+	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x001900 + (i * 4), 0x00000000);
+		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
 	return 0;
 }
 
-static int
-nv50_bar_fini(struct nvkm_object *object, bool suspend)
+void *
+nv50_bar_dtor(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)object;
-	return nvkm_bar_fini(&priv->base, suspend);
+	struct nv50_bar *bar = nv50_bar(base);
+	nvkm_gpuobj_del(&bar->bar1);
+	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
+	nvkm_gpuobj_del(&bar->bar3);
+	if (bar->bar3_vm) {
+		nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
+	}
+	nvkm_gpuobj_del(&bar->pgd);
+	nvkm_gpuobj_del(&bar->pad);
+	nvkm_gpuobj_del(&bar->mem);
+	return bar;
 }
 
-struct nvkm_oclass
-nv50_bar_oclass = {
-	.handle = NV_SUBDEV(BAR, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_bar_ctor,
-		.dtor = nv50_bar_dtor,
-		.init = nv50_bar_init,
-		.fini = nv50_bar_fini,
-	},
+int
+nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
+	      int index, u32 pgd_addr, struct nvkm_bar **pbar)
+{
+	struct nv50_bar *bar;
+	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_bar_ctor(func, device, index, &bar->base);
+	bar->pgd_addr = pgd_addr;
+	*pbar = &bar->base;
+	return 0;
+}
+
+static const struct nvkm_bar_func
+nv50_bar_func = {
+	.dtor = nv50_bar_dtor,
+	.oneinit = nv50_bar_oneinit,
+	.init = nv50_bar_init,
+	.kmap = nv50_bar_kmap,
+	.umap = nv50_bar_umap,
+	.flush = nv50_bar_flush,
 };
+
+int
+nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
+}
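
nv50_bar_flush() and nv50_bar_init() above replace the old nv_wait() helper with nvkm_msec(), which repeatedly evaluates a statement body until the body executes break or the timeout expires, and evaluates negative on timeout. A user-space approximation of that construct, assuming GNU C statement expressions as the kernel does (poll_msec, elapsed_ms and the exact return convention are this sketch's, not nvkm's implementation):

    #include <stdio.h>
    #include <time.h>

    static long elapsed_ms(const struct timespec *t0)
    {
        struct timespec t;
        clock_gettime(CLOCK_MONOTONIC, &t);
        return (t.tv_sec - t0->tv_sec) * 1000L +
               (t.tv_nsec - t0->tv_nsec) / 1000000L;
    }

    /* run `body` until it executes `break`, or `ms` milliseconds pass;
     * evaluates to the time remaining on success, negative on timeout */
    #define poll_msec(ms, body...) ({                \
        struct timespec _t0;                         \
        long _ret;                                   \
        clock_gettime(CLOCK_MONOTONIC, &_t0);        \
        for (;;) {                                   \
            long _el = elapsed_ms(&_t0);             \
            if (_el >= (ms)) {                       \
                _ret = -1;                           \
                break;                               \
            }                                        \
            _ret = (ms) - _el;                       \
            body                                     \
        }                                            \
        _ret;                                        \
    })

    int main(void)
    {
        volatile int reg = 1; /* stands in for an nvkm_rd32() status bit */
        /* the bit never clears, so this demonstrates the timeout path */
        if (poll_msec(2000,
            if (!(reg & 0x00000001))
                break;
        ) < 0)
            fprintf(stderr, "flush timeout\n");
        return 0;
    }
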
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
new file mode 100644
index 000000000000..1eb764f22a49
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -0,0 +1,26 @@
+#ifndef __NV50_BAR_H__
+#define __NV50_BAR_H__
+#define nv50_bar(p) container_of((p), struct nv50_bar, base)
+#include "priv.h"
+
+struct nv50_bar {
+	struct nvkm_bar base;
+	u32 pgd_addr;
+	struct nvkm_gpuobj *mem;
+	struct nvkm_gpuobj *pad;
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *bar1_vm;
+	struct nvkm_gpuobj *bar1;
+	struct nvkm_vm *bar3_vm;
+	struct nvkm_gpuobj *bar3;
+};
+
+int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+		  int, u32 pgd_addr, struct nvkm_bar **);
+void *nv50_bar_dtor(struct nvkm_bar *);
+int nv50_bar_oneinit(struct nvkm_bar *);
+int nv50_bar_init(struct nvkm_bar *);
+struct nvkm_vm *nv50_bar_kmap(struct nvkm_bar *);
+int nv50_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+void nv50_bar_unmap(struct nvkm_bar *, struct nvkm_vma *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index aa85f61b48c2..d834ef20db5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -1,30 +1,19 @@
 #ifndef __NVKM_BAR_PRIV_H__
 #define __NVKM_BAR_PRIV_H__
+#define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
 #include <subdev/bar.h>
 
-#define nvkm_bar_create(p,e,o,d)                                            \
-	nvkm_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_bar_init(p)                                                    \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_bar_fini(p,s)                                                  \
-	nvkm_subdev_fini(&(p)->base, (s))
+void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
+		   int, struct nvkm_bar *);
 
-int nvkm_bar_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
-void nvkm_bar_destroy(struct nvkm_bar *);
-
-void _nvkm_bar_dtor(struct nvkm_object *);
-#define _nvkm_bar_init _nvkm_subdev_init
-#define _nvkm_bar_fini _nvkm_subdev_fini
-
-int  nvkm_bar_alloc(struct nvkm_bar *, struct nvkm_object *,
-		    struct nvkm_mem *, struct nvkm_object **);
+struct nvkm_bar_func {
+	void *(*dtor)(struct nvkm_bar *);
+	int (*oneinit)(struct nvkm_bar *);
+	int (*init)(struct nvkm_bar *);
+	struct nvkm_vm *(*kmap)(struct nvkm_bar *);
+	int  (*umap)(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
+	void (*flush)(struct nvkm_bar *);
+};
 
 void g84_bar_flush(struct nvkm_bar *);
-
-int gf100_bar_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-void gf100_bar_dtor(struct nvkm_object *);
-int gf100_bar_init(struct nvkm_object *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
index 08eb03fbc203..43f0ba1fba7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
@@ -33,14 +33,14 @@ nvbios_M0203Te(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 2 && bit_M.length > 0x04)
-			data = nv_ro16(bios, bit_M.offset + 0x03);
+			data = nvbios_rd16(bios, bit_M.offset + 0x03);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 0x01);
-				*len = nv_ro08(bios, data + 0x02);
-				*cnt = nv_ro08(bios, data + 0x03);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*len = nvbios_rd08(bios, data + 0x02);
+				*cnt = nvbios_rd08(bios, data + 0x03);
 				return data;
 			default:
 				break;
@@ -59,8 +59,8 @@ nvbios_M0203Tp(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->type    = nv_ro08(bios, data + 0x04);
-		info->pointer = nv_ro16(bios, data + 0x05);
+		info->type    = nvbios_rd08(bios, data + 0x04);
+		info->pointer = nvbios_rd16(bios, data + 0x05);
 		break;
 	default:
 		break;
@@ -89,9 +89,9 @@ nvbios_M0203Ep(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->type  = (nv_ro08(bios, data + 0x00) & 0x0f) >> 0;
-		info->strap = (nv_ro08(bios, data + 0x00) & 0xf0) >> 4;
-		info->group = (nv_ro08(bios, data + 0x01) & 0x0f) >> 0;
+		info->type  = (nvbios_rd08(bios, data + 0x00) & 0x0f) >> 0;
+		info->strap = (nvbios_rd08(bios, data + 0x00) & 0xf0) >> 4;
+		info->group = (nvbios_rd08(bios, data + 0x01) & 0x0f) >> 0;
 		return data;
 	default:
 		break;
@@ -103,12 +103,13 @@ u32
 nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
 	       struct nvbios_M0203E *info)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct nvbios_M0203T M0203T;
 	u8  cnt, len, idx = 0xff;
 	u32 data;
 
 	if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) {
-		nv_warn(bios, "M0203T not found\n");
+		nvkm_warn(subdev, "M0203T not found\n");
 		return 0x00000000;
 	}
 
@@ -119,7 +120,7 @@ nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
 				continue;
 			return data;
 		default:
-			nv_warn(bios, "M0203T type %02x\n", M0203T.type);
+			nvkm_warn(subdev, "M0203T type %02x\n", M0203T.type);
 			return 0x00000000;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
index e1a8ad5f3066..293a6af1b1d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
@@ -34,16 +34,16 @@ nvbios_M0205Te(struct nvkm_bios *bios,
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 2 && bit_M.length > 0x08)
-			data = nv_ro32(bios, bit_M.offset + 0x05);
+			data = nvbios_rd32(bios, bit_M.offset + 0x05);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 0x01);
-				*len = nv_ro08(bios, data + 0x02);
-				*ssz = nv_ro08(bios, data + 0x03);
-				*snr = nv_ro08(bios, data + 0x04);
-				*cnt = nv_ro08(bios, data + 0x05);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*len = nvbios_rd08(bios, data + 0x02);
+				*ssz = nvbios_rd08(bios, data + 0x03);
+				*snr = nvbios_rd08(bios, data + 0x04);
+				*cnt = nvbios_rd08(bios, data + 0x05);
 				return data;
 			default:
 				break;
@@ -63,7 +63,7 @@ nvbios_M0205Tp(struct nvkm_bios *bios,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->freq = nv_ro16(bios, data + 0x06);
+		info->freq = nvbios_rd16(bios, data + 0x06);
 		break;
 	default:
 		break;
@@ -96,7 +96,7 @@ nvbios_M0205Ep(struct nvkm_bios *bios, int idx,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->type = nv_ro08(bios, data + 0x00) & 0x0f;
+		info->type = nvbios_rd08(bios, data + 0x00) & 0x0f;
 		return data;
 	default:
 		break;
@@ -126,7 +126,7 @@ nvbios_M0205Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->data = nv_ro08(bios, data + 0x00);
+		info->data = nvbios_rd08(bios, data + 0x00);
 		return data;
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
index 3026920c3358..95d49a526472 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
@@ -34,16 +34,16 @@ nvbios_M0209Te(struct nvkm_bios *bios,
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 2 && bit_M.length > 0x0c)
-			data = nv_ro32(bios, bit_M.offset + 0x09);
+			data = nvbios_rd32(bios, bit_M.offset + 0x09);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 0x01);
-				*len = nv_ro08(bios, data + 0x02);
-				*ssz = nv_ro08(bios, data + 0x03);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*len = nvbios_rd08(bios, data + 0x02);
+				*ssz = nvbios_rd08(bios, data + 0x03);
 				*snr = 1;
-				*cnt = nv_ro08(bios, data + 0x04);
+				*cnt = nvbios_rd08(bios, data + 0x04);
 				return data;
 			default:
 				break;
@@ -78,12 +78,12 @@ nvbios_M0209Ep(struct nvkm_bios *bios, int idx,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->v00_40 = (nv_ro08(bios, data + 0x00) & 0x40) >> 6;
-		info->bits   =  nv_ro08(bios, data + 0x00) & 0x3f;
-		info->modulo =  nv_ro08(bios, data + 0x01);
-		info->v02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
-		info->v02_07 =  nv_ro08(bios, data + 0x02) & 0x07;
-		info->v03    =  nv_ro08(bios, data + 0x03);
+		info->v00_40 = (nvbios_rd08(bios, data + 0x00) & 0x40) >> 6;
+		info->bits   =  nvbios_rd08(bios, data + 0x00) & 0x3f;
+		info->modulo =  nvbios_rd08(bios, data + 0x01);
+		info->v02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+		info->v02_07 =  nvbios_rd08(bios, data + 0x02) & 0x07;
+		info->v03    =  nvbios_rd08(bios, data + 0x03);
 		return data;
 	default:
 		break;
@@ -122,7 +122,7 @@ nvbios_M0209Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
 				u32 mask = (1ULL << M0209E.bits) - 1;
 				u16  off = bits / 8;
 				u8   mod = bits % 8;
-				info->data[i] = nv_ro32(bios, data + off);
+				info->data[i] = nvbios_rd32(bios, data + off);
 				info->data[i] = info->data[i] >> mod;
 				info->data[i] = info->data[i] & mask;
 			}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
index b72edcf849b6..3f7db3eb3ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
@@ -34,15 +34,15 @@ nvbios_P0260Te(struct nvkm_bios *bios,
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2 && bit_P.length > 0x63)
-			data = nv_ro32(bios, bit_P.offset + 0x60);
+			data = nvbios_rd32(bios, bit_P.offset + 0x60);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
+			*ver = nvbios_rd08(bios, data + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 1);
-				*cnt = nv_ro08(bios, data + 2);
+				*hdr = nvbios_rd08(bios, data + 1);
+				*cnt = nvbios_rd08(bios, data + 2);
 				*len = 4;
-				*xnr = nv_ro08(bios, data + 3);
+				*xnr = nvbios_rd08(bios, data + 3);
 				*xsz = 4;
 				return data;
 			default:
@@ -72,7 +72,7 @@ nvbios_P0260Ep(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->data = nv_ro32(bios, data);
+		info->data = nvbios_rd32(bios, data);
 		return data;
 	default:
 		break;
@@ -98,7 +98,7 @@ nvbios_P0260Xp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->data = nv_ro32(bios, data);
+		info->data = nvbios_rd32(bios, data);
 		return data;
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 8db204f92ed3..79536897efaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -53,6 +53,20 @@ nvbios_findstr(const u8 *data, int size, const char *str, int len)
 }
 
 int
+nvbios_memcmp(struct nvkm_bios *bios, u32 addr, const char *str, u32 len)
+{
+	unsigned char c1, c2;
+
+	while (len--) {
+		c1 = nvbios_rd08(bios, addr++);
+		c2 = *(str++);
+		if (c1 != c2)
+			return c1 - c2;
+	}
+	return 0;
+}
+
+int
 nvbios_extend(struct nvkm_bios *bios, u32 length)
 {
 	if (bios->size < length) {
@@ -69,62 +83,29 @@ nvbios_extend(struct nvkm_bios *bios, u32 length)
 	return 0;
 }
 
-static u8
-nvkm_bios_rd08(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return bios->data[addr];
-}
-
-static u16
-nvkm_bios_rd16(struct nvkm_object *object, u64 addr)
+static void *
+nvkm_bios_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_bios *bios = (void *)object;
-	return get_unaligned_le16(&bios->data[addr]);
-}
-
-static u32
-nvkm_bios_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return get_unaligned_le32(&bios->data[addr]);
-}
-
-static void
-nvkm_bios_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
-	struct nvkm_bios *bios = (void *)object;
-	bios->data[addr] = data;
-}
-
-static void
-nvkm_bios_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
-	struct nvkm_bios *bios = (void *)object;
-	put_unaligned_le16(data, &bios->data[addr]);
+	struct nvkm_bios *bios = nvkm_bios(subdev);
+	kfree(bios->data);
+	return bios;
 }
 
-static void
-nvkm_bios_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_bios *bios = (void *)object;
-	put_unaligned_le32(data, &bios->data[addr]);
-}
+static const struct nvkm_subdev_func
+nvkm_bios = {
+	.dtor = nvkm_bios_dtor,
+};
 
-static int
-nvkm_bios_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
 {
 	struct nvkm_bios *bios;
 	struct bit_entry bit_i;
 	int ret;
 
-	ret = nvkm_subdev_create(parent, engine, oclass, 0,
-				 "VBIOS", "bios", &bios);
-	*pobject = nv_object(bios);
-	if (ret)
-		return ret;
+	if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_bios, device, index, 0, &bios->subdev);
 
 	ret = nvbios_shadow(bios);
 	if (ret)
@@ -134,73 +115,33 @@ nvkm_bios_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
 					  "\xff\x7f""NV\0", 5);
 	if (bios->bmp_offset) {
-		nv_info(bios, "BMP version %x.%x\n",
-			bmp_version(bios) >> 8,
-			bmp_version(bios) & 0xff);
+		nvkm_debug(&bios->subdev, "BMP version %x.%x\n",
+			   bmp_version(bios) >> 8,
+			   bmp_version(bios) & 0xff);
 	}
 
 	bios->bit_offset = nvbios_findstr(bios->data, bios->size,
 					  "\xff\xb8""BIT", 5);
 	if (bios->bit_offset)
-		nv_info(bios, "BIT signature found\n");
+		nvkm_debug(&bios->subdev, "BIT signature found\n");
 
 	/* determine the vbios version number */
 	if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) {
-		bios->version.major = nv_ro08(bios, bit_i.offset + 3);
-		bios->version.chip  = nv_ro08(bios, bit_i.offset + 2);
-		bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
-		bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
-		bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
+		bios->version.major = nvbios_rd08(bios, bit_i.offset + 3);
+		bios->version.chip  = nvbios_rd08(bios, bit_i.offset + 2);
+		bios->version.minor = nvbios_rd08(bios, bit_i.offset + 1);
+		bios->version.micro = nvbios_rd08(bios, bit_i.offset + 0);
+		bios->version.patch = nvbios_rd08(bios, bit_i.offset + 4);
 	} else
 	if (bmp_version(bios)) {
-		bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
-		bios->version.chip  = nv_ro08(bios, bios->bmp_offset + 12);
-		bios->version.minor = nv_ro08(bios, bios->bmp_offset + 11);
-		bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
+		bios->version.major = nvbios_rd08(bios, bios->bmp_offset + 13);
+		bios->version.chip  = nvbios_rd08(bios, bios->bmp_offset + 12);
+		bios->version.minor = nvbios_rd08(bios, bios->bmp_offset + 11);
+		bios->version.micro = nvbios_rd08(bios, bios->bmp_offset + 10);
 	}
 
-	nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
-		bios->version.major, bios->version.chip,
-		bios->version.minor, bios->version.micro, bios->version.patch);
-
+	nvkm_info(&bios->subdev, "version %02x.%02x.%02x.%02x.%02x\n",
+		  bios->version.major, bios->version.chip,
+		  bios->version.minor, bios->version.micro, bios->version.patch);
 	return 0;
 }
-
-static void
-nvkm_bios_dtor(struct nvkm_object *object)
-{
-	struct nvkm_bios *bios = (void *)object;
-	kfree(bios->data);
-	nvkm_subdev_destroy(&bios->base);
-}
-
-static int
-nvkm_bios_init(struct nvkm_object *object)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return nvkm_subdev_init(&bios->base);
-}
-
-static int
-nvkm_bios_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return nvkm_subdev_fini(&bios->base, suspend);
-}
-
-struct nvkm_oclass
-nvkm_bios_oclass = {
-	.handle = NV_SUBDEV(VBIOS, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nvkm_bios_ctor,
-		.dtor = nvkm_bios_dtor,
-		.init = nvkm_bios_init,
-		.fini = nvkm_bios_fini,
-		.rd08 = nvkm_bios_rd08,
-		.rd16 = nvkm_bios_rd16,
-		.rd32 = nvkm_bios_rd32,
-		.wr08 = nvkm_bios_wr08,
-		.wr16 = nvkm_bios_wr16,
-		.wr32 = nvkm_bios_wr32,
-	},
-};
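
The rd08/rd16/rd32 object methods removed above were plain little-endian, possibly-unaligned reads of the shadowed image in bios->data; the nvbios_rd* helpers used throughout this series do the same job without the nvkm_object indirection. A standalone sketch consistent with the removed code (bios_rd* are illustrative names; byte-wise assembly is equivalent to the get_unaligned_le* calls and alignment-safe):

    #include <stdint.h>
    #include <stdio.h>

    struct bios { uint8_t *data; uint32_t size; };

    static uint8_t bios_rd08(struct bios *b, uint32_t addr)
    {
        return b->data[addr];
    }

    static uint16_t bios_rd16(struct bios *b, uint32_t addr)
    {
        return bios_rd08(b, addr) | (uint16_t)bios_rd08(b, addr + 1) << 8;
    }

    static uint32_t bios_rd32(struct bios *b, uint32_t addr)
    {
        return bios_rd16(b, addr) | (uint32_t)bios_rd16(b, addr + 2) << 16;
    }

    int main(void)
    {
        uint8_t image[] = { 0x55, 0xaa, 0x78, 0x56, 0x34, 0x12 };
        struct bios b = { image, sizeof(image) };
        printf("%04x %08x\n", bios_rd16(&b, 0),
               bios_rd32(&b, 2)); /* aa55 12345678 */
        return 0;
    }
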
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
index eab540496cdf..070ff33f8d5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
@@ -28,18 +28,18 @@ int
 bit_entry(struct nvkm_bios *bios, u8 id, struct bit_entry *bit)
 {
 	if (likely(bios->bit_offset)) {
-		u8  entries = nv_ro08(bios, bios->bit_offset + 10);
+		u8  entries = nvbios_rd08(bios, bios->bit_offset + 10);
 		u32 entry   = bios->bit_offset + 12;
 		while (entries--) {
-			if (nv_ro08(bios, entry + 0) == id) {
-				bit->id      = nv_ro08(bios, entry + 0);
-				bit->version = nv_ro08(bios, entry + 1);
-				bit->length  = nv_ro16(bios, entry + 2);
-				bit->offset  = nv_ro16(bios, entry + 4);
+			if (nvbios_rd08(bios, entry + 0) == id) {
+				bit->id      = nvbios_rd08(bios, entry + 0);
+				bit->version = nvbios_rd08(bios, entry + 1);
+				bit->length  = nvbios_rd16(bios, entry + 2);
+				bit->offset  = nvbios_rd16(bios, entry + 4);
 				return 0;
 			}
 
-			entry += nv_ro08(bios, bios->bit_offset + 9);
+			entry += nvbios_rd08(bios, bios->bit_offset + 9);
 		}
 
 		return -ENOENT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
index 12e958533f46..3756ec91a88d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
@@ -34,17 +34,17 @@ nvbios_boostTe(struct nvkm_bios *bios,
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			boost = nv_ro16(bios, bit_P.offset + 0x30);
+			boost = nvbios_rd16(bios, bit_P.offset + 0x30);
 
 		if (boost) {
-			*ver = nv_ro08(bios, boost + 0);
+			*ver = nvbios_rd08(bios, boost + 0);
 			switch (*ver) {
 			case 0x11:
-				*hdr = nv_ro08(bios, boost + 1);
-				*cnt = nv_ro08(bios, boost + 5);
-				*len = nv_ro08(bios, boost + 2);
-				*snr = nv_ro08(bios, boost + 4);
-				*ssz = nv_ro08(bios, boost + 3);
+				*hdr = nvbios_rd08(bios, boost + 1);
+				*cnt = nvbios_rd08(bios, boost + 5);
+				*len = nvbios_rd08(bios, boost + 2);
+				*snr = nvbios_rd08(bios, boost + 4);
+				*ssz = nvbios_rd08(bios, boost + 3);
 				return boost;
 			default:
 				break;
@@ -78,9 +78,9 @@ nvbios_boostEp(struct nvkm_bios *bios, int idx,
 	u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
-		info->min    =  nv_ro16(bios, data + 0x02) * 1000;
-		info->max    =  nv_ro16(bios, data + 0x04) * 1000;
+		info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
+		info->min    =  nvbios_rd16(bios, data + 0x02) * 1000;
+		info->max    =  nvbios_rd16(bios, data + 0x04) * 1000;
 	}
 	return data;
 }
@@ -117,10 +117,10 @@ nvbios_boostSp(struct nvkm_bios *bios, int idx,
 	data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->domain  = nv_ro08(bios, data + 0x00);
-		info->percent = nv_ro08(bios, data + 0x01);
-		info->min     = nv_ro16(bios, data + 0x02) * 1000;
-		info->max     = nv_ro16(bios, data + 0x04) * 1000;
+		info->domain  = nvbios_rd08(bios, data + 0x00);
+		info->percent = nvbios_rd08(bios, data + 0x01);
+		info->min     = nvbios_rd16(bios, data + 0x02) * 1000;
+		info->max     = nvbios_rd16(bios, data + 0x04) * 1000;
 	}
 	return data;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
index 706a1650a4f2..276823426332 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
@@ -30,12 +30,12 @@ nvbios_connTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
 	u32 dcb = dcb_table(bios, ver, hdr, cnt, len);
 	if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
-		u32 data = nv_ro16(bios, dcb + 0x14);
+		u32 data = nvbios_rd16(bios, dcb + 0x14);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
-			*hdr = nv_ro08(bios, data + 1);
-			*cnt = nv_ro08(bios, data + 2);
-			*len = nv_ro08(bios, data + 3);
+			*ver = nvbios_rd08(bios, data + 0);
+			*hdr = nvbios_rd08(bios, data + 1);
+			*cnt = nvbios_rd08(bios, data + 2);
+			*len = nvbios_rd08(bios, data + 3);
 			return data;
 		}
 	}
@@ -77,18 +77,18 @@ nvbios_connEp(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
 	switch (!!data * *ver) {
 	case 0x30:
 	case 0x40:
-		info->type     =  nv_ro08(bios, data + 0x00);
-		info->location =  nv_ro08(bios, data + 0x01) & 0x0f;
-		info->hpd      = (nv_ro08(bios, data + 0x01) & 0x30) >> 4;
-		info->dp       = (nv_ro08(bios, data + 0x01) & 0xc0) >> 6;
+		info->type     =  nvbios_rd08(bios, data + 0x00);
+		info->location =  nvbios_rd08(bios, data + 0x01) & 0x0f;
+		info->hpd      = (nvbios_rd08(bios, data + 0x01) & 0x30) >> 4;
+		info->dp       = (nvbios_rd08(bios, data + 0x01) & 0xc0) >> 6;
 		if (*len < 4)
 			return data;
-		info->hpd     |= (nv_ro08(bios, data + 0x02) & 0x03) << 2;
-		info->dp      |=  nv_ro08(bios, data + 0x02) & 0x0c;
-		info->di       = (nv_ro08(bios, data + 0x02) & 0xf0) >> 4;
-		info->hpd     |= (nv_ro08(bios, data + 0x03) & 0x07) << 4;
-		info->sr       = (nv_ro08(bios, data + 0x03) & 0x08) >> 3;
-		info->lcdid    = (nv_ro08(bios, data + 0x03) & 0x70) >> 4;
+		info->hpd     |= (nvbios_rd08(bios, data + 0x02) & 0x03) << 2;
+		info->dp      |=  nvbios_rd08(bios, data + 0x02) & 0x0c;
+		info->di       = (nvbios_rd08(bios, data + 0x02) & 0xf0) >> 4;
+		info->hpd     |= (nvbios_rd08(bios, data + 0x03) & 0x07) << 4;
+		info->sr       = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
+		info->lcdid    = (nvbios_rd08(bios, data + 0x03) & 0x70) >> 4;
 		return data;
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
index 16f7ad8a4f80..32e01624a162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
@@ -34,17 +34,17 @@ nvbios_cstepTe(struct nvkm_bios *bios,
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			cstep = nv_ro16(bios, bit_P.offset + 0x34);
+			cstep = nvbios_rd16(bios, bit_P.offset + 0x34);
 
 		if (cstep) {
-			*ver = nv_ro08(bios, cstep + 0);
+			*ver = nvbios_rd08(bios, cstep + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, cstep + 1);
-				*cnt = nv_ro08(bios, cstep + 3);
-				*len = nv_ro08(bios, cstep + 2);
-				*xnr = nv_ro08(bios, cstep + 5);
-				*xsz = nv_ro08(bios, cstep + 4);
+				*hdr = nvbios_rd08(bios, cstep + 1);
+				*cnt = nvbios_rd08(bios, cstep + 3);
+				*len = nvbios_rd08(bios, cstep + 2);
+				*xnr = nvbios_rd08(bios, cstep + 5);
+				*xsz = nvbios_rd08(bios, cstep + 4);
 				return cstep;
 			default:
 				break;
@@ -75,8 +75,8 @@ nvbios_cstepEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
 	u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
-		info->index   = nv_ro08(bios, data + 0x03);
+		info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
+		info->index   = nvbios_rd08(bios, data + 0x03);
 	}
 	return data;
 }
@@ -113,10 +113,10 @@ nvbios_cstepXp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
 	u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->freq    = nv_ro16(bios, data + 0x00) * 1000;
-		info->unkn[0] = nv_ro08(bios, data + 0x02);
-		info->unkn[1] = nv_ro08(bios, data + 0x03);
-		info->voltage = nv_ro08(bios, data + 0x04);
+		info->freq    = nvbios_rd16(bios, data + 0x00) * 1000;
+		info->unkn[0] = nvbios_rd08(bios, data + 0x02);
+		info->unkn[1] = nvbios_rd08(bios, data + 0x03);
+		info->voltage = nvbios_rd08(bios, data + 0x04);
 	}
 	return data;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
index 8d78140f9401..8304b806f2a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
@@ -24,38 +24,37 @@
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
 
-#include <core/device.h>
-
 u16
 dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-	struct nvkm_device *device = nv_device(bios);
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	u16 dcb = 0x0000;
 
 	if (device->card_type > NV_04)
-		dcb = nv_ro16(bios, 0x36);
+		dcb = nvbios_rd16(bios, 0x36);
 	if (!dcb) {
-		nv_warn(bios, "DCB table not found\n");
+		nvkm_warn(subdev, "DCB table not found\n");
 		return dcb;
 	}
 
-	*ver = nv_ro08(bios, dcb);
+	*ver = nvbios_rd08(bios, dcb);
 
 	if (*ver >= 0x42) {
-		nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
+		nvkm_warn(subdev, "DCB version 0x%02x unknown\n", *ver);
 		return 0x0000;
 	} else
 	if (*ver >= 0x30) {
-		if (nv_ro32(bios, dcb + 6) == 0x4edcbdcb) {
-			*hdr = nv_ro08(bios, dcb + 1);
-			*cnt = nv_ro08(bios, dcb + 2);
-			*len = nv_ro08(bios, dcb + 3);
+		if (nvbios_rd32(bios, dcb + 6) == 0x4edcbdcb) {
+			*hdr = nvbios_rd08(bios, dcb + 1);
+			*cnt = nvbios_rd08(bios, dcb + 2);
+			*len = nvbios_rd08(bios, dcb + 3);
 			return dcb;
 		}
 	} else
 	if (*ver >= 0x20) {
-		if (nv_ro32(bios, dcb + 4) == 0x4edcbdcb) {
-			u16 i2c = nv_ro16(bios, dcb + 2);
+		if (nvbios_rd32(bios, dcb + 4) == 0x4edcbdcb) {
+			u16 i2c = nvbios_rd16(bios, dcb + 2);
 			*hdr = 8;
 			*cnt = (i2c - dcb) / 8;
 			*len = 8;
@@ -63,8 +62,8 @@ dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 		}
 	} else
 	if (*ver >= 0x15) {
-		if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
-			u16 i2c = nv_ro16(bios, dcb + 2);
+		if (!nvbios_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
+			u16 i2c = nvbios_rd16(bios, dcb + 2);
 			*hdr = 4;
 			*cnt = (i2c - dcb) / 10;
 			*len = 10;
@@ -88,11 +87,11 @@ dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 		 *
 		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
 		 */
-		nv_warn(bios, "DCB contains no useful data\n");
+		nvkm_debug(subdev, "DCB contains no useful data\n");
 		return 0x0000;
 	}
 
-	nv_warn(bios, "DCB header validation failed\n");
+	nvkm_warn(subdev, "DCB header validation failed\n");
 	return 0x0000;
 }
 
@@ -126,7 +125,7 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
 	memset(outp, 0x00, sizeof(*outp));
 	if (dcb) {
 		if (*ver >= 0x20) {
-			u32 conn = nv_ro32(bios, dcb + 0x00);
+			u32 conn = nvbios_rd32(bios, dcb + 0x00);
 			outp->or        = (conn & 0x0f000000) >> 24;
 			outp->location  = (conn & 0x00300000) >> 20;
 			outp->bus       = (conn & 0x000f0000) >> 16;
@@ -140,7 +139,7 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
 		}
 
 		if (*ver >= 0x40) {
-			u32 conf = nv_ro32(bios, dcb + 0x04);
+			u32 conf = nvbios_rd32(bios, dcb + 0x04);
 			switch (outp->type) {
 			case DCB_OUTPUT_DP:
 				switch (conf & 0x00e00000) {
@@ -156,20 +155,19 @@ dcb_outp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len,
 					break;
 				}
 
-				outp->dpconf.link_nr = (conf & 0x0f000000) >> 24;
-				if (*ver < 0x41) {
-					switch (outp->dpconf.link_nr) {
-					case 0x0f:
-						outp->dpconf.link_nr = 4;
-						break;
-					case 0x03:
-						outp->dpconf.link_nr = 2;
-						break;
-					case 0x01:
-					default:
-						outp->dpconf.link_nr = 1;
-						break;
-					}
+				switch ((conf & 0x0f000000) >> 24) {
+				case 0xf:
+				case 0x4:
+					outp->dpconf.link_nr = 4;
+					break;
+				case 0x3:
+				case 0x2:
+					outp->dpconf.link_nr = 2;
+					break;
+				case 0x1:
+				default:
+					outp->dpconf.link_nr = 1;
+					break;
 				}
 
 				/* fall-through... */
@@ -215,14 +213,14 @@ dcb_outp_foreach(struct nvkm_bios *bios, void *data,
 	u16 outp;
 
 	while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
-		if (nv_ro32(bios, outp) == 0x00000000)
+		if (nvbios_rd32(bios, outp) == 0x00000000)
 			break; /* seen on an NV11 with DCB v1.5 */
-		if (nv_ro32(bios, outp) == 0xffffffff)
+		if (nvbios_rd32(bios, outp) == 0xffffffff)
 			break; /* seen on an NV17 with DCB v2.0 */
 
-		if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED)
+		if (nvbios_rd08(bios, outp) == DCB_OUTPUT_UNUSED)
 			continue;
-		if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL)
+		if (nvbios_rd08(bios, outp) == DCB_OUTPUT_EOL)
 			break;
 
 		ret = exec(bios, data, idx, outp);
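
The link_nr hunk in dcb_outp_parse() above folds the lane-mask encoding (0xf/0x3/0x1) and the plain-count encoding (4/2/1) of conf bits 27:24 into a single switch, instead of only translating the mask form when *ver < 0x41. Restated as a standalone helper (dp_link_nr is a hypothetical name):

    #include <stdint.h>
    #include <stdio.h>

    /* both encodings of the DP lane-count nibble collapse to one switch */
    static int dp_link_nr(uint8_t raw)
    {
        switch (raw & 0xf) {
        case 0xf:
        case 0x4:
            return 4;
        case 0x3:
        case 0x2:
            return 2;
        case 0x1:
        default:
            return 1;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", dp_link_nr(0xf), dp_link_nr(0x2),
               dp_link_nr(0x1)); /* 4 2 1 */
        return 0;
    }
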
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
index 262c410b7ee2..a5e92135cd77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
@@ -33,17 +33,17 @@ nvbios_disp_table(struct nvkm_bios *bios,
 
 	if (!bit_entry(bios, 'U', &U)) {
 		if (U.version == 1) {
-			u16 data = nv_ro16(bios, U.offset);
+			u16 data = nvbios_rd16(bios, U.offset);
 			if (data) {
-				*ver = nv_ro08(bios, data + 0x00);
+				*ver = nvbios_rd08(bios, data + 0x00);
 				switch (*ver) {
 				case 0x20:
 				case 0x21:
 				case 0x22:
-					*hdr = nv_ro08(bios, data + 0x01);
-					*len = nv_ro08(bios, data + 0x02);
-					*cnt = nv_ro08(bios, data + 0x03);
-					*sub = nv_ro08(bios, data + 0x04);
+					*hdr = nvbios_rd08(bios, data + 0x01);
+					*len = nvbios_rd08(bios, data + 0x02);
+					*cnt = nvbios_rd08(bios, data + 0x03);
+					*sub = nvbios_rd08(bios, data + 0x04);
 					return data;
 				default:
 					break;
@@ -72,7 +72,7 @@ nvbios_disp_parse(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len, u8 *sub,
 {
 	u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
 	if (data && *len >= 2) {
-		info->data = nv_ro16(bios, data + 0);
+		info->data = nvbios_rd16(bios, data + 0);
 		return data;
 	}
 	return 0x0000;
@@ -85,7 +85,7 @@ nvbios_outp_entry(struct nvkm_bios *bios, u8 idx,
 	struct nvbios_disp info;
 	u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
 	if (data) {
-		*cnt = nv_ro08(bios, info.data + 0x05);
+		*cnt = nvbios_rd08(bios, info.data + 0x05);
 		*len = 0x06;
 		data = info.data;
 	}
@@ -98,15 +98,15 @@ nvbios_outp_parse(struct nvkm_bios *bios, u8 idx,
 {
 	u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
 	if (data && *hdr >= 0x0a) {
-		info->type      = nv_ro16(bios, data + 0x00);
-		info->mask      = nv_ro32(bios, data + 0x02);
+		info->type      = nvbios_rd16(bios, data + 0x00);
+		info->mask      = nvbios_rd32(bios, data + 0x02);
 		if (*ver <= 0x20) /* match any link */
 			info->mask |= 0x00c0;
-		info->script[0] = nv_ro16(bios, data + 0x06);
-		info->script[1] = nv_ro16(bios, data + 0x08);
+		info->script[0] = nvbios_rd16(bios, data + 0x06);
+		info->script[1] = nvbios_rd16(bios, data + 0x08);
 		info->script[2] = 0x0000;
 		if (*hdr >= 0x0c)
-			info->script[2] = nv_ro16(bios, data + 0x0a);
+			info->script[2] = nvbios_rd16(bios, data + 0x0a);
 		return data;
 	}
 	return 0x0000;
@@ -141,9 +141,9 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 {
 	u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
 	if (data) {
-		info->match     = nv_ro16(bios, data + 0x00);
-		info->clkcmp[0] = nv_ro16(bios, data + 0x02);
-		info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+		info->match     = nvbios_rd16(bios, data + 0x00);
+		info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
+		info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
 	}
 	return data;
 }
@@ -164,8 +164,8 @@ u16
 nvbios_oclk_match(struct nvkm_bios *bios, u16 cmp, u32 khz)
 {
 	while (cmp) {
-		if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
-			return  nv_ro16(bios, cmp + 0x02);
+		if (khz / 10 >= nvbios_rd16(bios, cmp + 0x00))
+			return  nvbios_rd16(bios, cmp + 0x02);
 		cmp += 0x04;
 	}
 	return 0x0000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 95970faae6c8..05332476354a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -32,17 +32,17 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 
 	if (!bit_entry(bios, 'd', &d)) {
 		if (d.version == 1 && d.length >= 2) {
-			u16 data = nv_ro16(bios, d.offset);
+			u16 data = nvbios_rd16(bios, d.offset);
 			if (data) {
-				*ver = nv_ro08(bios, data + 0x00);
+				*ver = nvbios_rd08(bios, data + 0x00);
 				switch (*ver) {
 				case 0x21:
 				case 0x30:
 				case 0x40:
 				case 0x41:
-					*hdr = nv_ro08(bios, data + 0x01);
-					*len = nv_ro08(bios, data + 0x02);
-					*cnt = nv_ro08(bios, data + 0x03);
+					*hdr = nvbios_rd08(bios, data + 0x01);
+					*len = nvbios_rd08(bios, data + 0x02);
+					*cnt = nvbios_rd08(bios, data + 0x03);
 					return data;
 				default:
 					break;
@@ -60,17 +60,17 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
 {
 	u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
 	if (data && idx < *cnt) {
-		u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
+		u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
 		switch (*ver * !!outp) {
 		case 0x21:
 		case 0x30:
-			*hdr = nv_ro08(bios, data + 0x04);
-			*len = nv_ro08(bios, data + 0x05);
-			*cnt = nv_ro08(bios, outp + 0x04);
+			*hdr = nvbios_rd08(bios, data + 0x04);
+			*len = nvbios_rd08(bios, data + 0x05);
+			*cnt = nvbios_rd08(bios, outp + 0x04);
 			break;
 		case 0x40:
 		case 0x41:
-			*hdr = nv_ro08(bios, data + 0x04);
+			*hdr = nvbios_rd08(bios, data + 0x04);
 			*cnt = 0;
 			*len = 0;
 			break;
@@ -91,31 +91,31 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
 	u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
 	if (data && *ver) {
-		info->type = nv_ro16(bios, data + 0x00);
-		info->mask = nv_ro16(bios, data + 0x02);
+		info->type = nvbios_rd16(bios, data + 0x00);
+		info->mask = nvbios_rd16(bios, data + 0x02);
 		switch (*ver) {
 		case 0x21:
 		case 0x30:
-			info->flags     = nv_ro08(bios, data + 0x05);
-			info->script[0] = nv_ro16(bios, data + 0x06);
-			info->script[1] = nv_ro16(bios, data + 0x08);
-			info->lnkcmp    = nv_ro16(bios, data + 0x0a);
+			info->flags     = nvbios_rd08(bios, data + 0x05);
+			info->script[0] = nvbios_rd16(bios, data + 0x06);
+			info->script[1] = nvbios_rd16(bios, data + 0x08);
+			info->lnkcmp    = nvbios_rd16(bios, data + 0x0a);
 			if (*len >= 0x0f) {
-				info->script[2] = nv_ro16(bios, data + 0x0c);
-				info->script[3] = nv_ro16(bios, data + 0x0e);
+				info->script[2] = nvbios_rd16(bios, data + 0x0c);
+				info->script[3] = nvbios_rd16(bios, data + 0x0e);
 			}
 			if (*len >= 0x11)
-				info->script[4] = nv_ro16(bios, data + 0x10);
+				info->script[4] = nvbios_rd16(bios, data + 0x10);
 			break;
 		case 0x40:
 		case 0x41:
-			info->flags     = nv_ro08(bios, data + 0x04);
-			info->script[0] = nv_ro16(bios, data + 0x05);
-			info->script[1] = nv_ro16(bios, data + 0x07);
-			info->lnkcmp    = nv_ro16(bios, data + 0x09);
-			info->script[2] = nv_ro16(bios, data + 0x0b);
-			info->script[3] = nv_ro16(bios, data + 0x0d);
-			info->script[4] = nv_ro16(bios, data + 0x0f);
+			info->flags     = nvbios_rd08(bios, data + 0x04);
+			info->script[0] = nvbios_rd16(bios, data + 0x05);
+			info->script[1] = nvbios_rd16(bios, data + 0x07);
+			info->lnkcmp    = nvbios_rd16(bios, data + 0x09);
+			info->script[2] = nvbios_rd16(bios, data + 0x0b);
+			info->script[3] = nvbios_rd16(bios, data + 0x0d);
+			info->script[4] = nvbios_rd16(bios, data + 0x0f);
 			break;
 		default:
 			data = 0x0000;
@@ -147,8 +147,9 @@ nvbios_dpcfg_entry(struct nvkm_bios *bios, u16 outp, u8 idx,
 	if (*ver >= 0x40) {
 		outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
 		*hdr = *hdr + (*len * * cnt);
-		*len = nv_ro08(bios, outp + 0x06);
-		*cnt = nv_ro08(bios, outp + 0x07);
+		*len = nvbios_rd08(bios, outp + 0x06);
+		*cnt = nvbios_rd08(bios, outp + 0x07) *
+		       nvbios_rd08(bios, outp + 0x05);
 	}
 
 	if (idx < *cnt)
@@ -167,17 +168,17 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 	if (data) {
 		switch (*ver) {
 		case 0x21:
-			info->dc    = nv_ro08(bios, data + 0x02);
-			info->pe    = nv_ro08(bios, data + 0x03);
-			info->tx_pu = nv_ro08(bios, data + 0x04);
+			info->dc    = nvbios_rd08(bios, data + 0x02);
+			info->pe    = nvbios_rd08(bios, data + 0x03);
+			info->tx_pu = nvbios_rd08(bios, data + 0x04);
 			break;
 		case 0x30:
 		case 0x40:
 		case 0x41:
-			info->pc    = nv_ro08(bios, data + 0x00);
-			info->dc    = nv_ro08(bios, data + 0x01);
-			info->pe    = nv_ro08(bios, data + 0x02);
-			info->tx_pu = nv_ro08(bios, data + 0x03) & 0x0f;
+			info->pc    = nvbios_rd08(bios, data + 0x00);
+			info->dc    = nvbios_rd08(bios, data + 0x01);
+			info->pe    = nvbios_rd08(bios, data + 0x02);
+			info->tx_pu = nvbios_rd08(bios, data + 0x03);
 			break;
 		default:
 			data = 0x0000;
@@ -196,17 +197,15 @@ nvbios_dpcfg_match(struct nvkm_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
 	u16 data;
 
 	if (*ver >= 0x30) {
-		/*XXX: there's a second set of these on at least 4.1, that
-		 *     i've witnessed nvidia using instead of the first
-		 *     on gm204.  figure out what/why
-		 */
 		const u8 vsoff[] = { 0, 4, 7, 9 };
 		idx = (pc * 10) + vsoff[vs] + pe;
+		if (*ver >= 0x40 && *hdr >= 0x12)
+			idx += nvbios_rd08(bios, outp + 0x11) * 40;
 	} else {
 		while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
 						  ver, hdr, cnt, len))) {
-			if (nv_ro08(bios, data + 0x00) == vs &&
-			    nv_ro08(bios, data + 0x01) == pe)
+			if (nvbios_rd08(bios, data + 0x00) == vs &&
+			    nvbios_rd08(bios, data + 0x01) == pe)
 				break;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
index a8503a1854c4..c9e6f6ff7c50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
@@ -35,14 +35,14 @@ extdev_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
 	if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
 		return 0x0000;
 
-	extdev = nv_ro16(bios, dcb + 18);
+	extdev = nvbios_rd16(bios, dcb + 18);
 	if (!extdev)
 		return 0x0000;
 
-	*ver = nv_ro08(bios, extdev + 0);
-	*hdr = nv_ro08(bios, extdev + 1);
-	*cnt = nv_ro08(bios, extdev + 2);
-	*len = nv_ro08(bios, extdev + 3);
+	*ver = nvbios_rd08(bios, extdev + 0);
+	*hdr = nvbios_rd08(bios, extdev + 1);
+	*cnt = nvbios_rd08(bios, extdev + 2);
+	*len = nvbios_rd08(bios, extdev + 3);
 	return extdev + *hdr;
 }
 
@@ -60,9 +60,9 @@ static void
 extdev_parse_entry(struct nvkm_bios *bios, u16 offset,
 		   struct nvbios_extdev_func *entry)
 {
-	entry->type = nv_ro08(bios, offset + 0);
-	entry->addr = nv_ro08(bios, offset + 1);
-	entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1;
+	entry->type = nvbios_rd08(bios, offset + 0);
+	entry->addr = nvbios_rd08(bios, offset + 1);
+	entry->bus = (nvbios_rd08(bios, offset + 2) >> 4) & 1;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 8dba70d9d9a9..43006db6fd58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -33,15 +33,15 @@ nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2 && bit_P.length >= 0x5a)
-			fan = nv_ro16(bios, bit_P.offset + 0x58);
+			fan = nvbios_rd16(bios, bit_P.offset + 0x58);
 
 		if (fan) {
-			*ver = nv_ro08(bios, fan + 0);
+			*ver = nvbios_rd08(bios, fan + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, fan + 1);
-				*len = nv_ro08(bios, fan + 2);
-				*cnt = nv_ro08(bios, fan + 3);
+				*hdr = nvbios_rd08(bios, fan + 1);
+				*len = nvbios_rd08(bios, fan + 2);
+				*cnt = nvbios_rd08(bios, fan + 3);
 				return fan;
 			default:
 				break;
@@ -69,7 +69,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
 
 	u16 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
 	if (data) {
-		u8 type = nv_ro08(bios, data + 0x00);
+		u8 type = nvbios_rd08(bios, data + 0x00);
 		switch (type) {
 		case 0:
 			fan->type = NVBIOS_THERM_FAN_TOGGLE;
@@ -83,10 +83,10 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
 			fan->type = NVBIOS_THERM_FAN_UNK;
 		}
 
-		fan->min_duty = nv_ro08(bios, data + 0x02);
-		fan->max_duty = nv_ro08(bios, data + 0x03);
+		fan->min_duty = nvbios_rd08(bios, data + 0x02);
+		fan->max_duty = nvbios_rd08(bios, data + 0x03);
 
-		fan->pwm_freq = nv_ro32(bios, data + 0x0b) & 0xffffff;
+		fan->pwm_freq = nvbios_rd32(bios, data + 0x0b) & 0xffffff;
 	}
 
 	return data;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
index 8ce154d88f51..2107b558437a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
@@ -33,22 +33,22 @@ dcb_gpio_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
 	if (dcb) {
 		if (*ver >= 0x30 && *hdr >= 0x0c)
-			data = nv_ro16(bios, dcb + 0x0a);
+			data = nvbios_rd16(bios, dcb + 0x0a);
 		else
-		if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
-			data = nv_ro16(bios, dcb - 0x0f);
+		if (*ver >= 0x22 && nvbios_rd08(bios, dcb - 1) >= 0x13)
+			data = nvbios_rd16(bios, dcb - 0x0f);
 
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			if (*ver < 0x30) {
 				*hdr = 3;
-				*cnt = nv_ro08(bios, data + 0x02);
-				*len = nv_ro08(bios, data + 0x01);
+				*cnt = nvbios_rd08(bios, data + 0x02);
+				*len = nvbios_rd08(bios, data + 0x01);
 			} else
 			if (*ver <= 0x41) {
-				*hdr = nv_ro08(bios, data + 0x01);
-				*cnt = nv_ro08(bios, data + 0x02);
-				*len = nv_ro08(bios, data + 0x03);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*cnt = nvbios_rd08(bios, data + 0x02);
+				*len = nvbios_rd08(bios, data + 0x03);
 			} else {
 				data = 0x0000;
 			}
@@ -81,7 +81,7 @@ dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
 	u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
 	if (data) {
 		if (*ver < 0x40) {
-			u16 info = nv_ro16(bios, data);
+			u16 info = nvbios_rd16(bios, data);
 			*gpio = (struct dcb_gpio_func) {
 				.line = (info & 0x001f) >> 0,
 				.func = (info & 0x07e0) >> 5,
@@ -91,7 +91,7 @@ dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
 			};
 		} else
 		if (*ver < 0x41) {
-			u32 info = nv_ro32(bios, data);
+			u32 info = nvbios_rd32(bios, data);
 			*gpio = (struct dcb_gpio_func) {
 				.line = (info & 0x0000001f) >> 0,
 				.func = (info & 0x0000ff00) >> 8,
@@ -100,8 +100,8 @@ dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
 				.param = !!(info & 0x80000000),
 			};
 		} else {
-			u32 info = nv_ro32(bios, data + 0);
-			u8 info1 = nv_ro32(bios, data + 4);
+			u32 info = nvbios_rd32(bios, data + 0);
+			u8 info1 = nvbios_rd32(bios, data + 4);
 			*gpio = (struct dcb_gpio_func) {
 				.line = (info & 0x0000003f) >> 0,
 				.func = (info & 0x0000ff00) >> 8,
@@ -131,8 +131,8 @@ dcb_gpio_match(struct nvkm_bios *bios, int idx, u8 func, u8 line,
 	/* DCB 2.2, fixed TVDAC GPIO data */
 	if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
 		if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
-			u8 conf = nv_ro08(bios, data - 5);
-			u8 addr = nv_ro08(bios, data - 4);
+			u8 conf = nvbios_rd08(bios, data - 5);
+			u8 addr = nvbios_rd08(bios, data - 4);
 			if (conf & 0x01) {
 				*gpio = (struct dcb_gpio_func) {
 					.func = DCB_GPIO_TVDAC0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index c4e1f085ee10..0fc60be32727 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -32,21 +32,21 @@ dcb_i2c_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
 	if (dcb) {
 		if (*ver >= 0x15)
-			i2c = nv_ro16(bios, dcb + 2);
+			i2c = nvbios_rd16(bios, dcb + 2);
 		if (*ver >= 0x30)
-			i2c = nv_ro16(bios, dcb + 4);
+			i2c = nvbios_rd16(bios, dcb + 4);
 	}
 
 	if (i2c && *ver >= 0x42) {
-		nv_warn(bios, "ccb %02x not supported\n", *ver);
+		nvkm_warn(&bios->subdev, "ccb %02x not supported\n", *ver);
 		return 0x0000;
 	}
 
 	if (i2c && *ver >= 0x30) {
-		*ver = nv_ro08(bios, i2c + 0);
-		*hdr = nv_ro08(bios, i2c + 1);
-		*cnt = nv_ro08(bios, i2c + 2);
-		*len = nv_ro08(bios, i2c + 3);
+		*ver = nvbios_rd08(bios, i2c + 0);
+		*hdr = nvbios_rd08(bios, i2c + 1);
+		*cnt = nvbios_rd08(bios, i2c + 2);
+		*len = nvbios_rd08(bios, i2c + 3);
 	} else {
 		*ver = *ver; /* use DCB version */
 		*hdr = 0;
@@ -70,13 +70,14 @@ dcb_i2c_entry(struct nvkm_bios *bios, u8 idx, u8 *ver, u8 *len)
 int
 dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	u8  ver, len;
 	u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
 	if (ent) {
 		if (ver >= 0x41) {
-			u32 ent_value = nv_ro32(bios, ent);
-			u8 i2c_port = (ent_value >> 27) & 0x1f;
-			u8 dpaux_port = (ent_value >> 22) & 0x1f;
+			u32 ent_value = nvbios_rd32(bios, ent);
+			u8 i2c_port = (ent_value >> 0) & 0x1f;
+			u8 dpaux_port = (ent_value >> 5) & 0x1f;
 			/* value 0x1f means unused according to DCB 4.x spec */
 			if (i2c_port == 0x1f && dpaux_port == 0x1f)
 				info->type = DCB_I2C_UNUSED;
@@ -84,9 +85,9 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
 				info->type = DCB_I2C_PMGR;
 		} else
 		if (ver >= 0x30) {
-			info->type = nv_ro08(bios, ent + 0x03);
+			info->type = nvbios_rd08(bios, ent + 0x03);
 		} else {
-			info->type = nv_ro08(bios, ent + 0x03) & 0x07;
+			info->type = nvbios_rd08(bios, ent + 0x03) & 0x07;
 			if (info->type == 0x07)
 				info->type = DCB_I2C_UNUSED;
 		}
@@ -98,27 +99,27 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
 
 		switch (info->type) {
 		case DCB_I2C_NV04_BIT:
-			info->drive = nv_ro08(bios, ent + 0);
-			info->sense = nv_ro08(bios, ent + 1);
+			info->drive = nvbios_rd08(bios, ent + 0);
+			info->sense = nvbios_rd08(bios, ent + 1);
 			return 0;
 		case DCB_I2C_NV4E_BIT:
-			info->drive = nv_ro08(bios, ent + 1);
+			info->drive = nvbios_rd08(bios, ent + 1);
 			return 0;
 		case DCB_I2C_NVIO_BIT:
-			info->drive = nv_ro08(bios, ent + 0) & 0x0f;
-			if (nv_ro08(bios, ent + 1) & 0x01)
-				info->share = nv_ro08(bios, ent + 1) >> 1;
+			info->drive = nvbios_rd08(bios, ent + 0) & 0x0f;
+			if (nvbios_rd08(bios, ent + 1) & 0x01)
+				info->share = nvbios_rd08(bios, ent + 1) >> 1;
 			return 0;
 		case DCB_I2C_NVIO_AUX:
-			info->auxch = nv_ro08(bios, ent + 0) & 0x0f;
-			if (nv_ro08(bios, ent + 1) & 0x01)
+			info->auxch = nvbios_rd08(bios, ent + 0) & 0x0f;
+			if (nvbios_rd08(bios, ent + 1) & 0x01)
 				info->share = info->auxch;
 			return 0;
 		case DCB_I2C_PMGR:
-			info->drive = (nv_ro16(bios, ent + 0) & 0x01f) >> 0;
+			info->drive = (nvbios_rd16(bios, ent + 0) & 0x01f) >> 0;
 			if (info->drive == 0x1f)
 				info->drive = DCB_I2C_UNUSED;
-			info->auxch = (nv_ro16(bios, ent + 0) & 0x3e0) >> 5;
+			info->auxch = (nvbios_rd16(bios, ent + 0) & 0x3e0) >> 5;
 			if (info->auxch == 0x1f)
 				info->auxch = DCB_I2C_UNUSED;
 			info->share = info->auxch;
@@ -126,7 +127,7 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
 		case DCB_I2C_UNUSED:
 			return 0;
 		default:
-			nv_warn(bios, "unknown i2c type %d\n", info->type);
+			nvkm_warn(subdev, "unknown i2c type %d\n", info->type);
 			info->type = DCB_I2C_UNUSED;
 			return 0;
 		}
@@ -136,21 +137,21 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
 		/* BMP (from v4.0) has i2c info in the structure; it's in a
 		 * fixed location on earlier VBIOS
 		 */
-		if (nv_ro08(bios, bios->bmp_offset + 5) < 4)
+		if (nvbios_rd08(bios, bios->bmp_offset + 5) < 4)
 			ent = 0x0048;
 		else
 			ent = 0x0036 + bios->bmp_offset;
 
 		if (idx == 0) {
-			info->drive = nv_ro08(bios, ent + 4);
+			info->drive = nvbios_rd08(bios, ent + 4);
 			if (!info->drive) info->drive = 0x3f;
-			info->sense = nv_ro08(bios, ent + 5);
+			info->sense = nvbios_rd08(bios, ent + 5);
 			if (!info->sense) info->sense = 0x3e;
 		} else
 		if (idx == 1) {
-			info->drive = nv_ro08(bios, ent + 6);
+			info->drive = nvbios_rd08(bios, ent + 6);
 			if (!info->drive) info->drive = 0x37;
-			info->sense = nv_ro08(bios, ent + 7);
+			info->sense = nvbios_rd08(bios, ent + 7);
 			if (!info->sense) info->sense = 0x36;
 		}
 
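
The bulk of this series is the mechanical rename from nv_ro08/16/32 to nvbios_rd08/16/32; the semantics stay the same: byte-granular, little-endian reads out of the VBIOS image. A freestanding sketch of that accessor shape over a plain byte buffer (the struct and names here are illustrative, not the kernel's, and whether the real helpers zero-fill out-of-range reads exactly like this is an assumption):

#include <stdint.h>
#include <stddef.h>

struct vbios {                  /* stand-in for struct nvkm_bios */
	const uint8_t *data;
	size_t size;
};

/* 8-bit read; out-of-range offsets read back as zero */
static uint8_t vbios_rd08(const struct vbios *b, uint32_t off)
{
	return off < b->size ? b->data[off] : 0x00;
}

/* wider reads compose little-endian from the byte accessor */
static uint16_t vbios_rd16(const struct vbios *b, uint32_t off)
{
	return vbios_rd08(b, off) | (uint16_t)vbios_rd08(b, off + 1) << 8;
}

static uint32_t vbios_rd32(const struct vbios *b, uint32_t off)
{
	return vbios_rd16(b, off) | (uint32_t)vbios_rd16(b, off + 2) << 16;
}

Composing the wide reads from the byte read keeps every table parser above endian-safe regardless of host byte order.
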
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
index 1815540a0e8b..74b14cf09308 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
@@ -29,20 +29,21 @@
 static bool
 nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct nvbios_pcirT pcir;
 	struct nvbios_npdeT npde;
 	u8  ver;
 	u16 hdr;
 	u32 data;
 
-	switch ((data = nv_ro16(bios, image->base + 0x00))) {
+	switch ((data = nvbios_rd16(bios, image->base + 0x00))) {
 	case 0xaa55:
 	case 0xbb77:
 	case 0x4e56: /* NV */
 		break;
 	default:
-		nv_debug(bios, "%08x: ROM signature (%04x) unknown\n",
-			 image->base, data);
+		nvkm_debug(subdev, "%08x: ROM signature (%04x) unknown\n",
+			   image->base, data);
 		return false;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f4611e3f0971..65af31441e9c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -31,18 +31,18 @@
 #include <subdev/bios/init.h>
 #include <subdev/bios/ramcfg.h>
 
-#include <core/device.h>
 #include <subdev/devinit.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/vga.h>
 
 #define bioslog(lvl, fmt, args...) do {                                        \
-	nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset,            \
-		  init_exec(init) ? '0' + (init->nested - 1) : ' ', ##args);   \
+	nvkm_printk(init->subdev, lvl, info, "0x%04x[%c]: "fmt,                \
+		    init->offset, init_exec(init) ?                            \
+		    '0' + (init->nested - 1) : ' ', ##args);                   \
 } while(0)
 #define cont(fmt, args...) do {                                                \
-	if (nv_subdev(init->bios)->debug >= NV_DBG_TRACE)                      \
+	if (init->subdev->debug >= NV_DBG_TRACE)                               \
 		printk(fmt, ##args);                                           \
 } while(0)
 #define trace(fmt, args...) bioslog(TRACE, fmt, ##args)
@@ -141,7 +141,7 @@ init_conn(struct nvbios_init *init)
 static inline u32
 init_nvreg(struct nvbios_init *init, u32 reg)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
+	struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
 
 	/* C51 (at least) sometimes has the lower bits set which the VBIOS
 	 * interprets to mean that access needs to go through certain IO
@@ -154,7 +154,7 @@ init_nvreg(struct nvbios_init *init, u32 reg)
 	/* GF8+ display scripts need register addresses mangled a bit to
 	 * select a specific CRTC/OR
 	 */
-	if (nv_device(init->bios)->card_type >= NV_50) {
+	if (init->bios->subdev.device->card_type >= NV_50) {
 		if (reg & 0x80000000) {
 			reg += init_crtc(init) * 0x800;
 			reg &= ~0x80000000;
@@ -173,35 +173,36 @@ init_nvreg(struct nvbios_init *init, u32 reg)
 	if (reg & ~0x00fffffc)
 		warn("unknown bits in register 0x%08x\n", reg);
 
-	if (devinit->mmio)
-		reg = devinit->mmio(devinit, reg);
-	return reg;
+	return nvkm_devinit_mmio(devinit, reg);
 }
 
 static u32
 init_rd32(struct nvbios_init *init, u32 reg)
 {
+	struct nvkm_device *device = init->bios->subdev.device;
 	reg = init_nvreg(init, reg);
 	if (reg != ~0 && init_exec(init))
-		return nv_rd32(init->subdev, reg);
+		return nvkm_rd32(device, reg);
 	return 0x00000000;
 }
 
 static void
 init_wr32(struct nvbios_init *init, u32 reg, u32 val)
 {
+	struct nvkm_device *device = init->bios->subdev.device;
 	reg = init_nvreg(init, reg);
 	if (reg != ~0 && init_exec(init))
-		nv_wr32(init->subdev, reg, val);
+		nvkm_wr32(device, reg, val);
 }
 
 static u32
 init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
 {
+	struct nvkm_device *device = init->bios->subdev.device;
 	reg = init_nvreg(init, reg);
 	if (reg != ~0 && init_exec(init)) {
-		u32 tmp = nv_rd32(init->subdev, reg);
-		nv_wr32(init->subdev, reg, (tmp & ~mask) | val);
+		u32 tmp = nvkm_rd32(device, reg);
+		nvkm_wr32(device, reg, (tmp & ~mask) | val);
 		return tmp;
 	}
 	return 0x00000000;
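
init_rd32()/init_wr32()/init_mask() above all thread through two gates, the register translation and init_exec(); init_mask() itself is a plain read-modify-write that returns the pre-write value so a caller can restore it. Reduced to a memory word (no exec gate, no MMIO), the core operation is:

#include <stdint.h>

/* clear the masked bits, OR in val, hand back the old contents */
static uint32_t rmw32(uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t old = *reg;

	*reg = (old & ~mask) | val;
	return old;
}

Returning the old value is what lets script helpers save and later restore register state around a sequence.
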
@@ -211,7 +212,7 @@ static u8
 init_rdport(struct nvbios_init *init, u16 port)
 {
 	if (init_exec(init))
-		return nv_rdport(init->subdev, init->crtc, port);
+		return nvkm_rdport(init->subdev->device, init->crtc, port);
 	return 0x00;
 }
 
@@ -219,7 +220,7 @@ static void
 init_wrport(struct nvbios_init *init, u16 port, u8 value)
 {
 	if (init_exec(init))
-		nv_wrport(init->subdev, init->crtc, port, value);
+		nvkm_wrport(init->subdev->device, init->crtc, port, value);
 }
 
 static u8
@@ -228,7 +229,7 @@ init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
 	struct nvkm_subdev *subdev = init->subdev;
 	if (init_exec(init)) {
 		int head = init->crtc < 0 ? 0 : init->crtc;
-		return nv_rdvgai(subdev, head, port, index);
+		return nvkm_rdvgai(subdev->device, head, port, index);
 	}
 	return 0x00;
 }
@@ -236,80 +237,80 @@ init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
 static void
 init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
 {
+	struct nvkm_device *device = init->subdev->device;
+
 	/* force head 0 for updates to cr44, it only exists on first head */
-	if (nv_device(init->subdev)->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		if (port == 0x03d4 && index == 0x44)
 			init->crtc = 0;
 	}
 
 	if (init_exec(init)) {
 		int head = init->crtc < 0 ? 0 : init->crtc;
-		nv_wrvgai(init->subdev, head, port, index, value);
+		nvkm_wrvgai(device, head, port, index, value);
 	}
 
 	/* select head 1 if cr44 write selected it */
-	if (nv_device(init->subdev)->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		if (port == 0x03d4 && index == 0x44 && value == 3)
 			init->crtc = 1;
 	}
 }
 
-static struct nvkm_i2c_port *
+static struct i2c_adapter *
 init_i2c(struct nvbios_init *init, int index)
 {
-	struct nvkm_i2c *i2c = nvkm_i2c(init->bios);
+	struct nvkm_i2c *i2c = init->bios->subdev.device->i2c;
+	struct nvkm_i2c_bus *bus;
 
 	if (index == 0xff) {
-		index = NV_I2C_DEFAULT(0);
+		index = NVKM_I2C_BUS_PRI;
 		if (init->outp && init->outp->i2c_upper_default)
-			index = NV_I2C_DEFAULT(1);
-	} else
-	if (index < 0) {
-		if (!init->outp) {
-			if (init_exec(init))
-				error("script needs output for i2c\n");
-			return NULL;
-		}
-
-		if (index == -2 && init->outp->location) {
-			index = NV_I2C_TYPE_EXTAUX(init->outp->extdev);
-			return i2c->find_type(i2c, index);
-		}
-
-		index = init->outp->i2c_index;
-		if (init->outp->type == DCB_OUTPUT_DP)
-			index += NV_I2C_AUX(0);
+			index = NVKM_I2C_BUS_SEC;
 	}
 
-	return i2c->find(i2c, index);
+	bus = nvkm_i2c_bus_find(i2c, index);
+	return bus ? &bus->i2c : NULL;
 }
 
 static int
 init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, index);
-	if (port && init_exec(init))
-		return nv_rdi2cr(port, addr, reg);
+	struct i2c_adapter *adap = init_i2c(init, index);
+	if (adap && init_exec(init))
+		return nvkm_rdi2cr(adap, addr, reg);
 	return -ENODEV;
 }
 
 static int
 init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, index);
-	if (port && init_exec(init))
-		return nv_wri2cr(port, addr, reg, val);
+	struct i2c_adapter *adap = init_i2c(init, index);
+	if (adap && init_exec(init))
+		return nvkm_wri2cr(adap, addr, reg, val);
 	return -ENODEV;
 }
 
+static struct nvkm_i2c_aux *
+init_aux(struct nvbios_init *init)
+{
+	struct nvkm_i2c *i2c = init->bios->subdev.device->i2c;
+	if (!init->outp) {
+		if (init_exec(init))
+			error("script needs output for aux\n");
+		return NULL;
+	}
+	return nvkm_i2c_aux_find(i2c, init->outp->i2c_index);
+}
+
 static u8
 init_rdauxr(struct nvbios_init *init, u32 addr)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, -2);
+	struct nvkm_i2c_aux *aux = init_aux(init);
 	u8 data;
 
-	if (port && init_exec(init)) {
-		int ret = nv_rdaux(port, addr, &data, 1);
+	if (aux && init_exec(init)) {
+		int ret = nvkm_rdaux(aux, addr, &data, 1);
 		if (ret == 0)
 			return data;
 		trace("auxch read failed with %d\n", ret);
@@ -321,9 +322,9 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
 static int
 init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, -2);
-	if (port && init_exec(init)) {
-		int ret = nv_wraux(port, addr, &data, 1);
+	struct nvkm_i2c_aux *aux = init_aux(init);
+	if (aux && init_exec(init)) {
+		int ret = nvkm_wraux(aux, addr, &data, 1);
 		if (ret)
 			trace("auxch write failed with %d\n", ret);
 		return ret;
@@ -334,9 +335,9 @@ init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
 static void
 init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
-	if (devinit->pll_set && init_exec(init)) {
-		int ret = devinit->pll_set(devinit, id, freq);
+	struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
+	if (init_exec(init)) {
+		int ret = nvkm_devinit_pll_set(devinit, id, freq);
 		if (ret)
 			warn("failed to prog pll 0x%08x to %dkHz\n", id, freq);
 	}
@@ -371,7 +372,7 @@ init_table_(struct nvbios_init *init, u16 offset, const char *name)
 	u16 len, data = init_table(bios, &len);
 	if (data) {
 		if (len >= offset + 2) {
-			data = nv_ro16(bios, data + offset);
+			data = nvbios_rd16(bios, data + offset);
 			if (data)
 				return data;
 
@@ -407,12 +408,12 @@ init_script(struct nvkm_bios *bios, int index)
 			return 0x0000;
 
 		data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
-		return nv_ro16(bios, data + (index * 2));
+		return nvbios_rd16(bios, data + (index * 2));
 	}
 
 	data = init_script_table(&init);
 	if (data)
-		return nv_ro16(bios, data + (index * 2));
+		return nvbios_rd16(bios, data + (index * 2));
 
 	return 0x0000;
 }
@@ -422,7 +423,7 @@ init_unknown_script(struct nvkm_bios *bios)
 {
 	u16 len, data = init_table(bios, &len);
 	if (data && len >= 16)
-		return nv_ro16(bios, data + 14);
+		return nvbios_rd16(bios, data + 14);
 	return 0x0000;
 }
 
@@ -454,9 +455,9 @@ init_xlat_(struct nvbios_init *init, u8 index, u8 offset)
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_xlat_table(init);
 	if (table) {
-		u16 data = nv_ro16(bios, table + (index * 2));
+		u16 data = nvbios_rd16(bios, table + (index * 2));
 		if (data)
-			return nv_ro08(bios, data + offset);
+			return nvbios_rd08(bios, data + offset);
 		warn("xlat table pointer %d invalid\n", index);
 	}
 	return 0x00;
@@ -472,9 +473,9 @@ init_condition_met(struct nvbios_init *init, u8 cond)
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_condition_table(init);
 	if (table) {
-		u32 reg = nv_ro32(bios, table + (cond * 12) + 0);
-		u32 msk = nv_ro32(bios, table + (cond * 12) + 4);
-		u32 val = nv_ro32(bios, table + (cond * 12) + 8);
+		u32 reg = nvbios_rd32(bios, table + (cond * 12) + 0);
+		u32 msk = nvbios_rd32(bios, table + (cond * 12) + 4);
+		u32 val = nvbios_rd32(bios, table + (cond * 12) + 8);
 		trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
 		      cond, reg, msk, val);
 		return (init_rd32(init, reg) & msk) == val;
@@ -488,10 +489,10 @@ init_io_condition_met(struct nvbios_init *init, u8 cond)
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_io_condition_table(init);
 	if (table) {
-		u16 port = nv_ro16(bios, table + (cond * 5) + 0);
-		u8 index = nv_ro08(bios, table + (cond * 5) + 2);
-		u8  mask = nv_ro08(bios, table + (cond * 5) + 3);
-		u8 value = nv_ro08(bios, table + (cond * 5) + 4);
+		u16 port = nvbios_rd16(bios, table + (cond * 5) + 0);
+		u8 index = nvbios_rd08(bios, table + (cond * 5) + 2);
+		u8  mask = nvbios_rd08(bios, table + (cond * 5) + 3);
+		u8 value = nvbios_rd08(bios, table + (cond * 5) + 4);
 		trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n",
 		      cond, port, index, mask, value);
 		return (init_rdvgai(init, port, index) & mask) == value;
@@ -505,15 +506,15 @@ init_io_flag_condition_met(struct nvbios_init *init, u8 cond)
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_io_flag_condition_table(init);
 	if (table) {
-		u16 port = nv_ro16(bios, table + (cond * 9) + 0);
-		u8 index = nv_ro08(bios, table + (cond * 9) + 2);
-		u8  mask = nv_ro08(bios, table + (cond * 9) + 3);
-		u8 shift = nv_ro08(bios, table + (cond * 9) + 4);
-		u16 data = nv_ro16(bios, table + (cond * 9) + 5);
-		u8 dmask = nv_ro08(bios, table + (cond * 9) + 7);
-		u8 value = nv_ro08(bios, table + (cond * 9) + 8);
+		u16 port = nvbios_rd16(bios, table + (cond * 9) + 0);
+		u8 index = nvbios_rd08(bios, table + (cond * 9) + 2);
+		u8  mask = nvbios_rd08(bios, table + (cond * 9) + 3);
+		u8 shift = nvbios_rd08(bios, table + (cond * 9) + 4);
+		u16 data = nvbios_rd16(bios, table + (cond * 9) + 5);
+		u8 dmask = nvbios_rd08(bios, table + (cond * 9) + 7);
+		u8 value = nvbios_rd08(bios, table + (cond * 9) + 8);
 		u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift;
-		return (nv_ro08(bios, data + ioval) & dmask) == value;
+		return (nvbios_rd08(bios, data + ioval) & dmask) == value;
 	}
 	return false;
 }
@@ -573,7 +574,7 @@ init_tmds_reg(struct nvbios_init *init, u8 tmds)
 static void
 init_reserved(struct nvbios_init *init)
 {
-	u8 opcode = nv_ro08(init->bios, init->offset);
+	u8 opcode = nvbios_rd08(init->bios, init->offset);
 	u8 length, i;
 
 	switch (opcode) {
@@ -587,7 +588,7 @@ init_reserved(struct nvbios_init *init)
 
 	trace("RESERVED 0x%02x\t", opcode);
 	for (i = 1; i < length; i++)
-		cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
+		cont(" 0x%02x", nvbios_rd08(init->bios, init->offset + i));
 	cont("\n");
 	init->offset += length;
 }
@@ -611,12 +612,12 @@ static void
 init_io_restrict_prog(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	u8 count = nv_ro08(bios, init->offset + 6);
-	u32  reg = nv_ro32(bios, init->offset + 7);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	u8 count = nvbios_rd08(bios, init->offset + 6);
+	u32  reg = nvbios_rd32(bios, init->offset + 7);
 	u8 conf, i;
 
 	trace("IO_RESTRICT_PROG\tR[0x%06x] = "
@@ -626,7 +627,7 @@ init_io_restrict_prog(struct nvbios_init *init)
 
 	conf = (init_rdvgai(init, port, index) & mask) >> shift;
 	for (i = 0; i < count; i++) {
-		u32 data = nv_ro32(bios, init->offset);
+		u32 data = nvbios_rd32(bios, init->offset);
 
 		if (i == conf) {
 			trace("\t0x%08x *\n", data);
@@ -648,7 +649,7 @@ static void
 init_repeat(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 count = nv_ro08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 1);
 	u16 repeat = init->repeat;
 
 	trace("REPEAT\t0x%02x\n", count);
@@ -674,13 +675,13 @@ static void
 init_io_restrict_pll(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	s8  iofc = nv_ro08(bios, init->offset + 6);
-	u8 count = nv_ro08(bios, init->offset + 7);
-	u32  reg = nv_ro32(bios, init->offset + 8);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	s8  iofc = nvbios_rd08(bios, init->offset + 6);
+	u8 count = nvbios_rd08(bios, init->offset + 7);
+	u32  reg = nvbios_rd32(bios, init->offset + 8);
 	u8 conf, i;
 
 	trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= "
@@ -690,7 +691,7 @@ init_io_restrict_pll(struct nvbios_init *init)
 
 	conf = (init_rdvgai(init, port, index) & mask) >> shift;
 	for (i = 0; i < count; i++) {
-		u32 freq = nv_ro16(bios, init->offset) * 10;
+		u32 freq = nvbios_rd16(bios, init->offset) * 10;
 
 		if (i == conf) {
 			trace("\t%dkHz *\n", freq);
@@ -730,12 +731,12 @@ static void
 init_copy(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	u8 smask = nv_ro08(bios, init->offset + 6);
-	u16 port = nv_ro16(bios, init->offset + 7);
-	u8 index = nv_ro08(bios, init->offset + 9);
-	u8  mask = nv_ro08(bios, init->offset + 10);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	u8 smask = nvbios_rd08(bios, init->offset + 6);
+	u16 port = nvbios_rd16(bios, init->offset + 7);
+	u8 index = nvbios_rd08(bios, init->offset + 9);
+	u8  mask = nvbios_rd08(bios, init->offset + 10);
 	u8  data;
 
 	trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= "
@@ -769,7 +770,7 @@ static void
 init_io_flag_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 cond = nv_ro08(bios, init->offset + 1);
+	u8 cond = nvbios_rd08(bios, init->offset + 1);
 
 	trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
 	init->offset += 2;
@@ -787,8 +788,8 @@ init_dp_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
 	struct nvbios_dpout info;
-	u8  cond = nv_ro08(bios, init->offset + 1);
-	u8  unkn = nv_ro08(bios, init->offset + 2);
+	u8  cond = nvbios_rd08(bios, init->offset + 1);
+	u8  unkn = nvbios_rd08(bios, init->offset + 2);
 	u8  ver, hdr, cnt, len;
 	u16 data;
 
@@ -834,7 +835,7 @@ static void
 init_io_mask_or(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
 	u8    or = init_or(init);
 	u8  data;
 
@@ -853,7 +854,7 @@ static void
 init_io_or(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
 	u8    or = init_or(init);
 	u8  data;
 
@@ -872,8 +873,8 @@ static void
 init_andn_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
 
 	trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask);
 	init->offset += 9;
@@ -889,8 +890,8 @@ static void
 init_or_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
 
 	trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask);
 	init->offset += 9;
@@ -906,19 +907,19 @@ static void
 init_idx_addr_latched(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 creg = nv_ro32(bios, init->offset + 1);
-	u32 dreg = nv_ro32(bios, init->offset + 5);
-	u32 mask = nv_ro32(bios, init->offset + 9);
-	u32 data = nv_ro32(bios, init->offset + 13);
-	u8 count = nv_ro08(bios, init->offset + 17);
+	u32 creg = nvbios_rd32(bios, init->offset + 1);
+	u32 dreg = nvbios_rd32(bios, init->offset + 5);
+	u32 mask = nvbios_rd32(bios, init->offset + 9);
+	u32 data = nvbios_rd32(bios, init->offset + 13);
+	u8 count = nvbios_rd08(bios, init->offset + 17);
 
 	trace("INDEX_ADDRESS_LATCHED\tR[0x%06x] : R[0x%06x]\n", creg, dreg);
 	trace("\tCTRL &= 0x%08x |= 0x%08x\n", mask, data);
 	init->offset += 18;
 
 	while (count--) {
-		u8 iaddr = nv_ro08(bios, init->offset + 0);
-		u8 idata = nv_ro08(bios, init->offset + 1);
+		u8 iaddr = nvbios_rd08(bios, init->offset + 0);
+		u8 idata = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t[0x%02x] = 0x%02x\n", iaddr, idata);
 		init->offset += 2;
@@ -936,12 +937,12 @@ static void
 init_io_restrict_pll2(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	u8 count = nv_ro08(bios, init->offset + 6);
-	u32  reg = nv_ro32(bios, init->offset + 7);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	u8 count = nvbios_rd08(bios, init->offset + 6);
+	u32  reg = nvbios_rd32(bios, init->offset + 7);
 	u8  conf, i;
 
 	trace("IO_RESTRICT_PLL2\t"
@@ -951,7 +952,7 @@ init_io_restrict_pll2(struct nvbios_init *init)
 
 	conf = (init_rdvgai(init, port, index) & mask) >> shift;
 	for (i = 0; i < count; i++) {
-		u32 freq = nv_ro32(bios, init->offset);
+		u32 freq = nvbios_rd32(bios, init->offset);
 		if (i == conf) {
 			trace("\t%dkHz *\n", freq);
 			init_prog_pll(init, reg, freq);
@@ -971,8 +972,8 @@ static void
 init_pll2(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 freq = nv_ro32(bios, init->offset + 5);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 freq = nvbios_rd32(bios, init->offset + 5);
 
 	trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
 	init->offset += 9;
@@ -988,17 +989,17 @@ static void
 init_i2c_byte(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 count = nvbios_rd08(bios, init->offset + 3);
 
 	trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
 	init->offset += 4;
 
 	while (count--) {
-		u8  reg = nv_ro08(bios, init->offset + 0);
-		u8 mask = nv_ro08(bios, init->offset + 1);
-		u8 data = nv_ro08(bios, init->offset + 2);
+		u8  reg = nvbios_rd08(bios, init->offset + 0);
+		u8 mask = nvbios_rd08(bios, init->offset + 1);
+		u8 data = nvbios_rd08(bios, init->offset + 2);
 		int val;
 
 		trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data);
@@ -1019,16 +1020,16 @@ static void
 init_zm_i2c_byte(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 count = nvbios_rd08(bios, init->offset + 3);
 
 	trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
 	init->offset += 4;
 
 	while (count--) {
-		u8  reg = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8  reg = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t[0x%02x] = 0x%02x\n", reg, data);
 		init->offset += 2;
@@ -1045,28 +1046,28 @@ static void
 init_zm_i2c(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 count = nvbios_rd08(bios, init->offset + 3);
 	u8 data[256], i;
 
 	trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr);
 	init->offset += 4;
 
 	for (i = 0; i < count; i++) {
-		data[i] = nv_ro08(bios, init->offset);
+		data[i] = nvbios_rd08(bios, init->offset);
 		trace("\t0x%02x\n", data[i]);
 		init->offset++;
 	}
 
 	if (init_exec(init)) {
-		struct nvkm_i2c_port *port = init_i2c(init, index);
+		struct i2c_adapter *adap = init_i2c(init, index);
 		struct i2c_msg msg = {
 			.addr = addr, .flags = 0, .len = count, .buf = data,
 		};
 		int ret;
 
-		if (port && (ret = i2c_transfer(&port->adapter, &msg, 1)) != 1)
+		if (adap && (ret = i2c_transfer(adap, &msg, 1)) != 1)
 			warn("i2c wr failed, %d\n", ret);
 	}
 }
@@ -1079,10 +1080,10 @@ static void
 init_tmds(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 tmds = nv_ro08(bios, init->offset + 1);
-	u8 addr = nv_ro08(bios, init->offset + 2);
-	u8 mask = nv_ro08(bios, init->offset + 3);
-	u8 data = nv_ro08(bios, init->offset + 4);
+	u8 tmds = nvbios_rd08(bios, init->offset + 1);
+	u8 addr = nvbios_rd08(bios, init->offset + 2);
+	u8 mask = nvbios_rd08(bios, init->offset + 3);
+	u8 data = nvbios_rd08(bios, init->offset + 4);
 	u32 reg = init_tmds_reg(init, tmds);
 
 	trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n",
@@ -1105,16 +1106,16 @@ static void
 init_zm_tmds_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  tmds = nv_ro08(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 2);
+	u8  tmds = nvbios_rd08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 2);
 	u32  reg = init_tmds_reg(init, tmds);
 
 	trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds);
 	init->offset += 3;
 
 	while (count--) {
-		u8 addr = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8 addr = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t[0x%02x] = 0x%02x\n", addr, data);
 		init->offset += 2;
@@ -1132,10 +1133,10 @@ static void
 init_cr_idx_adr_latch(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 addr0 = nv_ro08(bios, init->offset + 1);
-	u8 addr1 = nv_ro08(bios, init->offset + 2);
-	u8  base = nv_ro08(bios, init->offset + 3);
-	u8 count = nv_ro08(bios, init->offset + 4);
+	u8 addr0 = nvbios_rd08(bios, init->offset + 1);
+	u8 addr1 = nvbios_rd08(bios, init->offset + 2);
+	u8  base = nvbios_rd08(bios, init->offset + 3);
+	u8 count = nvbios_rd08(bios, init->offset + 4);
 	u8 save0;
 
 	trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1);
@@ -1143,7 +1144,7 @@ init_cr_idx_adr_latch(struct nvbios_init *init)
 
 	save0 = init_rdvgai(init, 0x03d4, addr0);
 	while (count--) {
-		u8 data = nv_ro08(bios, init->offset);
+		u8 data = nvbios_rd08(bios, init->offset);
 
 		trace("\t\t[0x%02x] = 0x%02x\n", base, data);
 		init->offset += 1;
@@ -1162,9 +1163,9 @@ static void
 init_cr(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 addr = nv_ro08(bios, init->offset + 1);
-	u8 mask = nv_ro08(bios, init->offset + 2);
-	u8 data = nv_ro08(bios, init->offset + 3);
+	u8 addr = nvbios_rd08(bios, init->offset + 1);
+	u8 mask = nvbios_rd08(bios, init->offset + 2);
+	u8 data = nvbios_rd08(bios, init->offset + 3);
 	u8 val;
 
 	trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
@@ -1182,8 +1183,8 @@ static void
 init_zm_cr(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 addr = nv_ro08(bios, init->offset + 1);
-	u8 data = nv_ro08(bios, init->offset + 2);
+	u8 addr = nvbios_rd08(bios, init->offset + 1);
+	u8 data = nvbios_rd08(bios, init->offset + 2);
 
 	trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr,  data);
 	init->offset += 3;
@@ -1199,14 +1200,14 @@ static void
 init_zm_cr_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 count = nv_ro08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 1);
 
 	trace("ZM_CR_GROUP\n");
 	init->offset += 2;
 
 	while (count--) {
-		u8 addr = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8 addr = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t\tC[0x%02x] = 0x%02x\n", addr, data);
 		init->offset += 2;
@@ -1223,8 +1224,8 @@ static void
 init_condition_time(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  cond = nv_ro08(bios, init->offset + 1);
-	u8 retry = nv_ro08(bios, init->offset + 2);
+	u8  cond = nvbios_rd08(bios, init->offset + 1);
+	u8 retry = nvbios_rd08(bios, init->offset + 2);
 	u8  wait = min((u16)retry * 50, 100);
 
 	trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry);
@@ -1250,7 +1251,7 @@ static void
 init_ltime(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 msec = nv_ro16(bios, init->offset + 1);
+	u16 msec = nvbios_rd16(bios, init->offset + 1);
 
 	trace("LTIME\t0x%04x\n", msec);
 	init->offset += 3;
@@ -1267,14 +1268,14 @@ static void
 init_zm_reg_sequence(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 base = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 base = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("ZM_REG_SEQUENCE\t0x%02x\n", count);
 	init->offset += 6;
 
 	while (count--) {
-		u32 data = nv_ro32(bios, init->offset);
+		u32 data = nvbios_rd32(bios, init->offset);
 
 		trace("\t\tR[0x%06x] = 0x%08x\n", base, data);
 		init->offset += 4;
@@ -1292,9 +1293,9 @@ static void
 init_pll_indirect(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u16 addr = nv_ro16(bios, init->offset + 5);
-	u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u16 addr = nvbios_rd16(bios, init->offset + 5);
+	u32 freq = (u32)nvbios_rd16(bios, addr) * 1000;
 
 	trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
 	      reg, addr, freq);
@@ -1311,9 +1312,9 @@ static void
 init_zm_reg_indirect(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u16 addr = nv_ro16(bios, init->offset + 5);
-	u32 data = nv_ro32(bios, addr);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u16 addr = nvbios_rd16(bios, init->offset + 5);
+	u32 data = nvbios_rd32(bios, addr);
 
 	trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
 	      reg, addr, data);
@@ -1330,7 +1331,7 @@ static void
 init_sub_direct(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 addr = nv_ro16(bios, init->offset + 1);
+	u16 addr = nvbios_rd16(bios, init->offset + 1);
 	u16 save;
 
 	trace("SUB_DIRECT\t0x%04x\n", addr);
@@ -1356,7 +1357,7 @@ static void
 init_jump(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 offset = nv_ro16(bios, init->offset + 1);
+	u16 offset = nvbios_rd16(bios, init->offset + 1);
 
 	trace("JUMP\t0x%04x\n", offset);
 
@@ -1374,11 +1375,11 @@ static void
 init_i2c_if(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2);
-	u8   reg = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8  data = nv_ro08(bios, init->offset + 5);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2);
+	u8   reg = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8  data = nvbios_rd08(bios, init->offset + 5);
 	u8 value;
 
 	trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n",
@@ -1401,12 +1402,12 @@ static void
 init_copy_nv_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  sreg = nv_ro32(bios, init->offset + 1);
-	u8  shift = nv_ro08(bios, init->offset + 5);
-	u32 smask = nv_ro32(bios, init->offset + 6);
-	u32  sxor = nv_ro32(bios, init->offset + 10);
-	u32  dreg = nv_ro32(bios, init->offset + 14);
-	u32 dmask = nv_ro32(bios, init->offset + 18);
+	u32  sreg = nvbios_rd32(bios, init->offset + 1);
+	u8  shift = nvbios_rd08(bios, init->offset + 5);
+	u32 smask = nvbios_rd32(bios, init->offset + 6);
+	u32  sxor = nvbios_rd32(bios, init->offset + 10);
+	u32  dreg = nvbios_rd32(bios, init->offset + 14);
+	u32 dmask = nvbios_rd32(bios, init->offset + 18);
 	u32 data;
 
 	trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= "
@@ -1427,9 +1428,9 @@ static void
 init_zm_index_io(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  data = nv_ro08(bios, init->offset + 4);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  data = nvbios_rd08(bios, init->offset + 4);
 
 	trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data);
 	init->offset += 5;
@@ -1444,14 +1445,14 @@ init_zm_index_io(struct nvbios_init *init)
 static void
 init_compute_mem(struct nvbios_init *init)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
+	struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
 
 	trace("COMPUTE_MEM\n");
 	init->offset += 1;
 
 	init_exec_force(init, true);
-	if (init_exec(init) && devinit->meminit)
-		devinit->meminit(devinit);
+	if (init_exec(init))
+		nvkm_devinit_meminit(devinit);
 	init_exec_force(init, false);
 }
 
@@ -1463,9 +1464,9 @@ static void
 init_reset(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32   reg = nv_ro32(bios, init->offset + 1);
-	u32 data1 = nv_ro32(bios, init->offset + 5);
-	u32 data2 = nv_ro32(bios, init->offset + 9);
+	u32   reg = nvbios_rd32(bios, init->offset + 1);
+	u32 data1 = nvbios_rd32(bios, init->offset + 5);
+	u32 data2 = nvbios_rd32(bios, init->offset + 9);
 	u32 savepci19;
 
 	trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2);
@@ -1513,14 +1514,14 @@ init_configure_mem(struct nvbios_init *init)
 
 	mdata = init_configure_mem_clk(init);
 	sdata = bmp_sdr_seq_table(bios);
-	if (nv_ro08(bios, mdata) & 0x01)
+	if (nvbios_rd08(bios, mdata) & 0x01)
 		sdata = bmp_ddr_seq_table(bios);
 	mdata += 6; /* skip to data */
 
 	data = init_rdvgai(init, 0x03c4, 0x01);
 	init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
 
-	for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) {
+	for (; (addr = nvbios_rd32(bios, sdata)) != 0xffffffff; sdata += 4) {
 		switch (addr) {
 		case 0x10021c: /* CKE_NORMAL */
 		case 0x1002d0: /* CMD_REFRESH */
@@ -1528,7 +1529,7 @@ init_configure_mem(struct nvbios_init *init)
 			data = 0x00000001;
 			break;
 		default:
-			data = nv_ro32(bios, mdata);
+			data = nvbios_rd32(bios, mdata);
 			mdata += 4;
 			if (data == 0xffffffff)
 				continue;
@@ -1563,12 +1564,12 @@ init_configure_clk(struct nvbios_init *init)
 	mdata = init_configure_mem_clk(init);
 
 	/* NVPLL */
-	clock = nv_ro16(bios, mdata + 4) * 10;
+	clock = nvbios_rd16(bios, mdata + 4) * 10;
 	init_prog_pll(init, 0x680500, clock);
 
 	/* MPLL */
-	clock = nv_ro16(bios, mdata + 2) * 10;
-	if (nv_ro08(bios, mdata) & 0x01)
+	clock = nvbios_rd16(bios, mdata + 2) * 10;
+	if (nvbios_rd08(bios, mdata) & 0x01)
 		clock *= 2;
 	init_prog_pll(init, 0x680504, clock);
 
@@ -1609,9 +1610,9 @@ static void
 init_io(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8  mask = nv_ro16(bios, init->offset + 3);
-	u8  data = nv_ro16(bios, init->offset + 4);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8  mask = nvbios_rd16(bios, init->offset + 3);
+	u8  data = nvbios_rd16(bios, init->offset + 4);
 	u8 value;
 
 	trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data);
@@ -1621,7 +1622,7 @@ init_io(struct nvbios_init *init)
 	 * needed some day..  it's almost certainly wrong, but, it also
 	 * somehow makes things work...
 	 */
-	if (nv_device(init->bios)->card_type >= NV_50 &&
+	if (bios->subdev.device->card_type >= NV_50 &&
 	    port == 0x03c3 && data == 0x01) {
 		init_mask(init, 0x614100, 0xf0800000, 0x00800000);
 		init_mask(init, 0x00e18c, 0x00020000, 0x00020000);
@@ -1649,7 +1650,7 @@ static void
 init_sub(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
 	u16 addr, save;
 
 	trace("SUB\t0x%02x\n", index);
@@ -1676,8 +1677,8 @@ static void
 init_ram_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  mask = nv_ro08(bios, init->offset + 1);
-	u8 value = nv_ro08(bios, init->offset + 2);
+	u8  mask = nvbios_rd08(bios, init->offset + 1);
+	u8 value = nvbios_rd08(bios, init->offset + 2);
 
 	trace("RAM_CONDITION\t"
 	      "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value);
@@ -1695,9 +1696,9 @@ static void
 init_nv_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
-	u32 data = nv_ro32(bios, init->offset + 9);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
+	u32 data = nvbios_rd32(bios, init->offset + 9);
 
 	trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data);
 	init->offset += 13;
@@ -1713,15 +1714,15 @@ static void
 init_macro(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  macro = nv_ro08(bios, init->offset + 1);
+	u8  macro = nvbios_rd08(bios, init->offset + 1);
 	u16 table;
 
 	trace("MACRO\t0x%02x\n", macro);
 
 	table = init_macro_table(init);
 	if (table) {
-		u32 addr = nv_ro32(bios, table + (macro * 8) + 0);
-		u32 data = nv_ro32(bios, table + (macro * 8) + 4);
+		u32 addr = nvbios_rd32(bios, table + (macro * 8) + 0);
+		u32 data = nvbios_rd32(bios, table + (macro * 8) + 4);
 		trace("\t\tR[0x%06x] = 0x%08x\n", addr, data);
 		init_wr32(init, addr, data);
 	}
@@ -1742,6 +1743,24 @@ init_resume(struct nvbios_init *init)
 }
 
 /**
+ * INIT_STRAP_CONDITION - opcode 0x73
+ *
+ */
+static void
+init_strap_condition(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32 mask = nvbios_rd32(bios, init->offset + 1);
+	u32 value = nvbios_rd32(bios, init->offset + 5);
+
+	trace("STRAP_CONDITION\t(R[0x101000] & 0x%08x) == 0x%08x\n", mask, value);
+	init->offset += 9;
+
+	if ((init_rd32(init, 0x101000) & mask) != value)
+		init_exec_set(init, false);
+}
+
+/**
  * INIT_TIME - opcode 0x74
  *
  */
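
INIT_STRAP_CONDITION, newly handled above, follows the interpreter's standard conditional shape: pull operands out of the script stream, test hardware state, and on mismatch clear the execute flag rather than abort, so later opcodes still advance the offset but skip their side effects. The control flow in isolation (types and the dummy strap read are illustrative only):

#include <stdint.h>
#include <stdbool.h>

struct script {
	const uint8_t *code;
	uint32_t offset;
	bool execute;           /* mirrors init_exec()/init_exec_set() */
};

static uint32_t script_rd32(const struct script *s, uint32_t off)
{
	return s->code[off] | s->code[off + 1] << 8 |
	       s->code[off + 2] << 16 | (uint32_t)s->code[off + 3] << 24;
}

/* stand-in for reading the strap register (0x101000 on the hardware) */
static uint32_t strap_rd32(void)
{
	return 0x00000000;
}

static void op_strap_condition(struct script *s)
{
	uint32_t mask  = script_rd32(s, s->offset + 1);
	uint32_t value = script_rd32(s, s->offset + 5);

	s->offset += 9;                 /* opcode byte + two u32 operands */
	if ((strap_rd32() & mask) != value)
		s->execute = false;     /* mute the script, don't stop it */
}

Muting instead of stopping matters because a later INIT_RESUME (opcode 0x72, above) can switch execution back on mid-script.
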
@@ -1749,7 +1768,7 @@ static void
 init_time(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 usec = nv_ro16(bios, init->offset + 1);
+	u16 usec = nvbios_rd16(bios, init->offset + 1);
 
 	trace("TIME\t0x%04x\n", usec);
 	init->offset += 3;
@@ -1770,7 +1789,7 @@ static void
 init_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 cond = nv_ro08(bios, init->offset + 1);
+	u8 cond = nvbios_rd08(bios, init->offset + 1);
 
 	trace("CONDITION\t0x%02x\n", cond);
 	init->offset += 2;
@@ -1787,7 +1806,7 @@ static void
 init_io_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 cond = nv_ro08(bios, init->offset + 1);
+	u8 cond = nvbios_rd08(bios, init->offset + 1);
 
 	trace("IO_CONDITION\t0x%02x\n", cond);
 	init->offset += 2;
@@ -1797,6 +1816,23 @@ init_io_condition(struct nvbios_init *init)
 }
 
 /**
+ * INIT_ZM_REG16 - opcode 0x77
+ *
+ */
+static void
+init_zm_reg16(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u16 data = nvbios_rd16(bios, init->offset + 5);
+
+	trace("ZM_REG16\tR[0x%06x] = 0x%04x\n", addr, data);
+	init->offset += 7;
+
+	init_wr32(init, addr, data);
+}
+
+/**
  * INIT_INDEX_IO - opcode 0x78
  *
  */
@@ -1804,10 +1840,10 @@ static void
 init_index_io(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro16(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8  data = nv_ro08(bios, init->offset + 5);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd16(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8  data = nvbios_rd08(bios, init->offset + 5);
 	u8 value;
 
 	trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n",
@@ -1826,8 +1862,8 @@ static void
 init_pll(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 freq = nv_ro16(bios, init->offset + 5) * 10;
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 freq = nvbios_rd16(bios, init->offset + 5) * 10;
 
 	trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
 	init->offset += 7;
@@ -1843,8 +1879,8 @@ static void
 init_zm_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u32 data = nv_ro32(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u32 data = nvbios_rd32(bios, init->offset + 5);
 
 	trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data);
 	init->offset += 9;
@@ -1863,7 +1899,7 @@ static void
 init_ram_restrict_pll(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  type = nv_ro08(bios, init->offset + 1);
+	u8  type = nvbios_rd08(bios, init->offset + 1);
 	u8 count = init_ram_restrict_group_count(init);
 	u8 strap = init_ram_restrict(init);
 	u8 cconf;
@@ -1872,7 +1908,7 @@ init_ram_restrict_pll(struct nvbios_init *init)
 	init->offset += 2;
 
 	for (cconf = 0; cconf < count; cconf++) {
-		u32 freq = nv_ro32(bios, init->offset);
+		u32 freq = nvbios_rd32(bios, init->offset);
 
 		if (cconf == strap) {
 			trace("%dkHz *\n", freq);
@@ -1892,13 +1928,13 @@ init_ram_restrict_pll(struct nvbios_init *init)
 static void
 init_gpio(struct nvbios_init *init)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(init->bios);
+	struct nvkm_gpio *gpio = init->bios->subdev.device->gpio;
 
 	trace("GPIO\n");
 	init->offset += 1;
 
-	if (init_exec(init) && gpio && gpio->reset)
-		gpio->reset(gpio, DCB_GPIO_UNUSED);
+	if (init_exec(init))
+		nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
 }
 
 /**
@@ -1909,9 +1945,9 @@ static void
 init_ram_restrict_zm_reg_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8  incr = nv_ro08(bios, init->offset + 5);
-	u8   num = nv_ro08(bios, init->offset + 6);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8  incr = nvbios_rd08(bios, init->offset + 5);
+	u8   num = nvbios_rd08(bios, init->offset + 6);
 	u8 count = init_ram_restrict_group_count(init);
 	u8 index = init_ram_restrict(init);
 	u8 i, j;
@@ -1923,7 +1959,7 @@ init_ram_restrict_zm_reg_group(struct nvbios_init *init)
 	for (i = 0; i < num; i++) {
 		trace("\tR[0x%06x] = {\n", addr);
 		for (j = 0; j < count; j++) {
-			u32 data = nv_ro32(bios, init->offset);
+			u32 data = nvbios_rd32(bios, init->offset);
 
 			if (j == index) {
 				trace("\t\t0x%08x *\n", data);
@@ -1947,8 +1983,8 @@ static void
 init_copy_zm_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 sreg = nv_ro32(bios, init->offset + 1);
-	u32 dreg = nv_ro32(bios, init->offset + 5);
+	u32 sreg = nvbios_rd32(bios, init->offset + 1);
+	u32 dreg = nvbios_rd32(bios, init->offset + 5);
 
 	trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
 	init->offset += 9;
@@ -1964,14 +2000,14 @@ static void
 init_zm_reg_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
 	init->offset += 6;
 
 	while (count--) {
-		u32 data = nv_ro32(bios, init->offset);
+		u32 data = nvbios_rd32(bios, init->offset);
 		trace("\t0x%08x\n", data);
 		init_wr32(init, addr, data);
 		init->offset += 4;
@@ -1986,13 +2022,13 @@ static void
 init_xlat(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 saddr = nv_ro32(bios, init->offset + 1);
-	u8 sshift = nv_ro08(bios, init->offset + 5);
-	u8  smask = nv_ro08(bios, init->offset + 6);
-	u8  index = nv_ro08(bios, init->offset + 7);
-	u32 daddr = nv_ro32(bios, init->offset + 8);
-	u32 dmask = nv_ro32(bios, init->offset + 12);
-	u8  shift = nv_ro08(bios, init->offset + 16);
+	u32 saddr = nvbios_rd32(bios, init->offset + 1);
+	u8 sshift = nvbios_rd08(bios, init->offset + 5);
+	u8  smask = nvbios_rd08(bios, init->offset + 6);
+	u8  index = nvbios_rd08(bios, init->offset + 7);
+	u32 daddr = nvbios_rd32(bios, init->offset + 8);
+	u32 dmask = nvbios_rd32(bios, init->offset + 12);
+	u8  shift = nvbios_rd08(bios, init->offset + 16);
 	u32 data;
 
 	trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= "
@@ -2014,9 +2050,9 @@ static void
 init_zm_mask_add(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
-	u32  add = nv_ro32(bios, init->offset + 9);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
+	u32  add = nvbios_rd32(bios, init->offset + 9);
 	u32 data;
 
 	trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
@@ -2035,15 +2071,15 @@ static void
 init_auxch(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
 	init->offset += 6;
 
 	while (count--) {
-		u8 mask = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8 mask = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 		trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
 		mask = init_rdauxr(init, addr) & mask;
 		init_wrauxr(init, addr, mask | data);
@@ -2059,14 +2095,14 @@ static void
 init_zm_auxch(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
 	init->offset += 6;
 
 	while (count--) {
-		u8 data = nv_ro08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 0);
 		trace("\tAUX[0x%08x] = 0x%02x\n", addr, data);
 		init_wrauxr(init, addr, data);
 		init->offset += 1;
@@ -2081,21 +2117,21 @@ static void
 init_i2c_long_if(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 reglo = nv_ro08(bios, init->offset + 3);
-	u8 reghi = nv_ro08(bios, init->offset + 4);
-	u8  mask = nv_ro08(bios, init->offset + 5);
-	u8  data = nv_ro08(bios, init->offset + 6);
-	struct nvkm_i2c_port *port;
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 reglo = nvbios_rd08(bios, init->offset + 3);
+	u8 reghi = nvbios_rd08(bios, init->offset + 4);
+	u8  mask = nvbios_rd08(bios, init->offset + 5);
+	u8  data = nvbios_rd08(bios, init->offset + 6);
+	struct i2c_adapter *adap;
 
 	trace("I2C_LONG_IF\t"
 	      "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
 	      index, addr, reglo, reghi, mask, data);
 	init->offset += 7;
 
-	port = init_i2c(init, index);
-	if (port) {
+	adap = init_i2c(init, index);
+	if (adap) {
 		u8 i[2] = { reghi, reglo };
 		u8 o[1] = {};
 		struct i2c_msg msg[] = {
@@ -2104,7 +2140,7 @@ init_i2c_long_if(struct nvbios_init *init)
 		};
 		int ret;
 
-		ret = i2c_transfer(&port->adapter, msg, 2);
+		ret = i2c_transfer(adap, msg, 2);
 		if (ret == 2 && ((o[0] & mask) == data))
 			return;
 	}
@@ -2120,9 +2156,9 @@ static void
 init_gpio_ne(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	struct nvkm_gpio *gpio = nvkm_gpio(bios);
+	struct nvkm_gpio *gpio = bios->subdev.device->gpio;
 	struct dcb_gpio_func func;
-	u8 count = nv_ro08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 1);
 	u8 idx = 0, ver, len;
 	u16 data, i;
 
@@ -2130,21 +2166,21 @@ init_gpio_ne(struct nvbios_init *init)
 	init->offset += 2;
 
 	for (i = init->offset; i < init->offset + count; i++)
-		cont("0x%02x ", nv_ro08(bios, i));
+		cont("0x%02x ", nvbios_rd08(bios, i));
 	cont("\n");
 
 	while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
 		if (func.func != DCB_GPIO_UNUSED) {
 			for (i = init->offset; i < init->offset + count; i++) {
-				if (func.func == nv_ro08(bios, i))
+				if (func.func == nvbios_rd08(bios, i))
 					break;
 			}
 
 			trace("\tFUNC[0x%02x]", func.func);
 			if (i == (init->offset + count)) {
 				cont(" *");
-				if (init_exec(init) && gpio && gpio->reset)
-					gpio->reset(gpio, func.func);
+				if (init_exec(init))
+					nvkm_gpio_reset(gpio, func.func);
 			}
 			cont("\n");
 		}
@@ -2202,9 +2238,11 @@ static struct nvbios_init_opcode {
 	[0x6f] = { init_macro },
 	[0x71] = { init_done },
 	[0x72] = { init_resume },
+	[0x73] = { init_strap_condition },
 	[0x74] = { init_time },
 	[0x75] = { init_condition },
 	[0x76] = { init_io_condition },
+	[0x77] = { init_zm_reg16 },
 	[0x78] = { init_index_io },
 	[0x79] = { init_pll },
 	[0x7a] = { init_zm_reg },
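
Hooking the two new opcodes up is just two designated initializers in the dispatch table: the opcode byte indexes an array of handlers, and empty slots are rejected (see nvbios_exec() in the next hunk). The scheme in miniature, with hypothetical handlers:

#include <stdio.h>
#include <stdint.h>

struct script;
typedef void (*init_op)(struct script *);

static void op_strap_condition(struct script *s) { (void)s; /* ... */ }
static void op_zm_reg16(struct script *s) { (void)s; /* ... */ }

/* sparse C99 designated-initializer table; gaps stay NULL */
static const init_op init_ops[0x100] = {
	[0x73] = op_strap_condition,
	[0x77] = op_zm_reg16,
};

static int dispatch(struct script *s, uint8_t opcode)
{
	if (!init_ops[opcode]) {
		fprintf(stderr, "unknown opcode 0x%02x\n", opcode);
		return -1;
	}
	init_ops[opcode](s);
	return 0;
}
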
@@ -2232,7 +2270,7 @@ nvbios_exec(struct nvbios_init *init)
 {
 	init->nested++;
 	while (init->offset) {
-		u8 opcode = nv_ro08(init->bios, init->offset);
+		u8 opcode = nvbios_rd08(init->bios, init->offset);
 		if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
 			error("unknown opcode 0x%02x\n", opcode);
 			return -EINVAL;
@@ -2247,13 +2285,13 @@ nvbios_exec(struct nvbios_init *init)
 int
 nvbios_init(struct nvkm_subdev *subdev, bool execute)
 {
-	struct nvkm_bios *bios = nvkm_bios(subdev);
+	struct nvkm_bios *bios = subdev->device->bios;
 	int ret = 0;
 	int i = -1;
 	u16 data;
 
 	if (execute)
-		nv_info(bios, "running init tables\n");
+		nvkm_debug(subdev, "running init tables\n");
 	while (!ret && (data = (init_script(bios, ++i)))) {
 		struct nvbios_init init = {
 			.subdev = subdev,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
index c4087df4f85e..3ddf0939ded3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
@@ -28,17 +28,18 @@
 u16
 mxm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct bit_entry x;
 
 	if (bit_entry(bios, 'x', &x)) {
-		nv_debug(bios, "BIT 'x' table not present\n");
+		nvkm_debug(subdev, "BIT 'x' table not present\n");
 		return 0x0000;
 	}
 
 	*ver = x.version;
 	*hdr = x.length;
 	if (*ver != 1 || *hdr < 3) {
-		nv_warn(bios, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
+		nvkm_warn(subdev, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
 		return 0x0000;
 	}
 
@@ -73,23 +74,24 @@ static u8 g98_sor_map[16] = {
 u8
 mxm_sor_map(struct nvkm_bios *bios, u8 conn)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	u8  ver, hdr;
 	u16 mxm = mxm_table(bios, &ver, &hdr);
 	if (mxm && hdr >= 6) {
-		u16 map = nv_ro16(bios, mxm + 4);
+		u16 map = nvbios_rd16(bios, mxm + 4);
 		if (map) {
-			ver = nv_ro08(bios, map);
+			ver = nvbios_rd08(bios, map);
 			if (ver == 0x10) {
-				if (conn < nv_ro08(bios, map + 3)) {
-					map += nv_ro08(bios, map + 1);
+				if (conn < nvbios_rd08(bios, map + 3)) {
+					map += nvbios_rd08(bios, map + 1);
 					map += conn;
-					return nv_ro08(bios, map);
+					return nvbios_rd08(bios, map);
 				}
 
 				return 0x00;
 			}
 
-			nv_warn(bios, "unknown sor map v%02x\n", ver);
+			nvkm_warn(subdev, "unknown sor map v%02x\n", ver);
 		}
 	}
 
@@ -102,30 +104,31 @@ mxm_sor_map(struct nvkm_bios *bios, u8 conn)
 	if (bios->version.chip == 0x98)
 		return g98_sor_map[conn];
 
-	nv_warn(bios, "missing sor map\n");
+	nvkm_warn(subdev, "missing sor map\n");
 	return 0x00;
 }
 
 u8
 mxm_ddc_map(struct nvkm_bios *bios, u8 port)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	u8  ver, hdr;
 	u16 mxm = mxm_table(bios, &ver, &hdr);
 	if (mxm && hdr >= 8) {
-		u16 map = nv_ro16(bios, mxm + 6);
+		u16 map = nvbios_rd16(bios, mxm + 6);
 		if (map) {
-			ver = nv_ro08(bios, map);
+			ver = nvbios_rd08(bios, map);
 			if (ver == 0x10) {
-				if (port < nv_ro08(bios, map + 3)) {
-					map += nv_ro08(bios, map + 1);
+				if (port < nvbios_rd08(bios, map + 3)) {
+					map += nvbios_rd08(bios, map + 1);
 					map += port;
-					return nv_ro08(bios, map);
+					return nvbios_rd08(bios, map);
 				}
 
 				return 0x00;
 			}
 
-			nv_warn(bios, "unknown ddc map v%02x\n", ver);
+			nvkm_warn(subdev, "unknown ddc map v%02x\n", ver);
 		}
 	}
 
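
Both MXM lookups above walk the same shape of table: a pointer fetched from the parent leads to a small header (version byte at +0, header length at +1, entry count at +3), and a valid index resolves to a byte at header-relative offset. Factored out, with the field offsets taken from the code above and the helper name invented:

#include <stdint.h>
#include <stddef.h>

/* returns the mapped byte, or 0x00 for anything out of range, matching
 * the fallback behaviour of mxm_sor_map()/mxm_ddc_map() */
static uint8_t map_lookup(const uint8_t *tbl, size_t len, uint8_t idx)
{
	uint8_t ver, hdr, cnt;

	if (len < 4)
		return 0x00;
	ver = tbl[0];
	hdr = tbl[1];
	cnt = tbl[3];
	if (ver != 0x10 || idx >= cnt || (size_t)hdr + idx >= len)
		return 0x00;
	return tbl[hdr + idx];
}
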
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
index fd7dd718b2bf..955df29635c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
@@ -32,12 +32,13 @@ nvbios_npdeTe(struct nvkm_bios *bios, u32 base)
 	u8  ver; u16 hdr;
 	u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir);
 	if (data = (data + hdr + 0x0f) & ~0x0f, data) {
-		switch (nv_ro32(bios, data + 0x00)) {
+		switch (nvbios_rd32(bios, data + 0x00)) {
 		case 0x4544504e: /* NPDE */
 			break;
 		default:
-			nv_debug(bios, "%08x: NPDE signature (%08x) unknown\n",
-				 data, nv_ro32(bios, data + 0x00));
+			nvkm_debug(&bios->subdev,
+				   "%08x: NPDE signature (%08x) unknown\n",
+				   data, nvbios_rd32(bios, data + 0x00));
 			data = 0;
 			break;
 		}
@@ -51,8 +52,8 @@ nvbios_npdeTp(struct nvkm_bios *bios, u32 base, struct nvbios_npdeT *info)
 	u32 data = nvbios_npdeTe(bios, base);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->image_size = nv_ro16(bios, data + 0x08) * 512;
-		info->last = nv_ro08(bios, data + 0x0a) & 0x80;
+		info->image_size = nvbios_rd16(bios, data + 0x08) * 512;
+		info->last = nvbios_rd08(bios, data + 0x0a) & 0x80;
 	}
 	return data;
 }
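
The `if (data = (data + hdr + 0x0f) & ~0x0f, data)` expression above rounds the offset just past the PCIR block up to the next 16-byte boundary, which is where an NPDE extension sits when one exists. That is the generic align-up idiom, valid for any power-of-two alignment:

#include <stdint.h>

/* round x up to a multiple of a; a must be a power of two */
static inline uint32_t align_up(uint32_t x, uint32_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/* align_up(data + hdr, 16) == ((data + hdr + 0x0f) & ~0x0f) */
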
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
index df5978753ae8..67cb3aeb2da7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
@@ -27,19 +27,20 @@
 u32
 nvbios_pcirTe(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr)
 {
-	u32 data = nv_ro16(bios, base + 0x18);
+	u32 data = nvbios_rd16(bios, base + 0x18);
 	if (data) {
 		data += base;
-		switch (nv_ro32(bios, data + 0x00)) {
+		switch (nvbios_rd32(bios, data + 0x00)) {
 		case 0x52494350: /* PCIR */
 		case 0x53494752: /* RGIS */
 		case 0x5344504e: /* NPDS */
-			*hdr = nv_ro16(bios, data + 0x0a);
-			*ver = nv_ro08(bios, data + 0x0c);
+			*hdr = nvbios_rd16(bios, data + 0x0a);
+			*ver = nvbios_rd08(bios, data + 0x0c);
 			break;
 		default:
-			nv_debug(bios, "%08x: PCIR signature (%08x) unknown\n",
-				 data, nv_ro32(bios, data + 0x00));
+			nvkm_debug(&bios->subdev,
+				   "%08x: PCIR signature (%08x) unknown\n",
+				   data, nvbios_rd32(bios, data + 0x00));
 			data = 0;
 			break;
 		}
@@ -54,15 +55,15 @@ nvbios_pcirTp(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr,
 	u32 data = nvbios_pcirTe(bios, base, ver, hdr);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->vendor_id = nv_ro16(bios, data + 0x04);
-		info->device_id = nv_ro16(bios, data + 0x06);
-		info->class_code[0] = nv_ro08(bios, data + 0x0d);
-		info->class_code[1] = nv_ro08(bios, data + 0x0e);
-		info->class_code[2] = nv_ro08(bios, data + 0x0f);
-		info->image_size = nv_ro16(bios, data + 0x10) * 512;
-		info->image_rev = nv_ro16(bios, data + 0x12);
-		info->image_type = nv_ro08(bios, data + 0x14);
-		info->last = nv_ro08(bios, data + 0x15) & 0x80;
+		info->vendor_id = nvbios_rd16(bios, data + 0x04);
+		info->device_id = nvbios_rd16(bios, data + 0x06);
+		info->class_code[0] = nvbios_rd08(bios, data + 0x0d);
+		info->class_code[1] = nvbios_rd08(bios, data + 0x0e);
+		info->class_code[2] = nvbios_rd08(bios, data + 0x0f);
+		info->image_size = nvbios_rd16(bios, data + 0x10) * 512;
+		info->image_rev = nvbios_rd16(bios, data + 0x12);
+		info->image_type = nvbios_rd08(bios, data + 0x14);
+		info->last = nvbios_rd08(bios, data + 0x15) & 0x80;
 	}
 	return data;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index 382ae9cdbf58..aa7e33b42b30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -25,8 +25,6 @@
 #include <subdev/bios/bit.h>
 #include <subdev/bios/perf.h>
 
-#include <core/device.h>
-
 u16
 nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
 		  u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
@@ -36,22 +34,22 @@ nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version <= 2) {
-			perf = nv_ro16(bios, bit_P.offset + 0);
+			perf = nvbios_rd16(bios, bit_P.offset + 0);
 			if (perf) {
-				*ver = nv_ro08(bios, perf + 0);
-				*hdr = nv_ro08(bios, perf + 1);
+				*ver = nvbios_rd08(bios, perf + 0);
+				*hdr = nvbios_rd08(bios, perf + 1);
 				if (*ver >= 0x40 && *ver < 0x41) {
-					*cnt = nv_ro08(bios, perf + 5);
-					*len = nv_ro08(bios, perf + 2);
-					*snr = nv_ro08(bios, perf + 4);
-					*ssz = nv_ro08(bios, perf + 3);
+					*cnt = nvbios_rd08(bios, perf + 5);
+					*len = nvbios_rd08(bios, perf + 2);
+					*snr = nvbios_rd08(bios, perf + 4);
+					*ssz = nvbios_rd08(bios, perf + 3);
 					return perf;
 				} else
 				if (*ver >= 0x20 && *ver < 0x40) {
-					*cnt = nv_ro08(bios, perf + 2);
-					*len = nv_ro08(bios, perf + 3);
-					*snr = nv_ro08(bios, perf + 4);
-					*ssz = nv_ro08(bios, perf + 5);
+					*cnt = nvbios_rd08(bios, perf + 2);
+					*len = nvbios_rd08(bios, perf + 3);
+					*snr = nvbios_rd08(bios, perf + 4);
+					*ssz = nvbios_rd08(bios, perf + 5);
 					return perf;
 				}
 			}
@@ -59,13 +57,13 @@ nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
 	}
 
 	if (bios->bmp_offset) {
-		if (nv_ro08(bios, bios->bmp_offset + 6) >= 0x25) {
-			perf = nv_ro16(bios, bios->bmp_offset + 0x94);
+		if (nvbios_rd08(bios, bios->bmp_offset + 6) >= 0x25) {
+			perf = nvbios_rd16(bios, bios->bmp_offset + 0x94);
 			if (perf) {
-				*hdr = nv_ro08(bios, perf + 0);
-				*ver = nv_ro08(bios, perf + 1);
-				*cnt = nv_ro08(bios, perf + 2);
-				*len = nv_ro08(bios, perf + 3);
+				*hdr = nvbios_rd08(bios, perf + 0);
+				*ver = nvbios_rd08(bios, perf + 1);
+				*cnt = nvbios_rd08(bios, perf + 2);
+				*len = nvbios_rd08(bios, perf + 3);
 				*snr = 0;
 				*ssz = 0;
 				return perf;
@@ -98,55 +96,55 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
 {
 	u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
-	info->pstate = nv_ro08(bios, perf + 0x00);
+	info->pstate = nvbios_rd08(bios, perf + 0x00);
 	switch (!!perf * *ver) {
 	case 0x12:
 	case 0x13:
 	case 0x14:
-		info->core     = nv_ro32(bios, perf + 0x01) * 10;
-		info->memory   = nv_ro32(bios, perf + 0x05) * 20;
-		info->fanspeed = nv_ro08(bios, perf + 0x37);
+		info->core     = nvbios_rd32(bios, perf + 0x01) * 10;
+		info->memory   = nvbios_rd32(bios, perf + 0x05) * 20;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x37);
 		if (*hdr > 0x38)
-			info->voltage = nv_ro08(bios, perf + 0x38);
+			info->voltage = nvbios_rd08(bios, perf + 0x38);
 		break;
 	case 0x21:
 	case 0x23:
 	case 0x24:
-		info->fanspeed = nv_ro08(bios, perf + 0x04);
-		info->voltage  = nv_ro08(bios, perf + 0x05);
-		info->shader   = nv_ro16(bios, perf + 0x06) * 1000;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x04);
+		info->voltage  = nvbios_rd08(bios, perf + 0x05);
+		info->shader   = nvbios_rd16(bios, perf + 0x06) * 1000;
 		info->core     = info->shader + (signed char)
-				 nv_ro08(bios, perf + 0x08) * 1000;
-		switch (nv_device(bios)->chipset) {
+				 nvbios_rd08(bios, perf + 0x08) * 1000;
+		switch (bios->subdev.device->chipset) {
 		case 0x49:
 		case 0x4b:
-			info->memory = nv_ro16(bios, perf + 0x0b) * 1000;
+			info->memory = nvbios_rd16(bios, perf + 0x0b) * 1000;
 			break;
 		default:
-			info->memory = nv_ro16(bios, perf + 0x0b) * 2000;
+			info->memory = nvbios_rd16(bios, perf + 0x0b) * 2000;
 			break;
 		}
 		break;
 	case 0x25:
-		info->fanspeed = nv_ro08(bios, perf + 0x04);
-		info->voltage  = nv_ro08(bios, perf + 0x05);
-		info->core     = nv_ro16(bios, perf + 0x06) * 1000;
-		info->shader   = nv_ro16(bios, perf + 0x0a) * 1000;
-		info->memory   = nv_ro16(bios, perf + 0x0c) * 1000;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x04);
+		info->voltage  = nvbios_rd08(bios, perf + 0x05);
+		info->core     = nvbios_rd16(bios, perf + 0x06) * 1000;
+		info->shader   = nvbios_rd16(bios, perf + 0x0a) * 1000;
+		info->memory   = nvbios_rd16(bios, perf + 0x0c) * 1000;
 		break;
 	case 0x30:
-		info->script   = nv_ro16(bios, perf + 0x02);
+		info->script   = nvbios_rd16(bios, perf + 0x02);
 	case 0x35:
-		info->fanspeed = nv_ro08(bios, perf + 0x06);
-		info->voltage  = nv_ro08(bios, perf + 0x07);
-		info->core     = nv_ro16(bios, perf + 0x08) * 1000;
-		info->shader   = nv_ro16(bios, perf + 0x0a) * 1000;
-		info->memory   = nv_ro16(bios, perf + 0x0c) * 1000;
-		info->vdec     = nv_ro16(bios, perf + 0x10) * 1000;
-		info->disp     = nv_ro16(bios, perf + 0x14) * 1000;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x06);
+		info->voltage  = nvbios_rd08(bios, perf + 0x07);
+		info->core     = nvbios_rd16(bios, perf + 0x08) * 1000;
+		info->shader   = nvbios_rd16(bios, perf + 0x0a) * 1000;
+		info->memory   = nvbios_rd16(bios, perf + 0x0c) * 1000;
+		info->vdec     = nvbios_rd16(bios, perf + 0x10) * 1000;
+		info->disp     = nvbios_rd16(bios, perf + 0x14) * 1000;
 		break;
 	case 0x40:
-		info->voltage  = nv_ro08(bios, perf + 0x02);
+		info->voltage  = nvbios_rd08(bios, perf + 0x02);
 		break;
 	default:
 		return 0x0000;
@@ -175,7 +173,7 @@ nvbios_perfSp(struct nvkm_bios *bios, u32 perfE, int idx,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x40:
-		info->v40.freq = (nv_ro16(bios, data + 0x00) & 0x3fff) * 1000;
+		info->v40.freq = (nvbios_rd16(bios, data + 0x00) & 0x3fff) * 1000;
 		break;
 	default:
 		break;
@@ -193,7 +191,7 @@ nvbios_perf_fan_parse(struct nvkm_bios *bios,
 		return -ENODEV;
 
 	if (ver >= 0x20 && ver < 0x40 && hdr > 6)
-		fan->pwm_divisor = nv_ro16(bios, perf + 6);
+		fan->pwm_divisor = nvbios_rd16(bios, perf + 6);
 	else
 		fan->pwm_divisor = 0;
 
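The parsers here share the switch (!!data * *ver) idiom: multiplying the version by !!data collapses a missing table to zero, so a null table pointer and an unrecognised version land in the same default path. A standalone sketch with made-up version numbers:

#include <stdint.h>
#include <stdio.h>

static const char *
parse(uint16_t data, uint8_t ver)
{
	switch (!!data * ver) {
	case 0x20:
	case 0x30:
		return "recognised version";
	default:	/* also taken when data == 0x0000 */
		return "missing table or unknown version";
	}
}

int
main(void)
{
	printf("%s\n", parse(0x0000, 0x30));
	printf("%s\n", parse(0x1234, 0x30));
	return 0;
}
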
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index ebd402e19dbf..125ec2ed6c2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -27,7 +27,6 @@
 #include <subdev/bios/pll.h>
 #include <subdev/vga.h>
 
-#include <core/device.h>
 
 struct pll_mapping {
 	u8  type;
@@ -84,20 +83,20 @@ pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 	struct bit_entry bit_C;
 
 	if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
-		u16 data = nv_ro16(bios, bit_C.offset + 8);
+		u16 data = nvbios_rd16(bios, bit_C.offset + 8);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
-			*hdr = nv_ro08(bios, data + 1);
-			*len = nv_ro08(bios, data + 2);
-			*cnt = nv_ro08(bios, data + 3);
+			*ver = nvbios_rd08(bios, data + 0);
+			*hdr = nvbios_rd08(bios, data + 1);
+			*len = nvbios_rd08(bios, data + 2);
+			*cnt = nvbios_rd08(bios, data + 3);
 			return data;
 		}
 	}
 
 	if (bmp_version(bios) >= 0x0524) {
-		u16 data = nv_ro16(bios, bios->bmp_offset + 142);
+		u16 data = nvbios_rd16(bios, bios->bmp_offset + 142);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
+			*ver = nvbios_rd08(bios, data + 0);
 			*hdr = 1;
 			*cnt = 1;
 			*len = 0x18;
@@ -112,7 +111,8 @@ pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 static struct pll_mapping *
 pll_map(struct nvkm_bios *bios)
 {
-	switch (nv_device(bios)->card_type) {
+	struct nvkm_device *device = bios->subdev.device;
+	switch (device->card_type) {
 	case NV_04:
 	case NV_10:
 	case NV_11:
@@ -123,12 +123,12 @@ pll_map(struct nvkm_bios *bios)
 	case NV_40:
 		return nv40_pll_mapping;
 	case NV_50:
-		if (nv_device(bios)->chipset == 0x50)
+		if (device->chipset == 0x50)
 			return nv50_pll_mapping;
 		else
-		if (nv_device(bios)->chipset <  0xa3 ||
-		    nv_device(bios)->chipset == 0xaa ||
-		    nv_device(bios)->chipset == 0xac)
+		if (device->chipset <  0xa3 ||
+		    device->chipset == 0xaa ||
+		    device->chipset == 0xac)
 			return g84_pll_mapping;
 	default:
 		return NULL;
@@ -146,8 +146,8 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
 	if (data && *ver >= 0x30) {
 		data += hdr;
 		while (cnt--) {
-			if (nv_ro32(bios, data + 3) == reg) {
-				*type = nv_ro08(bios, data + 0);
+			if (nvbios_rd32(bios, data + 3) == reg) {
+				*type = nvbios_rd08(bios, data + 0);
 				return data;
 			}
 			data += *len;
@@ -161,7 +161,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
 			u16 addr = (data += hdr);
 			*type = map->type;
 			while (cnt--) {
-				if (nv_ro32(bios, data) == map->reg)
+				if (nvbios_rd32(bios, data) == map->reg)
 					return data;
 				data += *len;
 			}
@@ -188,8 +188,8 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
 	if (data && *ver >= 0x30) {
 		data += hdr;
 		while (cnt--) {
-			if (nv_ro08(bios, data + 0) == type) {
-				*reg = nv_ro32(bios, data + 3);
+			if (nvbios_rd08(bios, data + 0) == type) {
+				*reg = nvbios_rd32(bios, data + 3);
 				return data;
 			}
 			data += *len;
@@ -203,7 +203,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
 			u16 addr = (data += hdr);
 			*reg = map->reg;
 			while (cnt--) {
-				if (nv_ro32(bios, data) == map->reg)
+				if (nvbios_rd32(bios, data) == map->reg)
 					return data;
 				data += *len;
 			}
@@ -222,6 +222,8 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
 int
 nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	u8  ver, len;
 	u32 reg = type;
 	u16 data;
@@ -245,12 +247,12 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
 		break;
 	case 0x10:
 	case 0x11:
-		info->vco1.min_freq = nv_ro32(bios, data + 0);
-		info->vco1.max_freq = nv_ro32(bios, data + 4);
-		info->vco2.min_freq = nv_ro32(bios, data + 8);
-		info->vco2.max_freq = nv_ro32(bios, data + 12);
-		info->vco1.min_inputfreq = nv_ro32(bios, data + 16);
-		info->vco2.min_inputfreq = nv_ro32(bios, data + 20);
+		info->vco1.min_freq = nvbios_rd32(bios, data + 0);
+		info->vco1.max_freq = nvbios_rd32(bios, data + 4);
+		info->vco2.min_freq = nvbios_rd32(bios, data + 8);
+		info->vco2.max_freq = nvbios_rd32(bios, data + 12);
+		info->vco1.min_inputfreq = nvbios_rd32(bios, data + 16);
+		info->vco2.min_inputfreq = nvbios_rd32(bios, data + 20);
 		info->vco1.max_inputfreq = INT_MAX;
 		info->vco2.max_inputfreq = INT_MAX;
 
@@ -291,82 +293,82 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
 		break;
 	case 0x20:
 	case 0x21:
-		info->vco1.min_freq = nv_ro16(bios, data + 4) * 1000;
-		info->vco1.max_freq = nv_ro16(bios, data + 6) * 1000;
-		info->vco2.min_freq = nv_ro16(bios, data + 8) * 1000;
-		info->vco2.max_freq = nv_ro16(bios, data + 10) * 1000;
-		info->vco1.min_inputfreq = nv_ro16(bios, data + 12) * 1000;
-		info->vco2.min_inputfreq = nv_ro16(bios, data + 14) * 1000;
-		info->vco1.max_inputfreq = nv_ro16(bios, data + 16) * 1000;
-		info->vco2.max_inputfreq = nv_ro16(bios, data + 18) * 1000;
-		info->vco1.min_n = nv_ro08(bios, data + 20);
-		info->vco1.max_n = nv_ro08(bios, data + 21);
-		info->vco1.min_m = nv_ro08(bios, data + 22);
-		info->vco1.max_m = nv_ro08(bios, data + 23);
-		info->vco2.min_n = nv_ro08(bios, data + 24);
-		info->vco2.max_n = nv_ro08(bios, data + 25);
-		info->vco2.min_m = nv_ro08(bios, data + 26);
-		info->vco2.max_m = nv_ro08(bios, data + 27);
-
-		info->max_p = nv_ro08(bios, data + 29);
+		info->vco1.min_freq = nvbios_rd16(bios, data + 4) * 1000;
+		info->vco1.max_freq = nvbios_rd16(bios, data + 6) * 1000;
+		info->vco2.min_freq = nvbios_rd16(bios, data + 8) * 1000;
+		info->vco2.max_freq = nvbios_rd16(bios, data + 10) * 1000;
+		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
+		info->vco2.min_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
+		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 16) * 1000;
+		info->vco2.max_inputfreq = nvbios_rd16(bios, data + 18) * 1000;
+		info->vco1.min_n = nvbios_rd08(bios, data + 20);
+		info->vco1.max_n = nvbios_rd08(bios, data + 21);
+		info->vco1.min_m = nvbios_rd08(bios, data + 22);
+		info->vco1.max_m = nvbios_rd08(bios, data + 23);
+		info->vco2.min_n = nvbios_rd08(bios, data + 24);
+		info->vco2.max_n = nvbios_rd08(bios, data + 25);
+		info->vco2.min_m = nvbios_rd08(bios, data + 26);
+		info->vco2.max_m = nvbios_rd08(bios, data + 27);
+
+		info->max_p = nvbios_rd08(bios, data + 29);
 		info->max_p_usable = info->max_p;
 		if (bios->version.chip < 0x60)
 			info->max_p_usable = 0x6;
-		info->bias_p = nv_ro08(bios, data + 30);
+		info->bias_p = nvbios_rd08(bios, data + 30);
 
 		if (len > 0x22)
-			info->refclk = nv_ro32(bios, data + 31);
+			info->refclk = nvbios_rd32(bios, data + 31);
 		break;
 	case 0x30:
-		data = nv_ro16(bios, data + 1);
-
-		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
-		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
-		info->vco2.min_freq = nv_ro16(bios, data + 4) * 1000;
-		info->vco2.max_freq = nv_ro16(bios, data + 6) * 1000;
-		info->vco1.min_inputfreq = nv_ro16(bios, data + 8) * 1000;
-		info->vco2.min_inputfreq = nv_ro16(bios, data + 10) * 1000;
-		info->vco1.max_inputfreq = nv_ro16(bios, data + 12) * 1000;
-		info->vco2.max_inputfreq = nv_ro16(bios, data + 14) * 1000;
-		info->vco1.min_n = nv_ro08(bios, data + 16);
-		info->vco1.max_n = nv_ro08(bios, data + 17);
-		info->vco1.min_m = nv_ro08(bios, data + 18);
-		info->vco1.max_m = nv_ro08(bios, data + 19);
-		info->vco2.min_n = nv_ro08(bios, data + 20);
-		info->vco2.max_n = nv_ro08(bios, data + 21);
-		info->vco2.min_m = nv_ro08(bios, data + 22);
-		info->vco2.max_m = nv_ro08(bios, data + 23);
-		info->max_p_usable = info->max_p = nv_ro08(bios, data + 25);
-		info->bias_p = nv_ro08(bios, data + 27);
-		info->refclk = nv_ro32(bios, data + 28);
+		data = nvbios_rd16(bios, data + 1);
+
+		info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
+		info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
+		info->vco2.min_freq = nvbios_rd16(bios, data + 4) * 1000;
+		info->vco2.max_freq = nvbios_rd16(bios, data + 6) * 1000;
+		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 8) * 1000;
+		info->vco2.min_inputfreq = nvbios_rd16(bios, data + 10) * 1000;
+		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
+		info->vco2.max_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
+		info->vco1.min_n = nvbios_rd08(bios, data + 16);
+		info->vco1.max_n = nvbios_rd08(bios, data + 17);
+		info->vco1.min_m = nvbios_rd08(bios, data + 18);
+		info->vco1.max_m = nvbios_rd08(bios, data + 19);
+		info->vco2.min_n = nvbios_rd08(bios, data + 20);
+		info->vco2.max_n = nvbios_rd08(bios, data + 21);
+		info->vco2.min_m = nvbios_rd08(bios, data + 22);
+		info->vco2.max_m = nvbios_rd08(bios, data + 23);
+		info->max_p_usable = info->max_p = nvbios_rd08(bios, data + 25);
+		info->bias_p = nvbios_rd08(bios, data + 27);
+		info->refclk = nvbios_rd32(bios, data + 28);
 		break;
 	case 0x40:
-		info->refclk = nv_ro16(bios, data + 9) * 1000;
-		data = nv_ro16(bios, data + 1);
-
-		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
-		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
-		info->vco1.min_inputfreq = nv_ro16(bios, data + 4) * 1000;
-		info->vco1.max_inputfreq = nv_ro16(bios, data + 6) * 1000;
-		info->vco1.min_m = nv_ro08(bios, data + 8);
-		info->vco1.max_m = nv_ro08(bios, data + 9);
-		info->vco1.min_n = nv_ro08(bios, data + 10);
-		info->vco1.max_n = nv_ro08(bios, data + 11);
-		info->min_p = nv_ro08(bios, data + 12);
-		info->max_p = nv_ro08(bios, data + 13);
+		info->refclk = nvbios_rd16(bios, data + 9) * 1000;
+		data = nvbios_rd16(bios, data + 1);
+
+		info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
+		info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
+		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 4) * 1000;
+		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 6) * 1000;
+		info->vco1.min_m = nvbios_rd08(bios, data + 8);
+		info->vco1.max_m = nvbios_rd08(bios, data + 9);
+		info->vco1.min_n = nvbios_rd08(bios, data + 10);
+		info->vco1.max_n = nvbios_rd08(bios, data + 11);
+		info->min_p = nvbios_rd08(bios, data + 12);
+		info->max_p = nvbios_rd08(bios, data + 13);
 		break;
 	default:
-		nv_error(bios, "unknown pll limits version 0x%02x\n", ver);
+		nvkm_error(subdev, "unknown pll limits version 0x%02x\n", ver);
 		return -EINVAL;
 	}
 
 	if (!info->refclk) {
-		info->refclk = nv_device(bios)->crystal;
+		info->refclk = device->crystal;
 		if (bios->version.chip == 0x51) {
-			u32 sel_clk = nv_rd32(bios, 0x680524);
+			u32 sel_clk = nvkm_rd32(device, 0x680524);
 			if ((info->reg == 0x680508 && sel_clk & 0x20) ||
 			    (info->reg == 0x680520 && sel_clk & 0x80)) {
-				if (nv_rdvgac(bios, 0, 0x27) < 0xa3)
+				if (nvkm_rdvgac(device, 0, 0x27) < 0xa3)
 					info->refclk = 200000;
 				else
 					info->refclk = 25000;
@@ -380,8 +382,8 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
 	 * with an empty limit table (seen on nv18)
 	 */
 	if (!info->vco1.max_freq) {
-		info->vco1.max_freq = nv_ro32(bios, bios->bmp_offset + 67);
-		info->vco1.min_freq = nv_ro32(bios, bios->bmp_offset + 71);
+		info->vco1.max_freq = nvbios_rd32(bios, bios->bmp_offset + 67);
+		info->vco1.min_freq = nvbios_rd32(bios, bios->bmp_offset + 71);
 		if (bmp_version(bios) < 0x0506) {
 			info->vco1.max_freq = 256000;
 			info->vco1.min_freq = 128000;
@@ -393,7 +395,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
 		info->vco1.max_n = 0xff;
 		info->vco1.min_m = 0x1;
 
-		if (nv_device(bios)->crystal == 13500) {
+		if (device->crystal == 13500) {
 			/* nv05 does this, nv11 doesn't, nv10 unknown */
 			if (bios->version.chip < 0x11)
 				info->vco1.min_m = 0x7;
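
When the limits table supplies no refclk, nvbios_pll_parse above falls back to the board crystal, with a register-driven override on chipset 0x51. A sketch of that fallback chain with assumed frequencies (the 200000 value is taken from the code above; the rest is fabricated):

#include <stdio.h>

static unsigned
pick_refclk(unsigned table_refclk, unsigned crystal, int sel_clk_override)
{
	if (table_refclk)
		return table_refclk;	/* table value wins */
	if (sel_clk_override)
		return 200000;		/* chip-specific quirk */
	return crystal;			/* default: crystal frequency */
}

int
main(void)
{
	printf("%u\n", pick_refclk(0, 27000, 0));	/* 27000 */
	printf("%u\n", pick_refclk(0, 27000, 1));	/* 200000 */
	return 0;
}
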
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index 20c5ce0cd573..441ec451b788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -49,12 +49,12 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 
 	if (!bit_entry(bios, 'p', &bit_p)) {
 		if (bit_p.version == 2 && bit_p.length >= 4)
-			data = nv_ro32(bios, bit_p.offset + 0x00);
+			data = nvbios_rd32(bios, bit_p.offset + 0x00);
 		if ((data = weirdo_pointer(bios, data))) {
-			*ver = nv_ro08(bios, data + 0x00); /* maybe? */
-			*hdr = nv_ro08(bios, data + 0x01);
-			*len = nv_ro08(bios, data + 0x02);
-			*cnt = nv_ro08(bios, data + 0x03);
+			*ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
+			*hdr = nvbios_rd08(bios, data + 0x01);
+			*len = nvbios_rd08(bios, data + 0x02);
+			*cnt = nvbios_rd08(bios, data + 0x03);
 		}
 	}
 
@@ -95,8 +95,8 @@ nvbios_pmuEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	default:
-		info->type = nv_ro08(bios, data + 0x00);
-		info->data = nv_ro32(bios, data + 0x02);
+		info->type = nvbios_rd08(bios, data + 0x00);
+		info->data = nvbios_rd32(bios, data + 0x02);
 		break;
 	}
 	return data;
@@ -112,21 +112,21 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
 	while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
 		if ( pmuE.type == type &&
 		    (data = weirdo_pointer(bios, pmuE.data))) {
-			info->init_addr_pmu = nv_ro32(bios, data + 0x08);
-			info->args_addr_pmu = nv_ro32(bios, data + 0x0c);
+			info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
+			info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
 			info->boot_addr     = data + 0x30;
-			info->boot_addr_pmu = nv_ro32(bios, data + 0x10) +
-					      nv_ro32(bios, data + 0x18);
-			info->boot_size     = nv_ro32(bios, data + 0x1c) -
-					      nv_ro32(bios, data + 0x18);
+			info->boot_addr_pmu = nvbios_rd32(bios, data + 0x10) +
+					      nvbios_rd32(bios, data + 0x18);
+			info->boot_size     = nvbios_rd32(bios, data + 0x1c) -
+					      nvbios_rd32(bios, data + 0x18);
 			info->code_addr     = info->boot_addr + info->boot_size;
 			info->code_addr_pmu = info->boot_addr_pmu +
 					      info->boot_size;
-			info->code_size     = nv_ro32(bios, data + 0x20);
+			info->code_size     = nvbios_rd32(bios, data + 0x20);
 			info->data_addr     = data + 0x30 +
-					      nv_ro32(bios, data + 0x24);
-			info->data_addr_pmu = nv_ro32(bios, data + 0x28);
-			info->data_size     = nv_ro32(bios, data + 0x2c);
+					      nvbios_rd32(bios, data + 0x24);
+			info->data_addr_pmu = nvbios_rd32(bios, data + 0x28);
+			info->data_size     = nvbios_rd32(bios, data + 0x2c);
 			return true;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 95e4fa1531d6..e0ec2a6b7b79 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -1,5 +1,6 @@
 #ifndef __NVKM_BIOS_PRIV_H__
 #define __NVKM_BIOS_PRIV_H__
+#define nvkm_bios(p) container_of((p), struct nvkm_bios, subdev)
 #include <subdev/bios.h>
 
 struct nvbios_source {
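
The new nvkm_bios() macro is the usual container_of() pattern: given a pointer to the embedded subdev, recover the enclosing nvkm_bios. A standalone sketch with stand-in types (struct bios/struct subdev here are not the kernel's, and the kernel's container_of adds type checking this minimal form lacks):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev { int inst; };
struct bios   { int size; struct subdev subdev; };

#define to_bios(p) container_of(p, struct bios, subdev)

int
main(void)
{
	struct bios b = { .size = 64 };
	struct subdev *s = &b.subdev;
	printf("%d\n", to_bios(s)->size); /* 64 */
	return 0;
}
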
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
index a17b221119b2..d5222af10b96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
@@ -29,7 +29,7 @@
 static u8
 nvbios_ramcfg_strap(struct nvkm_subdev *subdev)
 {
-	return (nv_rd32(subdev, 0x101000) & 0x0000003c) >> 2;
+	return (nvkm_rd32(subdev->device, 0x101000) & 0x0000003c) >> 2;
 }
 
 u8
@@ -39,9 +39,9 @@ nvbios_ramcfg_count(struct nvkm_bios *bios)
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 1 && bit_M.length >= 5)
-			return nv_ro08(bios, bit_M.offset + 2);
+			return nvbios_rd08(bios, bit_M.offset + 2);
 		if (bit_M.version == 2 && bit_M.length >= 3)
-			return nv_ro08(bios, bit_M.offset + 0);
+			return nvbios_rd08(bios, bit_M.offset + 0);
 	}
 
 	return 0x00;
@@ -50,7 +50,7 @@ nvbios_ramcfg_count(struct nvkm_bios *bios)
 u8
 nvbios_ramcfg_index(struct nvkm_subdev *subdev)
 {
-	struct nvkm_bios *bios = nvkm_bios(subdev);
+	struct nvkm_bios *bios = subdev->device->bios;
 	u8 strap = nvbios_ramcfg_strap(subdev);
 	u32 xlat = 0x00000000;
 	struct bit_entry bit_M;
@@ -59,7 +59,7 @@ nvbios_ramcfg_index(struct nvkm_subdev *subdev)
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 1 && bit_M.length >= 5)
-			xlat = nv_ro16(bios, bit_M.offset + 3);
+			xlat = nvbios_rd16(bios, bit_M.offset + 3);
 		if (bit_M.version == 2 && bit_M.length >= 3) {
 			/*XXX: is M ever shorter than this?
 			 *     if not - what is xlat used for now?
@@ -68,11 +68,11 @@ nvbios_ramcfg_index(struct nvkm_subdev *subdev)
 			if (bit_M.length >= 7 &&
 			    nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E))
 				return M0203E.group;
-			xlat = nv_ro16(bios, bit_M.offset + 1);
+			xlat = nvbios_rd16(bios, bit_M.offset + 1);
 		}
 	}
 
 	if (xlat)
-		strap = nv_ro08(bios, xlat + strap);
+		strap = nvbios_rd08(bios, xlat + strap);
 	return strap;
 }
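
nvbios_ramcfg_index above reads the memory strap from bits 5:2 of register 0x101000 and, when the BIT 'M' table provides one, runs it through a translation table. A sketch with a fabricated xlat table:

#include <stdint.h>
#include <stdio.h>

static uint8_t
ramcfg_index(uint32_t pin_reg, const uint8_t *xlat)
{
	uint8_t strap = (pin_reg & 0x0000003c) >> 2;
	return xlat ? xlat[strap] : strap;
}

int
main(void)
{
	const uint8_t xlat[16] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	printf("%u\n", ramcfg_index(0x00000008, NULL)); /* 2 */
	printf("%u\n", ramcfg_index(0x00000008, xlat)); /* 1 */
	return 0;
}
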
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
index 8b17bb4b220c..f0e1fc74a52e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
@@ -34,18 +34,18 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			rammap = nv_ro16(bios, bit_P.offset + 4);
+			rammap = nvbios_rd16(bios, bit_P.offset + 4);
 
 		if (rammap) {
-			*ver = nv_ro08(bios, rammap + 0);
+			*ver = nvbios_rd08(bios, rammap + 0);
 			switch (*ver) {
 			case 0x10:
 			case 0x11:
-				*hdr = nv_ro08(bios, rammap + 1);
-				*cnt = nv_ro08(bios, rammap + 5);
-				*len = nv_ro08(bios, rammap + 2);
-				*snr = nv_ro08(bios, rammap + 4);
-				*ssz = nv_ro08(bios, rammap + 3);
+				*hdr = nvbios_rd08(bios, rammap + 1);
+				*cnt = nvbios_rd08(bios, rammap + 5);
+				*len = nvbios_rd08(bios, rammap + 2);
+				*snr = nvbios_rd08(bios, rammap + 4);
+				*ssz = nvbios_rd08(bios, rammap + 3);
 				return rammap;
 			default:
 				break;
@@ -72,6 +72,21 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx,
 	return 0x0000;
 }
 
+/* Pretend a performance mode is also a rammap entry; this helps
+ * coalesce entries later on. */
+u32
+nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
+		struct nvbios_ramcfg *p)
+{
+	memset(p, 0x00, sizeof(*p));
+
+	p->rammap_00_16_20 = (nvbios_rd08(bios, data + 0x16) & 0x20) >> 5;
+	p->rammap_00_16_40 = (nvbios_rd08(bios, data + 0x16) & 0x40) >> 6;
+	p->rammap_00_17_02 = (nvbios_rd08(bios, data + 0x17) & 0x02) >> 1;
+
+	return data;
+}
+
 u32
 nvbios_rammapEp(struct nvkm_bios *bios, int idx,
 		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
@@ -82,18 +97,18 @@ nvbios_rammapEp(struct nvkm_bios *bios, int idx,
 	p->rammap_hdr = *hdr;
 	switch (!!data * *ver) {
 	case 0x10:
-		p->rammap_min      =  nv_ro16(bios, data + 0x00);
-		p->rammap_max      =  nv_ro16(bios, data + 0x02);
-		p->rammap_10_04_02 = (nv_ro08(bios, data + 0x04) & 0x02) >> 1;
-		p->rammap_10_04_08 = (nv_ro08(bios, data + 0x04) & 0x08) >> 3;
+		p->rammap_min      =  nvbios_rd16(bios, data + 0x00);
+		p->rammap_max      =  nvbios_rd16(bios, data + 0x02);
+		p->rammap_10_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
+		p->rammap_10_04_08 = (nvbios_rd08(bios, data + 0x04) & 0x08) >> 3;
 		break;
 	case 0x11:
-		p->rammap_min      =  nv_ro16(bios, data + 0x00);
-		p->rammap_max      =  nv_ro16(bios, data + 0x02);
-		p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
-		p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2;
-		p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
-		temp = nv_ro32(bios, data + 0x09);
+		p->rammap_min      =  nvbios_rd16(bios, data + 0x00);
+		p->rammap_max      =  nvbios_rd16(bios, data + 0x02);
+		p->rammap_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
+		p->rammap_11_08_0c = (nvbios_rd08(bios, data + 0x08) & 0x0c) >> 2;
+		p->rammap_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
+		temp = nvbios_rd32(bios, data + 0x09);
 		p->rammap_11_09_01ff = (temp & 0x000001ff) >> 0;
 		p->rammap_11_0a_03fe = (temp & 0x0003fe00) >> 9;
 		p->rammap_11_0a_0400 = (temp & 0x00040000) >> 18;
@@ -102,10 +117,10 @@ nvbios_rammapEp(struct nvkm_bios *bios, int idx,
 		p->rammap_11_0b_0200 = (temp & 0x02000000) >> 25;
 		p->rammap_11_0b_0400 = (temp & 0x04000000) >> 26;
 		p->rammap_11_0b_0800 = (temp & 0x08000000) >> 27;
-		p->rammap_11_0d    =  nv_ro08(bios, data + 0x0d);
-		p->rammap_11_0e    =  nv_ro08(bios, data + 0x0e);
-		p->rammap_11_0f    =  nv_ro08(bios, data + 0x0f);
-		p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2;
+		p->rammap_11_0d    =  nvbios_rd08(bios, data + 0x0d);
+		p->rammap_11_0e    =  nvbios_rd08(bios, data + 0x0e);
+		p->rammap_11_0f    =  nvbios_rd08(bios, data + 0x0f);
+		p->rammap_11_11_0c = (nvbios_rd08(bios, data + 0x11) & 0x0c) >> 2;
 		break;
 	default:
 		data = 0;
@@ -141,6 +156,36 @@ nvbios_rammapSe(struct nvkm_bios *bios, u32 data,
 }
 
 u32
+nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
+		struct nvbios_ramcfg *p)
+{
+	data += (idx * size);
+
+	if (size < 11)
+		return 0x00000000;
+
+	p->ramcfg_ver = 0;
+	p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x01);
+	p->ramcfg_00_03_01 = (nvbios_rd08(bios, data + 0x03) & 0x01) >> 0;
+	p->ramcfg_00_03_02 = (nvbios_rd08(bios, data + 0x03) & 0x02) >> 1;
+	p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x03) & 0x04) >> 2;
+	p->ramcfg_00_03_08 = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
+	p->ramcfg_RON      = (nvbios_rd08(bios, data + 0x03) & 0x10) >> 4;
+	p->ramcfg_00_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
+	p->ramcfg_00_04_04 = (nvbios_rd08(bios, data + 0x04) & 0x04) >> 2;
+	p->ramcfg_00_04_20 = (nvbios_rd08(bios, data + 0x04) & 0x20) >> 5;
+	p->ramcfg_00_05    = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
+	p->ramcfg_00_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+	p->ramcfg_00_07    = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
+	p->ramcfg_00_08    = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
+	p->ramcfg_00_09    = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
+	p->ramcfg_00_0a_0f = (nvbios_rd08(bios, data + 0x0a) & 0x0f) >> 0;
+	p->ramcfg_00_0a_f0 = (nvbios_rd08(bios, data + 0x0a) & 0xf0) >> 4;
+
+	return data;
+}
+
+u32
 nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
 		u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
 		u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
@@ -150,58 +195,58 @@ nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
 	p->ramcfg_hdr = *hdr;
 	switch (!!data * *ver) {
 	case 0x10:
-		p->ramcfg_timing   =  nv_ro08(bios, data + 0x01);
-		p->ramcfg_10_02_01 = (nv_ro08(bios, data + 0x02) & 0x01) >> 0;
-		p->ramcfg_10_02_02 = (nv_ro08(bios, data + 0x02) & 0x02) >> 1;
-		p->ramcfg_10_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
-		p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
-		p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
-		p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5;
-		p->ramcfg_10_DLLoff = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
-		p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
-		p->ramcfg_10_04_01 = (nv_ro08(bios, data + 0x04) & 0x01) >> 0;
-		p->ramcfg_10_05    = (nv_ro08(bios, data + 0x05) & 0xff) >> 0;
-		p->ramcfg_10_06    = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
-		p->ramcfg_10_07    = (nv_ro08(bios, data + 0x07) & 0xff) >> 0;
-		p->ramcfg_10_08    = (nv_ro08(bios, data + 0x08) & 0xff) >> 0;
-		p->ramcfg_10_09_0f = (nv_ro08(bios, data + 0x09) & 0x0f) >> 0;
-		p->ramcfg_10_09_f0 = (nv_ro08(bios, data + 0x09) & 0xf0) >> 4;
+		p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x01);
+		p->ramcfg_10_02_01 = (nvbios_rd08(bios, data + 0x02) & 0x01) >> 0;
+		p->ramcfg_10_02_02 = (nvbios_rd08(bios, data + 0x02) & 0x02) >> 1;
+		p->ramcfg_10_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
+		p->ramcfg_10_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
+		p->ramcfg_10_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
+		p->ramcfg_10_02_20 = (nvbios_rd08(bios, data + 0x02) & 0x20) >> 5;
+		p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+		p->ramcfg_10_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
+		p->ramcfg_10_04_01 = (nvbios_rd08(bios, data + 0x04) & 0x01) >> 0;
+		p->ramcfg_10_05    = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
+		p->ramcfg_10_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+		p->ramcfg_10_07    = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
+		p->ramcfg_10_08    = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
+		p->ramcfg_10_09_0f = (nvbios_rd08(bios, data + 0x09) & 0x0f) >> 0;
+		p->ramcfg_10_09_f0 = (nvbios_rd08(bios, data + 0x09) & 0xf0) >> 4;
 		break;
 	case 0x11:
-		p->ramcfg_timing   =  nv_ro08(bios, data + 0x00);
-		p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0;
-		p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1;
-		p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2;
-		p->ramcfg_11_01_08 = (nv_ro08(bios, data + 0x01) & 0x08) >> 3;
-		p->ramcfg_11_01_10 = (nv_ro08(bios, data + 0x01) & 0x10) >> 4;
-		p->ramcfg_11_01_20 = (nv_ro08(bios, data + 0x01) & 0x20) >> 5;
-		p->ramcfg_11_01_40 = (nv_ro08(bios, data + 0x01) & 0x40) >> 6;
-		p->ramcfg_11_01_80 = (nv_ro08(bios, data + 0x01) & 0x80) >> 7;
-		p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0;
-		p->ramcfg_11_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
-		p->ramcfg_11_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
-		p->ramcfg_11_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
-		p->ramcfg_11_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
-		p->ramcfg_11_02_80 = (nv_ro08(bios, data + 0x02) & 0x80) >> 7;
-		p->ramcfg_11_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
-		p->ramcfg_11_03_30 = (nv_ro08(bios, data + 0x03) & 0x30) >> 4;
-		p->ramcfg_11_03_c0 = (nv_ro08(bios, data + 0x03) & 0xc0) >> 6;
-		p->ramcfg_11_03_f0 = (nv_ro08(bios, data + 0x03) & 0xf0) >> 4;
-		p->ramcfg_11_04    = (nv_ro08(bios, data + 0x04) & 0xff) >> 0;
-		p->ramcfg_11_06    = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
-		p->ramcfg_11_07_02 = (nv_ro08(bios, data + 0x07) & 0x02) >> 1;
-		p->ramcfg_11_07_04 = (nv_ro08(bios, data + 0x07) & 0x04) >> 2;
-		p->ramcfg_11_07_08 = (nv_ro08(bios, data + 0x07) & 0x08) >> 3;
-		p->ramcfg_11_07_10 = (nv_ro08(bios, data + 0x07) & 0x10) >> 4;
-		p->ramcfg_11_07_40 = (nv_ro08(bios, data + 0x07) & 0x40) >> 6;
-		p->ramcfg_11_07_80 = (nv_ro08(bios, data + 0x07) & 0x80) >> 7;
-		p->ramcfg_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
-		p->ramcfg_11_08_02 = (nv_ro08(bios, data + 0x08) & 0x02) >> 1;
-		p->ramcfg_11_08_04 = (nv_ro08(bios, data + 0x08) & 0x04) >> 2;
-		p->ramcfg_11_08_08 = (nv_ro08(bios, data + 0x08) & 0x08) >> 3;
-		p->ramcfg_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
-		p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5;
-		p->ramcfg_11_09    = (nv_ro08(bios, data + 0x09) & 0xff) >> 0;
+		p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x00);
+		p->ramcfg_11_01_01 = (nvbios_rd08(bios, data + 0x01) & 0x01) >> 0;
+		p->ramcfg_11_01_02 = (nvbios_rd08(bios, data + 0x01) & 0x02) >> 1;
+		p->ramcfg_11_01_04 = (nvbios_rd08(bios, data + 0x01) & 0x04) >> 2;
+		p->ramcfg_11_01_08 = (nvbios_rd08(bios, data + 0x01) & 0x08) >> 3;
+		p->ramcfg_11_01_10 = (nvbios_rd08(bios, data + 0x01) & 0x10) >> 4;
+		p->ramcfg_11_01_20 = (nvbios_rd08(bios, data + 0x01) & 0x20) >> 5;
+		p->ramcfg_11_01_40 = (nvbios_rd08(bios, data + 0x01) & 0x40) >> 6;
+		p->ramcfg_11_01_80 = (nvbios_rd08(bios, data + 0x01) & 0x80) >> 7;
+		p->ramcfg_11_02_03 = (nvbios_rd08(bios, data + 0x02) & 0x03) >> 0;
+		p->ramcfg_11_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
+		p->ramcfg_11_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
+		p->ramcfg_11_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
+		p->ramcfg_11_02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+		p->ramcfg_11_02_80 = (nvbios_rd08(bios, data + 0x02) & 0x80) >> 7;
+		p->ramcfg_11_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
+		p->ramcfg_11_03_30 = (nvbios_rd08(bios, data + 0x03) & 0x30) >> 4;
+		p->ramcfg_11_03_c0 = (nvbios_rd08(bios, data + 0x03) & 0xc0) >> 6;
+		p->ramcfg_11_03_f0 = (nvbios_rd08(bios, data + 0x03) & 0xf0) >> 4;
+		p->ramcfg_11_04    = (nvbios_rd08(bios, data + 0x04) & 0xff) >> 0;
+		p->ramcfg_11_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+		p->ramcfg_11_07_02 = (nvbios_rd08(bios, data + 0x07) & 0x02) >> 1;
+		p->ramcfg_11_07_04 = (nvbios_rd08(bios, data + 0x07) & 0x04) >> 2;
+		p->ramcfg_11_07_08 = (nvbios_rd08(bios, data + 0x07) & 0x08) >> 3;
+		p->ramcfg_11_07_10 = (nvbios_rd08(bios, data + 0x07) & 0x10) >> 4;
+		p->ramcfg_11_07_40 = (nvbios_rd08(bios, data + 0x07) & 0x40) >> 6;
+		p->ramcfg_11_07_80 = (nvbios_rd08(bios, data + 0x07) & 0x80) >> 7;
+		p->ramcfg_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
+		p->ramcfg_11_08_02 = (nvbios_rd08(bios, data + 0x08) & 0x02) >> 1;
+		p->ramcfg_11_08_04 = (nvbios_rd08(bios, data + 0x08) & 0x04) >> 2;
+		p->ramcfg_11_08_08 = (nvbios_rd08(bios, data + 0x08) & 0x08) >> 3;
+		p->ramcfg_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
+		p->ramcfg_11_08_20 = (nvbios_rd08(bios, data + 0x08) & 0x20) >> 5;
+		p->ramcfg_11_09    = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
 		break;
 	default:
 		data = 0;
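
The rammap/ramcfg field names above encode table version, byte offset and bit mask: ramcfg_11_07_10 is version 0x11, byte 0x07, mask 0x10, shifted down to bit 0. A sketch of the extraction over made-up entry data:

#include <stdint.h>
#include <stdio.h>

static uint8_t
extract(const uint8_t *entry, uint8_t byte, uint8_t mask, uint8_t shift)
{
	return (entry[byte] & mask) >> shift;
}

int
main(void)
{
	const uint8_t entry[16] = { [0x07] = 0x52 };
	/* ramcfg_11_07_10: byte 0x07, mask 0x10 -> shift 4 */
	printf("%u\n", extract(entry, 0x07, 0x10, 4)); /* 1 */
	return 0;
}
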
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 8c2b7cba5cff..792f017525f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -23,13 +23,11 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/image.h>
 
 struct shadow {
-	struct nvkm_oclass base;
 	u32 skip;
 	const struct nvbios_source *func;
 	void *data;
@@ -38,9 +36,8 @@ struct shadow {
 };
 
 static bool
-shadow_fetch(struct nvkm_bios *bios, u32 upto)
+shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
 {
-	struct shadow *mthd = (void *)nv_object(bios)->oclass;
 	const u32 limit = (upto + 3) & ~3;
 	const u32 start = bios->size;
 	void *data = mthd->data;
@@ -51,65 +48,35 @@ shadow_fetch(struct nvkm_bios *bios, u32 upto)
 	return bios->size >= limit;
 }
 
-static u8
-shadow_rd08(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	if (shadow_fetch(bios, addr + 1))
-		return bios->data[addr];
-	return 0x00;
-}
-
-static u16
-shadow_rd16(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	if (shadow_fetch(bios, addr + 2))
-		return get_unaligned_le16(&bios->data[addr]);
-	return 0x0000;
-}
-
-static u32
-shadow_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	if (shadow_fetch(bios, addr + 4))
-		return get_unaligned_le32(&bios->data[addr]);
-	return 0x00000000;
-}
-
-static struct nvkm_oclass
-shadow_class = {
-	.handle = NV_SUBDEV(VBIOS, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.rd08 = shadow_rd08,
-		.rd16 = shadow_rd16,
-		.rd32 = shadow_rd32,
-	},
-};
-
 static int
-shadow_image(struct nvkm_bios *bios, int idx, struct shadow *mthd)
+shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct nvbios_image image;
 	int score = 1;
 
+	if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
+		nvkm_debug(subdev, "%08x: header fetch failed\n", offset);
+		return 0;
+	}
+
 	if (!nvbios_image(bios, idx, &image)) {
-		nv_debug(bios, "image %d invalid\n", idx);
+		nvkm_debug(subdev, "image %d invalid\n", idx);
 		return 0;
 	}
-	nv_debug(bios, "%08x: type %02x, %d bytes\n",
-		 image.base, image.type, image.size);
+	nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
+		   image.base, image.type, image.size);
 
-	if (!shadow_fetch(bios, image.size)) {
-		nv_debug(bios, "%08x: fetch failed\n", image.base);
+	if (!shadow_fetch(bios, mthd, image.size)) {
+		nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
 		return 0;
 	}
 
 	switch (image.type) {
 	case 0x00:
 		if (nvbios_checksum(&bios->data[image.base], image.size)) {
-			nv_debug(bios, "%08x: checksum failed\n", image.base);
+			nvkm_debug(subdev, "%08x: checksum failed\n",
+				   image.base);
 			if (mthd->func->rw)
 				score += 1;
 			score += 1;
@@ -123,28 +90,17 @@ shadow_image(struct nvkm_bios *bios, int idx, struct shadow *mthd)
 	}
 
 	if (!image.last)
-		score += shadow_image(bios, idx + 1, mthd);
+		score += shadow_image(bios, idx + 1, offset + image.size, mthd);
 	return score;
 }
 
 static int
-shadow_score(struct nvkm_bios *bios, struct shadow *mthd)
-{
-	struct nvkm_oclass *oclass = nv_object(bios)->oclass;
-	int score;
-	nv_object(bios)->oclass = &mthd->base;
-	score = shadow_image(bios, 0, mthd);
-	nv_object(bios)->oclass = oclass;
-	return score;
-
-}
-
-static int
 shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
 {
 	const struct nvbios_source *func = mthd->func;
+	struct nvkm_subdev *subdev = &bios->subdev;
 	if (func->name) {
-		nv_debug(bios, "trying %s...\n", name ? name : func->name);
+		nvkm_debug(subdev, "trying %s...\n", name ? name : func->name);
 		if (func->init) {
 			mthd->data = func->init(bios, name);
 			if (IS_ERR(mthd->data)) {
@@ -152,10 +108,10 @@ shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
 				return 0;
 			}
 		}
-		mthd->score = shadow_score(bios, mthd);
+		mthd->score = shadow_image(bios, 0, 0, mthd);
 		if (func->fini)
 			func->fini(mthd->data);
-		nv_debug(bios, "scored %d\n", mthd->score);
+		nvkm_debug(subdev, "scored %d\n", mthd->score);
 		mthd->data = bios->data;
 		mthd->size = bios->size;
 		bios->data  = NULL;
@@ -178,7 +134,7 @@ shadow_fw_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 static void *
 shadow_fw_init(struct nvkm_bios *bios, const char *name)
 {
-	struct device *dev = &nv_device(bios)->pdev->dev;
+	struct device *dev = bios->subdev.device->dev;
 	const struct firmware *fw;
 	int ret = request_firmware(&fw, name, dev);
 	if (ret)
@@ -198,22 +154,24 @@ shadow_fw = {
 int
 nvbios_shadow(struct nvkm_bios *bios)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct shadow mthds[] = {
-		{ shadow_class, 0, &nvbios_of },
-		{ shadow_class, 0, &nvbios_ramin },
-		{ shadow_class, 0, &nvbios_rom },
-		{ shadow_class, 0, &nvbios_acpi_fast },
-		{ shadow_class, 4, &nvbios_acpi_slow },
-		{ shadow_class, 1, &nvbios_pcirom },
-		{ shadow_class, 1, &nvbios_platform },
-		{ shadow_class }
-	}, *mthd = mthds, *best = NULL;
+		{ 0, &nvbios_of },
+		{ 0, &nvbios_ramin },
+		{ 0, &nvbios_rom },
+		{ 0, &nvbios_acpi_fast },
+		{ 4, &nvbios_acpi_slow },
+		{ 1, &nvbios_pcirom },
+		{ 1, &nvbios_platform },
+		{}
+	}, *mthd, *best = NULL;
 	const char *optarg;
 	char *source;
 	int optlen;
 
 	/* handle user-specified bios source */
-	optarg = nvkm_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
+	optarg = nvkm_stropt(device->cfgopt, "NvBios", &optlen);
 	source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
 	if (source) {
 		/* try to match one of the built-in methods */
@@ -234,7 +192,7 @@ nvbios_shadow(struct nvkm_bios *bios)
 		}
 
 		if (!best->score) {
-			nv_error(bios, "%s invalid\n", source);
+			nvkm_error(subdev, "%s invalid\n", source);
 			kfree(source);
 			source = NULL;
 		}
@@ -259,12 +217,12 @@ nvbios_shadow(struct nvkm_bios *bios)
 	}
 
 	if (!best->score) {
-		nv_fatal(bios, "unable to locate usable image\n");
+		nvkm_error(subdev, "unable to locate usable image\n");
 		return -EINVAL;
 	}
 
-	nv_info(bios, "using image from %s\n", best->func ?
-		best->func->name : source);
+	nvkm_debug(subdev, "using image from %s\n", best->func ?
+		   best->func->name : source);
 	bios->data = best->data;
 	bios->size = best->size;
 	kfree(source);
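
With the oclass indirection gone, nvbios_shadow reduces to a plain loop: try each source, score the image it produces, keep the best. A standalone sketch of that selection logic with stand-in sources (try_rom/try_ramin and their scores are fabricated; the real sources are PROM, PRAMIN, ACPI and PCI ROM, as in the mthds[] table above):

#include <stdio.h>

struct method {
	const char *name;
	int score;
	int (*try)(void);
};

static int try_rom(void)   { return 0; } /* e.g. checksum failed */
static int try_ramin(void) { return 3; }

int
main(void)
{
	struct method mthds[] = {
		{ "rom",   0, try_rom },
		{ "ramin", 0, try_ramin },
		{ 0 }
	}, *mthd, *best = NULL;

	for (mthd = mthds; mthd->try; mthd++) {
		mthd->score = mthd->try();
		if (!best || mthd->score > best->score)
			best = mthd;
	}

	if (!best || !best->score) {
		fprintf(stderr, "unable to locate usable image\n");
		return 1;
	}
	printf("using image from %s\n", best->name);
	return 0;
}
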
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index f9d0eb5647fa..8fecb5ff22a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -22,14 +22,12 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
 #if defined(CONFIG_ACPI) && defined(CONFIG_X86)
 int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
 #else
 static inline bool
-nouveau_acpi_rom_supported(struct pci_dev *pdev)
+nouveau_acpi_rom_supported(struct device *dev)
 {
 	return false;
 }
@@ -90,7 +88,7 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 static void *
 acpi_init(struct nvkm_bios *bios, const char *name)
 {
-	if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev))
+	if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
 		return ERR_PTR(-ENODEV);
 	return NULL;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4c19a7dba803..bd60d7dd09f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -21,8 +21,7 @@
  *
  */
 #include "priv.h"
-
-#include <core/device.h>
+#include <core/pci.h>
 
 #if defined(__powerpc__)
 struct priv {
@@ -44,7 +43,7 @@ of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 static void *
 of_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev;
 	struct device_node *dn;
 	struct priv *priv;
 	if (!(dn = pci_device_to_OF_node(pdev)))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
index 1b045483dc87..9b91da09dc5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
@@ -22,7 +22,7 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
+#include <core/pci.h>
 
 struct priv {
 	struct pci_dev *pdev;
@@ -53,10 +53,16 @@ pcirom_fini(void *data)
 static void *
 pcirom_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct nvkm_device *device = bios->subdev.device;
 	struct priv *priv = NULL;
+	struct pci_dev *pdev;
 	int ret;
 
+	if (device->func->pci)
+		pdev = device->func->pci(device)->pdev;
+	else
+		return ERR_PTR(-ENODEV);
+
 	if (!(ret = pci_enable_rom(pdev))) {
 		if (ret = -ENOMEM,
 		    (priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
@@ -85,10 +91,16 @@ nvbios_pcirom = {
 static void *
 platform_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct nvkm_device *device = bios->subdev.device;
+	struct pci_dev *pdev;
 	struct priv *priv;
 	int ret = -ENOMEM;
 
+	if (device->func->pci)
+		pdev = device->func->pci(device)->pdev;
+	else
+		return ERR_PTR(-ENODEV);
+
 	if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
 		if (ret = -ENODEV,
 		    (priv->rom = pci_platform_rom(pdev, &priv->size)))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index abe8ae4d3a9f..0f537c22804c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -22,8 +22,6 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
 struct priv {
 	struct nvkm_bios *bios;
 	u32 bar0;
@@ -32,10 +30,11 @@ struct priv {
 static u32
 pramin_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 {
+	struct nvkm_device *device = bios->subdev.device;
 	u32 i;
 	if (offset + length <= 0x00100000) {
 		for (i = offset; i < offset + length; i += 4)
-			*(u32 *)&bios->data[i] = nv_rd32(bios, 0x700000 + i);
+			*(u32 *)&bios->data[i] = nvkm_rd32(device, 0x700000 + i);
 		return length;
 	}
 	return 0;
@@ -46,7 +45,8 @@ pramin_fini(void *data)
 {
 	struct priv *priv = data;
 	if (priv) {
-		nv_wr32(priv->bios, 0x001700, priv->bar0);
+		struct nvkm_device *device = priv->bios->subdev.device;
+		nvkm_wr32(device, 0x001700, priv->bar0);
 		kfree(priv);
 	}
 }
@@ -54,21 +54,23 @@ pramin_fini(void *data)
 static void *
 pramin_init(struct nvkm_bios *bios, const char *name)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct priv *priv = NULL;
 	u64 addr = 0;
 
 	/* PRAMIN always potentially available prior to nv50 */
-	if (nv_device(bios)->card_type < NV_50)
+	if (device->card_type < NV_50)
 		return NULL;
 
 	/* we can't get the bios image pointer without PDISP */
-	if (nv_device(bios)->card_type >= GM100)
-		addr = nv_rd32(bios, 0x021c04);
+	if (device->card_type >= GM100)
+		addr = nvkm_rd32(device, 0x021c04);
 	else
-	if (nv_device(bios)->card_type >= NV_C0)
-		addr = nv_rd32(bios, 0x022500);
+	if (device->card_type >= NV_C0)
+		addr = nvkm_rd32(device, 0x022500);
 	if (addr & 0x00000001) {
-		nv_debug(bios, "... display disabled\n");
+		nvkm_debug(subdev, "... display disabled\n");
 		return ERR_PTR(-ENODEV);
 	}
 
@@ -76,32 +78,32 @@ pramin_init(struct nvkm_bios *bios, const char *name)
 	 * important as we don't want to be touching vram on an
 	 * uninitialised board
 	 */
-	addr = nv_rd32(bios, 0x619f04);
+	addr = nvkm_rd32(device, 0x619f04);
 	if (!(addr & 0x00000008)) {
-		nv_debug(bios, "... not enabled\n");
+		nvkm_debug(subdev, "... not enabled\n");
 		return ERR_PTR(-ENODEV);
 	}
 	if ( (addr & 0x00000003) != 1) {
-		nv_debug(bios, "... not in vram\n");
+		nvkm_debug(subdev, "... not in vram\n");
 		return ERR_PTR(-ENODEV);
 	}
 
 	/* some alternate method inherited from xf86-video-nv... */
 	addr = (addr & 0xffffff00) << 8;
 	if (!addr) {
-		addr  = (u64)nv_rd32(bios, 0x001700) << 16;
+		addr  = (u64)nvkm_rd32(device, 0x001700) << 16;
 		addr += 0xf0000;
 	}
 
 	/* modify bar0 PRAMIN window to cover the bios image */
 	if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
-		nv_error(bios, "... out of memory\n");
+		nvkm_error(subdev, "... out of memory\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	priv->bios = bios;
-	priv->bar0 = nv_rd32(bios, 0x001700);
-	nv_wr32(bios, 0x001700, addr >> 16);
+	priv->bar0 = nvkm_rd32(device, 0x001700);
+	nvkm_wr32(device, 0x001700, addr >> 16);
 	return priv;
 }
 
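pramin_init and pramin_fini bracket shadowing with a save/point/restore of the BAR0 PRAMIN window at 0x001700. A sketch of that pattern against a simulated register file (the 0x001700 offset matches the code above; everything else is fabricated):

#include <stdint.h>
#include <stdio.h>

static uint32_t mmio[0x2000];		/* pretend MMIO, word-indexed */

static uint32_t rd32(uint32_t addr) { return mmio[addr >> 2]; }
static void     wr32(uint32_t addr, uint32_t v) { mmio[addr >> 2] = v; }

int
main(void)
{
	uint64_t image_addr = 0x12340000;
	uint32_t saved = rd32(0x001700);	/* save current window */

	wr32(0x001700, image_addr >> 16);	/* point window at image */
	/* ... read the image through the window ... */
	wr32(0x001700, saved);			/* restore on fini */
	printf("window restored to %08x\n", (unsigned)rd32(0x001700));
	return 0;
}
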
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
index 6ec3b237925e..ffa4b395220a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
@@ -22,15 +22,16 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
+#include <subdev/pci.h>
 
 static u32
 prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 {
+	struct nvkm_device *device = data;
 	u32 i;
 	if (offset + length <= 0x00100000) {
 		for (i = offset; i < offset + length; i += 4)
-			*(u32 *)&bios->data[i] = nv_rd32(bios, 0x300000 + i);
+			*(u32 *)&bios->data[i] = nvkm_rd32(device, 0x300000 + i);
 		return length;
 	}
 	return 0;
@@ -39,25 +40,18 @@ prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 static void
 prom_fini(void *data)
 {
-	struct nvkm_bios *bios = data;
-	if (nv_device(bios)->card_type < NV_50)
-		nv_mask(bios, 0x001850, 0x00000001, 0x00000001);
-	else
-		nv_mask(bios, 0x088050, 0x00000001, 0x00000001);
+	struct nvkm_device *device = data;
+	nvkm_pci_rom_shadow(device->pci, true);
 }
 
 static void *
 prom_init(struct nvkm_bios *bios, const char *name)
 {
-	if (nv_device(bios)->card_type < NV_50) {
-		if (nv_device(bios)->card_type == NV_40 &&
-		    nv_device(bios)->chipset >= 0x4c)
-			return ERR_PTR(-ENODEV);
-		nv_mask(bios, 0x001850, 0x00000001, 0x00000000);
-	} else {
-		nv_mask(bios, 0x088050, 0x00000001, 0x00000000);
-	}
-	return bios;
+	struct nvkm_device *device = bios->subdev.device;
+	if (device->card_type == NV_40 && device->chipset >= 0x4c)
+		return ERR_PTR(-ENODEV);
+	nvkm_pci_rom_shadow(device->pci, false);
+	return device;
 }
 
 const struct nvbios_source
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
index 249ff6d583df..a54cfec0550d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
@@ -25,8 +25,6 @@
 #include <subdev/bios/bit.h>
 #include <subdev/bios/therm.h>
 
-#include <core/device.h>
-
 static u16
 therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
 {
@@ -35,24 +33,24 @@ therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 1)
-			therm = nv_ro16(bios, bit_P.offset + 12);
+			therm = nvbios_rd16(bios, bit_P.offset + 12);
 		else if (bit_P.version == 2)
-			therm = nv_ro16(bios, bit_P.offset + 16);
+			therm = nvbios_rd16(bios, bit_P.offset + 16);
 		else
-			nv_error(bios,
-				"unknown offset for thermal in BIT P %d\n",
-				bit_P.version);
+			nvkm_error(&bios->subdev,
+				   "unknown offset for thermal in BIT P %d\n",
+				   bit_P.version);
 	}
 
 	/* exit now if we haven't found the thermal table */
 	if (!therm)
 		return 0x0000;
 
-	*ver = nv_ro08(bios, therm + 0);
-	*hdr = nv_ro08(bios, therm + 1);
-	*len = nv_ro08(bios, therm + 2);
-	*cnt = nv_ro08(bios, therm + 3);
-	return therm + nv_ro08(bios, therm + 1);
+	*ver = nvbios_rd08(bios, therm + 0);
+	*hdr = nvbios_rd08(bios, therm + 1);
+	*len = nvbios_rd08(bios, therm + 2);
+	*cnt = nvbios_rd08(bios, therm + 3);
+	return therm + nvbios_rd08(bios, therm + 1);
 }
 
 static u16
@@ -83,9 +81,9 @@ nvbios_therm_sensor_parse(struct nvkm_bios *bios,
 	sensor_section = -1;
 	i = 0;
 	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
-		s16 value = nv_ro16(bios, entry + 1);
+		s16 value = nvbios_rd16(bios, entry + 1);
 
-		switch (nv_ro08(bios, entry + 0)) {
+		switch (nvbios_rd08(bios, entry + 0)) {
 		case 0x0:
 			thrs_section = value;
 			if (value > 0)
@@ -94,7 +92,7 @@ nvbios_therm_sensor_parse(struct nvkm_bios *bios,
 		case 0x01:
 			sensor_section++;
 			if (sensor_section == 0) {
-				offset = ((s8) nv_ro08(bios, entry + 2)) / 2;
+				offset = ((s8) nvbios_rd08(bios, entry + 2)) / 2;
 				sensor->offset_constant = offset;
 			}
 			break;
@@ -165,9 +163,9 @@ nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
 	fan->nr_fan_trip = 0;
 	fan->fan_mode = NVBIOS_THERM_FAN_OTHER;
 	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
-		s16 value = nv_ro16(bios, entry + 1);
+		s16 value = nvbios_rd16(bios, entry + 1);
 
-		switch (nv_ro08(bios, entry + 0)) {
+		switch (nvbios_rd08(bios, entry + 0)) {
 		case 0x22:
 			fan->min_duty = value & 0xff;
 			fan->max_duty = (value & 0xff00) >> 8;
@@ -198,14 +196,14 @@ nvbios_therm_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
 		case 0x46:
 			if (fan->fan_mode > NVBIOS_THERM_FAN_LINEAR)
 				fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
-			fan->linear_min_temp = nv_ro08(bios, entry + 1);
-			fan->linear_max_temp = nv_ro08(bios, entry + 2);
+			fan->linear_min_temp = nvbios_rd08(bios, entry + 1);
+			fan->linear_max_temp = nvbios_rd08(bios, entry + 2);
 			break;
 		}
 	}
 
 	/* starting from fermi, fan management is always linear */
-	if (nv_device(bios)->card_type >= NV_C0 &&
+	if (bios->subdev.device->card_type >= NV_C0 &&
 		fan->fan_mode == NVBIOS_THERM_FAN_OTHER) {
 		fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 763fd29a58f2..99f6432ac0af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -34,27 +34,27 @@ nvbios_timingTe(struct nvkm_bios *bios,
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 1)
-			timing = nv_ro16(bios, bit_P.offset + 4);
+			timing = nvbios_rd16(bios, bit_P.offset + 4);
 		else
 		if (bit_P.version == 2)
-			timing = nv_ro16(bios, bit_P.offset + 8);
+			timing = nvbios_rd16(bios, bit_P.offset + 8);
 
 		if (timing) {
-			*ver = nv_ro08(bios, timing + 0);
+			*ver = nvbios_rd08(bios, timing + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, timing + 1);
-				*cnt = nv_ro08(bios, timing + 2);
-				*len = nv_ro08(bios, timing + 3);
+				*hdr = nvbios_rd08(bios, timing + 1);
+				*cnt = nvbios_rd08(bios, timing + 2);
+				*len = nvbios_rd08(bios, timing + 3);
 				*snr = 0;
 				*ssz = 0;
 				return timing;
 			case 0x20:
-				*hdr = nv_ro08(bios, timing + 1);
-				*cnt = nv_ro08(bios, timing + 5);
-				*len = nv_ro08(bios, timing + 2);
-				*snr = nv_ro08(bios, timing + 4);
-				*ssz = nv_ro08(bios, timing + 3);
+				*hdr = nvbios_rd08(bios, timing + 1);
+				*cnt = nvbios_rd08(bios, timing + 5);
+				*len = nvbios_rd08(bios, timing + 2);
+				*snr = nvbios_rd08(bios, timing + 4);
+				*ssz = nvbios_rd08(bios, timing + 3);
 				return timing;
 			default:
 				break;
@@ -90,18 +90,20 @@ nvbios_timingEp(struct nvkm_bios *bios, int idx,
 	p->timing_hdr = *hdr;
 	switch (!!data * *ver) {
 	case 0x10:
-		p->timing_10_WR    = nv_ro08(bios, data + 0x00);
-		p->timing_10_WTR   = nv_ro08(bios, data + 0x01);
-		p->timing_10_CL    = nv_ro08(bios, data + 0x02);
-		p->timing_10_RC    = nv_ro08(bios, data + 0x03);
-		p->timing_10_RFC   = nv_ro08(bios, data + 0x05);
-		p->timing_10_RAS   = nv_ro08(bios, data + 0x07);
-		p->timing_10_RP    = nv_ro08(bios, data + 0x09);
-		p->timing_10_RCDRD = nv_ro08(bios, data + 0x0a);
-		p->timing_10_RCDWR = nv_ro08(bios, data + 0x0b);
-		p->timing_10_RRD   = nv_ro08(bios, data + 0x0c);
-		p->timing_10_13    = nv_ro08(bios, data + 0x0d);
-		p->timing_10_ODT   = nv_ro08(bios, data + 0x0e) & 0x07;
+		p->timing_10_WR    = nvbios_rd08(bios, data + 0x00);
+		p->timing_10_WTR   = nvbios_rd08(bios, data + 0x01);
+		p->timing_10_CL    = nvbios_rd08(bios, data + 0x02);
+		p->timing_10_RC    = nvbios_rd08(bios, data + 0x03);
+		p->timing_10_RFC   = nvbios_rd08(bios, data + 0x05);
+		p->timing_10_RAS   = nvbios_rd08(bios, data + 0x07);
+		p->timing_10_RP    = nvbios_rd08(bios, data + 0x09);
+		p->timing_10_RCDRD = nvbios_rd08(bios, data + 0x0a);
+		p->timing_10_RCDWR = nvbios_rd08(bios, data + 0x0b);
+		p->timing_10_RRD   = nvbios_rd08(bios, data + 0x0c);
+		p->timing_10_13    = nvbios_rd08(bios, data + 0x0d);
+		p->timing_10_ODT   = nvbios_rd08(bios, data + 0x0e) & 0x07;
+		if (p->ramcfg_ver >= 0x10)
+			p->ramcfg_RON = nvbios_rd08(bios, data + 0x0e) & 0x07;
 
 		p->timing_10_24  = 0xff;
 		p->timing_10_21  = 0;
@@ -112,45 +114,45 @@ nvbios_timingEp(struct nvkm_bios *bios, int idx,
 
 		switch (min_t(u8, *hdr, 25)) {
 		case 25:
-			p->timing_10_24  = nv_ro08(bios, data + 0x18);
+			p->timing_10_24  = nvbios_rd08(bios, data + 0x18);
 		case 24:
 		case 23:
 		case 22:
-			p->timing_10_21  = nv_ro08(bios, data + 0x15);
+			p->timing_10_21  = nvbios_rd08(bios, data + 0x15);
 		case 21:
-			p->timing_10_20  = nv_ro08(bios, data + 0x14);
+			p->timing_10_20  = nvbios_rd08(bios, data + 0x14);
 		case 20:
-			p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+			p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
 		case 19:
-			p->timing_10_18  = nv_ro08(bios, data + 0x12);
+			p->timing_10_18  = nvbios_rd08(bios, data + 0x12);
 		case 18:
 		case 17:
-			p->timing_10_16  = nv_ro08(bios, data + 0x10);
+			p->timing_10_16  = nvbios_rd08(bios, data + 0x10);
 		}
 
 		break;
 	case 0x20:
-		p->timing[0] = nv_ro32(bios, data + 0x00);
-		p->timing[1] = nv_ro32(bios, data + 0x04);
-		p->timing[2] = nv_ro32(bios, data + 0x08);
-		p->timing[3] = nv_ro32(bios, data + 0x0c);
-		p->timing[4] = nv_ro32(bios, data + 0x10);
-		p->timing[5] = nv_ro32(bios, data + 0x14);
-		p->timing[6] = nv_ro32(bios, data + 0x18);
-		p->timing[7] = nv_ro32(bios, data + 0x1c);
-		p->timing[8] = nv_ro32(bios, data + 0x20);
-		p->timing[9] = nv_ro32(bios, data + 0x24);
-		p->timing[10] = nv_ro32(bios, data + 0x28);
-		p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0;
-		p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4;
-		p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6;
-		p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0;
-		temp = nv_ro16(bios, data + 0x2c);
+		p->timing[0] = nvbios_rd32(bios, data + 0x00);
+		p->timing[1] = nvbios_rd32(bios, data + 0x04);
+		p->timing[2] = nvbios_rd32(bios, data + 0x08);
+		p->timing[3] = nvbios_rd32(bios, data + 0x0c);
+		p->timing[4] = nvbios_rd32(bios, data + 0x10);
+		p->timing[5] = nvbios_rd32(bios, data + 0x14);
+		p->timing[6] = nvbios_rd32(bios, data + 0x18);
+		p->timing[7] = nvbios_rd32(bios, data + 0x1c);
+		p->timing[8] = nvbios_rd32(bios, data + 0x20);
+		p->timing[9] = nvbios_rd32(bios, data + 0x24);
+		p->timing[10] = nvbios_rd32(bios, data + 0x28);
+		p->timing_20_2e_03 = (nvbios_rd08(bios, data + 0x2e) & 0x03) >> 0;
+		p->timing_20_2e_30 = (nvbios_rd08(bios, data + 0x2e) & 0x30) >> 4;
+		p->timing_20_2e_c0 = (nvbios_rd08(bios, data + 0x2e) & 0xc0) >> 6;
+		p->timing_20_2f_03 = (nvbios_rd08(bios, data + 0x2f) & 0x03) >> 0;
+		temp = nvbios_rd16(bios, data + 0x2c);
 		p->timing_20_2c_003f = (temp & 0x003f) >> 0;
 		p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
-		p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0;
-		p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3;
-		temp = nv_ro16(bios, data + 0x31);
+		p->timing_20_30_07 = (nvbios_rd08(bios, data + 0x30) & 0x07) >> 0;
+		p->timing_20_30_f8 = (nvbios_rd08(bios, data + 0x30) & 0xf8) >> 3;
+		temp = nvbios_rd16(bios, data + 0x31);
 		p->timing_20_31_0007 = (temp & 0x0007) >> 0;
 		p->timing_20_31_0078 = (temp & 0x0078) >> 3;
 		p->timing_20_31_0780 = (temp & 0x0780) >> 7;
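
The "switch (!!data * *ver)" construct in nvbios_timingEp is worth spelling out: when the table lookup failed, data is zero, so !!data collapses the selector to 0 and every versioned case is skipped in a single test. A standalone illustration with made-up version numbers:

	#include <stdint.h>
	#include <stdio.h>

	static void parse(uint16_t data, uint8_t ver)
	{
		switch (!!data * ver) {
		case 0x10: printf("parse v1.0 table at %04x\n", data); break;
		case 0x20: printf("parse v2.0 table at %04x\n", data); break;
		default:   printf("no table (data=%04x ver=%02x)\n", data, ver); break;
		}
	}

	int main(void)
	{
		parse(0x1234, 0x10);	/* table found, version 1.0 */
		parse(0x0000, 0x10);	/* lookup failed: falls to default */
		return 0;
	}
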
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index e95b69faa82e..2f13db745948 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -33,15 +33,15 @@ nvbios_vmap_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2) {
-			vmap = nv_ro16(bios, bit_P.offset + 0x20);
+			vmap = nvbios_rd16(bios, bit_P.offset + 0x20);
 			if (vmap) {
-				*ver = nv_ro08(bios, vmap + 0);
+				*ver = nvbios_rd08(bios, vmap + 0);
 				switch (*ver) {
 				case 0x10:
 				case 0x20:
-					*hdr = nv_ro08(bios, vmap + 1);
-					*cnt = nv_ro08(bios, vmap + 3);
-					*len = nv_ro08(bios, vmap + 2);
+					*hdr = nvbios_rd08(bios, vmap + 1);
+					*cnt = nvbios_rd08(bios, vmap + 3);
+					*len = nvbios_rd08(bios, vmap + 2);
 					return vmap;
 				default:
 					break;
@@ -88,23 +88,23 @@ nvbios_vmap_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
 	switch (!!vmap * *ver) {
 	case 0x10:
 		info->link   = 0xff;
-		info->min    = nv_ro32(bios, vmap + 0x00);
-		info->max    = nv_ro32(bios, vmap + 0x04);
-		info->arg[0] = nv_ro32(bios, vmap + 0x08);
-		info->arg[1] = nv_ro32(bios, vmap + 0x0c);
-		info->arg[2] = nv_ro32(bios, vmap + 0x10);
+		info->min    = nvbios_rd32(bios, vmap + 0x00);
+		info->max    = nvbios_rd32(bios, vmap + 0x04);
+		info->arg[0] = nvbios_rd32(bios, vmap + 0x08);
+		info->arg[1] = nvbios_rd32(bios, vmap + 0x0c);
+		info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
 		break;
 	case 0x20:
-		info->unk0   = nv_ro08(bios, vmap + 0x00);
-		info->link   = nv_ro08(bios, vmap + 0x01);
-		info->min    = nv_ro32(bios, vmap + 0x02);
-		info->max    = nv_ro32(bios, vmap + 0x06);
-		info->arg[0] = nv_ro32(bios, vmap + 0x0a);
-		info->arg[1] = nv_ro32(bios, vmap + 0x0e);
-		info->arg[2] = nv_ro32(bios, vmap + 0x12);
-		info->arg[3] = nv_ro32(bios, vmap + 0x16);
-		info->arg[4] = nv_ro32(bios, vmap + 0x1a);
-		info->arg[5] = nv_ro32(bios, vmap + 0x1e);
+		info->unk0   = nvbios_rd08(bios, vmap + 0x00);
+		info->link   = nvbios_rd08(bios, vmap + 0x01);
+		info->min    = nvbios_rd32(bios, vmap + 0x02);
+		info->max    = nvbios_rd32(bios, vmap + 0x06);
+		info->arg[0] = nvbios_rd32(bios, vmap + 0x0a);
+		info->arg[1] = nvbios_rd32(bios, vmap + 0x0e);
+		info->arg[2] = nvbios_rd32(bios, vmap + 0x12);
+		info->arg[3] = nvbios_rd32(bios, vmap + 0x16);
+		info->arg[4] = nvbios_rd32(bios, vmap + 0x1a);
+		info->arg[5] = nvbios_rd32(bios, vmap + 0x1e);
 		break;
 	}
 	return vmap;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 8454ab7c4a3d..615804c3887b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -33,30 +33,30 @@ nvbios_volt_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			volt = nv_ro16(bios, bit_P.offset + 0x0c);
+			volt = nvbios_rd16(bios, bit_P.offset + 0x0c);
 		else
 		if (bit_P.version == 1)
-			volt = nv_ro16(bios, bit_P.offset + 0x10);
+			volt = nvbios_rd16(bios, bit_P.offset + 0x10);
 
 		if (volt) {
-			*ver = nv_ro08(bios, volt + 0);
+			*ver = nvbios_rd08(bios, volt + 0);
 			switch (*ver) {
 			case 0x12:
 				*hdr = 5;
-				*cnt = nv_ro08(bios, volt + 2);
-				*len = nv_ro08(bios, volt + 1);
+				*cnt = nvbios_rd08(bios, volt + 2);
+				*len = nvbios_rd08(bios, volt + 1);
 				return volt;
 			case 0x20:
-				*hdr = nv_ro08(bios, volt + 1);
-				*cnt = nv_ro08(bios, volt + 2);
-				*len = nv_ro08(bios, volt + 3);
+				*hdr = nvbios_rd08(bios, volt + 1);
+				*cnt = nvbios_rd08(bios, volt + 2);
+				*len = nvbios_rd08(bios, volt + 3);
 				return volt;
 			case 0x30:
 			case 0x40:
 			case 0x50:
-				*hdr = nv_ro08(bios, volt + 1);
-				*cnt = nv_ro08(bios, volt + 3);
-				*len = nv_ro08(bios, volt + 2);
+				*hdr = nvbios_rd08(bios, volt + 1);
+				*cnt = nvbios_rd08(bios, volt + 3);
+				*len = nvbios_rd08(bios, volt + 2);
 				return volt;
 			}
 		}
@@ -73,28 +73,28 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 	memset(info, 0x00, sizeof(*info));
 	switch (!!volt * *ver) {
 	case 0x12:
-		info->vidmask = nv_ro08(bios, volt + 0x04);
+		info->vidmask = nvbios_rd08(bios, volt + 0x04);
 		break;
 	case 0x20:
-		info->vidmask = nv_ro08(bios, volt + 0x05);
+		info->vidmask = nvbios_rd08(bios, volt + 0x05);
 		break;
 	case 0x30:
-		info->vidmask = nv_ro08(bios, volt + 0x04);
+		info->vidmask = nvbios_rd08(bios, volt + 0x04);
 		break;
 	case 0x40:
-		info->base    = nv_ro32(bios, volt + 0x04);
-		info->step    = nv_ro16(bios, volt + 0x08);
-		info->vidmask = nv_ro08(bios, volt + 0x0b);
+		info->base    = nvbios_rd32(bios, volt + 0x04);
+		info->step    = nvbios_rd16(bios, volt + 0x08);
+		info->vidmask = nvbios_rd08(bios, volt + 0x0b);
 		/*XXX*/
 		info->min     = 0;
 		info->max     = info->base;
 		break;
 	case 0x50:
-		info->vidmask = nv_ro08(bios, volt + 0x06);
-		info->min     = nv_ro32(bios, volt + 0x0a);
-		info->max     = nv_ro32(bios, volt + 0x0e);
-		info->base    = nv_ro32(bios, volt + 0x12) & 0x00ffffff;
-		info->step    = nv_ro16(bios, volt + 0x16);
+		info->vidmask = nvbios_rd08(bios, volt + 0x06);
+		info->min     = nvbios_rd32(bios, volt + 0x0a);
+		info->max     = nvbios_rd32(bios, volt + 0x0e);
+		info->base    = nvbios_rd32(bios, volt + 0x12) & 0x00ffffff;
+		info->step    = nvbios_rd16(bios, volt + 0x16);
 		break;
 	}
 	return volt;
@@ -121,12 +121,12 @@ nvbios_volt_entry_parse(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len,
 	switch (!!volt * *ver) {
 	case 0x12:
 	case 0x20:
-		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
-		info->vid     = nv_ro08(bios, volt + 0x01);
+		info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
+		info->vid     = nvbios_rd08(bios, volt + 0x01);
 		break;
 	case 0x30:
-		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
-		info->vid     = nv_ro08(bios, volt + 0x01) >> 2;
+		info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
+		info->vid     = nvbios_rd08(bios, volt + 0x01) >> 2;
 		break;
 	case 0x40:
 	case 0x50:
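
For the version-0x40 table above only base, step and vidmask are parsed here; the arithmetic that typically turns such a pair into per-VID voltages is uv = base + step * vid. A toy computation with invented values (the real consumer of these fields lies outside this hunk):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t base = 825000;	/* microvolts, hypothetical */
		uint16_t step = 6250;	/* microvolts per VID step, hypothetical */
		uint8_t vidmask = 0x3f;	/* six usable VID bits */

		for (uint8_t vid = 0; vid <= 4; vid++)
			printf("vid %u -> %u uV\n", (unsigned)(vid & vidmask),
			       (unsigned)(base + step * (vid & vidmask)));
		return 0;
	}
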
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
index 63a5e1b5cb3c..250fc42d8608 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
@@ -30,12 +30,12 @@ dcb_xpiod_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
 	u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
 	if (data && *ver >= 0x40 && *hdr >= 0x06) {
-		u16 xpio = nv_ro16(bios, data + 0x04);
+		u16 xpio = nvbios_rd16(bios, data + 0x04);
 		if (xpio) {
-			*ver = nv_ro08(bios, data + 0x00);
-			*hdr = nv_ro08(bios, data + 0x01);
-			*cnt = nv_ro08(bios, data + 0x02);
-			*len = nv_ro08(bios, data + 0x03);
+			*ver = nvbios_rd08(bios, data + 0x00);
+			*hdr = nvbios_rd08(bios, data + 0x01);
+			*cnt = nvbios_rd08(bios, data + 0x02);
+			*len = nvbios_rd08(bios, data + 0x03);
 			return xpio;
 		}
 	}
@@ -48,12 +48,12 @@ dcb_xpio_table(struct nvkm_bios *bios, u8 idx,
 {
 	u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
 	if (data && idx < *cnt) {
-		u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len));
+		u16 xpio = nvbios_rd16(bios, data + *hdr + (idx * *len));
 		if (xpio) {
-			*ver = nv_ro08(bios, data + 0x00);
-			*hdr = nv_ro08(bios, data + 0x01);
-			*cnt = nv_ro08(bios, data + 0x02);
-			*len = nv_ro08(bios, data + 0x03);
+			*ver = nvbios_rd08(bios, data + 0x00);
+			*hdr = nvbios_rd08(bios, data + 0x01);
+			*cnt = nvbios_rd08(bios, data + 0x02);
+			*len = nvbios_rd08(bios, data + 0x03);
 			return xpio;
 		}
 	}
@@ -66,9 +66,9 @@ dcb_xpio_parse(struct nvkm_bios *bios, u8 idx,
 {
 	u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
 	if (data && *len >= 6) {
-		info->type = nv_ro08(bios, data + 0x04);
-		info->addr = nv_ro08(bios, data + 0x05);
-		info->flags = nv_ro08(bios, data + 0x06);
+		info->type = nvbios_rd08(bios, data + 0x04);
+		info->addr = nvbios_rd08(bios, data + 0x05);
+		info->flags = nvbios_rd08(bios, data + 0x06);
 	}
 	return 0x0000;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
index 83d80b13f149..5fa9e91835c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
@@ -1,3 +1,4 @@
+nvkm-y += nvkm/subdev/bus/base.o
 nvkm-y += nvkm/subdev/bus/hwsq.o
 nvkm-y += nvkm/subdev/bus/nv04.o
 nvkm-y += nvkm/subdev/bus/nv31.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
index 0698764354a2..dc5a10f18bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
@@ -1,7 +1,5 @@
 /*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Jerome Glisse.
+ * Copyright 2015 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -21,42 +19,46 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * Authors: Dave Airlie
- *          Alex Deucher
- *          Jerome Glisse
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
+#include "priv.h"
 
-/* this file defines the CHIP_  and family flags used in the pciids,
- * its is common between kms and non-kms because duplicating it and
- * changing one place is fail.
- */
-#ifndef AMDGPU_FAMILY_H
-#define AMDGPU_FAMILY_H
-/*
- * Supported ASIC types
- */
-enum amdgpu_asic_type {
-	CHIP_BONAIRE = 0,
-	CHIP_KAVERI,
-	CHIP_KABINI,
-	CHIP_HAWAII,
-	CHIP_MULLINS,
-	CHIP_TOPAZ,
-	CHIP_TONGA,
-	CHIP_CARRIZO,
-	CHIP_LAST,
-};
+static void
+nvkm_bus_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_bus *bus = nvkm_bus(subdev);
+	bus->func->intr(bus);
+}
 
-/*
- * Chip flags
- */
-enum amdgpu_chip_flags {
-	AMDGPU_ASIC_MASK = 0x0000ffffUL,
-	AMDGPU_FLAGS_MASK  = 0xffff0000UL,
-	AMDGPU_IS_MOBILITY = 0x00010000UL,
-	AMDGPU_IS_APU      = 0x00020000UL,
-	AMDGPU_IS_PX       = 0x00040000UL,
-	AMDGPU_EXP_HW_SUPPORT = 0x00080000UL,
+static int
+nvkm_bus_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_bus *bus = nvkm_bus(subdev);
+	bus->func->init(bus);
+	return 0;
+}
+
+static void *
+nvkm_bus_dtor(struct nvkm_subdev *subdev)
+{
+	return nvkm_bus(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_bus = {
+	.dtor = nvkm_bus_dtor,
+	.init = nvkm_bus_init,
+	.intr = nvkm_bus_intr,
 };
 
-#endif
+int
+nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_bus **pbus)
+{
+	struct nvkm_bus *bus;
+	if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_bus, device, index, 0, &bus->subdev);
+	bus->func = func;
+	return 0;
+}
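
The nvkm_subdev_func/nvkm_bus_func pairing introduced here is a two-level vtable: the generic subdev hooks downcast via nvkm_bus() and forward into the per-implementation table. A compressed stand-in version of that dispatch:

	#include <stdio.h>

	struct bus;
	struct bus_func {
		void (*intr)(struct bus *);
	};
	struct bus { const struct bus_func *func; };

	/* generic layer forwards, as nvkm_bus_intr() does above */
	static void generic_intr(struct bus *bus)
	{
		bus->func->intr(bus);
	}

	static void nv04_intr(struct bus *bus)
	{
		(void)bus;
		printf("nv04 intr\n");
	}

	static const struct bus_func nv04 = { .intr = nv04_intr };

	int main(void)
	{
		struct bus b = { .func = &nv04 };
		generic_intr(&b);
		return 0;
	}
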
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
index cbe699e82593..9700b5c01cc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
@@ -22,37 +22,43 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 #include <subdev/timer.h>
 
 static int
-g94_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size)
+g94_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
 {
-	struct nv50_bus_priv *priv = (void *)pbus;
+	struct nvkm_device *device = bus->subdev.device;
 	int i;
 
-	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
-	nv_wr32(pbus, 0x001304, 0x00000000);
-	nv_wr32(pbus, 0x001318, 0x00000000);
+	nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
+	nvkm_wr32(device, 0x001304, 0x00000000);
+	nvkm_wr32(device, 0x001318, 0x00000000);
 	for (i = 0; i < size; i++)
-		nv_wr32(priv, 0x080000 + (i * 4), data[i]);
-	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
-	nv_wr32(pbus, 0x00130c, 0x00000001);
+		nvkm_wr32(device, 0x080000 + (i * 4), data[i]);
+	nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
+	nvkm_wr32(device, 0x00130c, 0x00000001);
 
-	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
+			break;
+	) < 0)
+		return -ETIMEDOUT;
+
+	return 0;
 }
 
-struct nvkm_oclass *
-g94_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv50_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+static const struct nvkm_bus_func
+g94_bus = {
+	.init = nv50_bus_init,
 	.intr = nv50_bus_intr,
 	.hwsq_exec = g94_bus_hwsq_exec,
 	.hwsq_size = 128,
-}.base;
+};
+
+int
+g94_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&g94_bus, device, index, pbus);
+}
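
The nvkm_msec() form that replaces nv_wait() evaluates its body repeatedly until the body break;s (success) or the time budget lapses, returning a negative value on timeout. A plain-C approximation using a function pointer and an attempt counter in place of the kernel's nanosecond timer (none of these names are the real API):

	#include <stdbool.h>
	#include <stdio.h>

	/* poll until pred() holds or attempts run out; < 0 means timeout */
	static int poll_until(bool (*pred)(void), int attempts)
	{
		while (attempts-- > 0)
			if (pred())
				return attempts;	/* success, budget left */
		return -1;				/* timed out */
	}

	static unsigned fake_reg = 0x100;	/* stands in for MMIO 0x001308 */
	static bool hwsq_idle(void)
	{
		fake_reg >>= 1;			/* pretend hardware clears bit 8 */
		return !(fake_reg & 0x100);
	}

	int main(void)
	{
		printf("hwsq wait: %s\n",
		       poll_until(hwsq_idle, 2000) < 0 ? "-ETIMEDOUT" : "ok");
		return 0;
	}
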
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index ebc63ba968d4..e0930d5fdfb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -22,59 +22,54 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 static void
-gf100_bus_intr(struct nvkm_subdev *subdev)
+gf100_bus_intr(struct nvkm_bus *bus)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
 
 	if (stat & 0x0000000e) {
-		u32 addr = nv_rd32(pbus, 0x009084);
-		u32 data = nv_rd32(pbus, 0x009088);
+		u32 addr = nvkm_rd32(device, 0x009084);
+		u32 data = nvkm_rd32(device, 0x009088);
 
-		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n",
-			 (addr & 0x00000002) ? "write" : "read", data,
-			 (addr & 0x00fffffc),
-			 (stat & 0x00000002) ? "!ENGINE " : "",
-			 (stat & 0x00000004) ? "IBUS " : "",
-			 (stat & 0x00000008) ? "TIMEOUT " : "");
+		nvkm_error(subdev,
+			   "MMIO %s of %08x FAULT at %06x [ %s%s%s]\n",
+			   (addr & 0x00000002) ? "write" : "read", data,
+			   (addr & 0x00fffffc),
+			   (stat & 0x00000002) ? "!ENGINE " : "",
+			   (stat & 0x00000004) ? "IBUS " : "",
+			   (stat & 0x00000008) ? "TIMEOUT " : "");
 
-		nv_wr32(pbus, 0x009084, 0x00000000);
-		nv_wr32(pbus, 0x001100, (stat & 0x0000000e));
+		nvkm_wr32(device, 0x009084, 0x00000000);
+		nvkm_wr32(device, 0x001100, (stat & 0x0000000e));
 		stat &= ~0x0000000e;
 	}
 
 	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0x00000000);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0x00000000);
 	}
 }
 
-static int
-gf100_bus_init(struct nvkm_object *object)
+static void
+gf100_bus_init(struct nvkm_bus *bus)
 {
-	struct nv04_bus_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_bus_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x0000000e);
-	return 0;
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x0000000e);
 }
 
-struct nvkm_oclass *
-gf100_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = gf100_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+static const struct nvkm_bus_func
+gf100_bus = {
+	.init = gf100_bus_init,
 	.intr = gf100_bus_intr,
-}.base;
+};
+
+int
+gf100_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&gf100_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
index 7622b41619a0..79f1cf513b36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include <subdev/bus.h>
+#include "priv.h"
 
 struct nvkm_hwsq {
-	struct nvkm_bus *pbus;
+	struct nvkm_subdev *subdev;
 	u32 addr;
 	u32 data;
 	struct {
@@ -41,13 +41,13 @@ hwsq_cmd(struct nvkm_hwsq *hwsq, int size, u8 data[])
 }
 
 int
-nvkm_hwsq_init(struct nvkm_bus *pbus, struct nvkm_hwsq **phwsq)
+nvkm_hwsq_init(struct nvkm_subdev *subdev, struct nvkm_hwsq **phwsq)
 {
 	struct nvkm_hwsq *hwsq;
 
 	hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
 	if (hwsq) {
-		hwsq->pbus = pbus;
+		hwsq->subdev = subdev;
 		hwsq->addr = ~0;
 		hwsq->data = ~0;
 		memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
@@ -63,21 +63,23 @@ nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
 	struct nvkm_hwsq *hwsq = *phwsq;
 	int ret = 0, i;
 	if (hwsq) {
-		struct nvkm_bus *pbus = hwsq->pbus;
+		struct nvkm_subdev *subdev = hwsq->subdev;
+		struct nvkm_bus *bus = subdev->device->bus;
 		hwsq->c.size = (hwsq->c.size + 4) / 4;
-		if (hwsq->c.size <= pbus->hwsq_size) {
+		if (hwsq->c.size <= bus->func->hwsq_size) {
 			if (exec)
-				ret = pbus->hwsq_exec(pbus, (u32 *)hwsq->c.data,
-						      hwsq->c.size);
+				ret = bus->func->hwsq_exec(bus,
+							   (u32 *)hwsq->c.data,
+								  hwsq->c.size);
 			if (ret)
-				nv_error(pbus, "hwsq exec failed: %d\n", ret);
+				nvkm_error(subdev, "hwsq exec failed: %d\n", ret);
 		} else {
-			nv_error(pbus, "hwsq ucode too large\n");
+			nvkm_error(subdev, "hwsq ucode too large\n");
 			ret = -ENOSPC;
 		}
 
 		for (i = 0; ret && i < hwsq->c.size; i++)
-			nv_error(pbus, "\t0x%08x\n", ((u32 *)hwsq->c.data)[i]);
+			nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]);
 
 		*phwsq = NULL;
 		kfree(hwsq);
@@ -88,7 +90,7 @@ nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
 void
 nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
 {
-	nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data);
+	nvkm_debug(hwsq->subdev, "R[%06x] = %08x\n", addr, data);
 
 	if (hwsq->data != data) {
 		if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
@@ -113,7 +115,7 @@ nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
 void
 nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
 {
-	nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data);
+	nvkm_debug(hwsq->subdev, " FLAG[%02x] = %d\n", flag, data);
 	flag += 0x80;
 	if (data >= 0)
 		flag += 0x20;
@@ -125,7 +127,7 @@ nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
 void
 nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
 {
-	nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data);
+	nvkm_debug(hwsq->subdev, " WAIT[%02x] = %d\n", flag, data);
 	hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
 }
 
@@ -138,6 +140,6 @@ nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec)
 		shift++;
 	}
 
-	nv_debug(hwsq->pbus, "    DELAY = %d ns\n", nsec);
+	nvkm_debug(hwsq->subdev, "    DELAY = %d ns\n", nsec);
 	hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
 }
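
nvkm_hwsq_* above accumulates a byte-coded microprogram (e.g. the 0x5f WAIT opcode) and nvkm_hwsq_fini() rounds it to 32-bit words before handing it to hwsq_exec. A minimal sketch of that accumulate-then-flush pattern; opcodes other than the WAIT byte visible in this diff are not modelled:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct hwsq_buf {
		uint8_t data[512];
		uint32_t size;
	};

	static void cmd(struct hwsq_buf *b, int size, const uint8_t *bytes)
	{
		if (b->size + size <= sizeof(b->data)) {	/* drop commands that would overflow */
			memcpy(&b->data[b->size], bytes, size);
			b->size += size;
		}
	}

	int main(void)
	{
		struct hwsq_buf b = { .size = 0 };
		cmd(&b, 3, (const uint8_t[]){ 0x5f, 0x04, 0x01 });	/* WAIT[04] = 1 */
		/* round to whole 32-bit words, as nvkm_hwsq_fini() does */
		uint32_t words = (b.size + 4) / 4;
		printf("ucode: %u bytes -> %u words\n", (unsigned)b.size, (unsigned)words);
		return 0;
	}
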
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index ebf709c27e3a..8117ec5a1468 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -59,10 +59,9 @@ hwsq_reg(u32 addr)
 static inline int
 hwsq_init(struct hwsq *ram, struct nvkm_subdev *subdev)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
 	int ret;
 
-	ret = nvkm_hwsq_init(pbus, &ram->hwsq);
+	ret = nvkm_hwsq_init(subdev, &ram->hwsq);
 	if (ret)
 		return ret;
 
@@ -85,8 +84,9 @@ hwsq_exec(struct hwsq *ram, bool exec)
 static inline u32
 hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
 {
+	struct nvkm_device *device = ram->subdev->device;
 	if (reg->sequence != ram->sequence)
-		reg->data = nv_rd32(ram->subdev, reg->addr);
+		reg->data = nvkm_rd32(device, reg->addr);
 	return reg->data;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
index 19c8e50eeff7..c80b96789c31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
@@ -22,73 +22,53 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+
+#include <subdev/gpio.h>
 
 static void
-nv04_bus_intr(struct nvkm_subdev *subdev)
+nv04_bus_intr(struct nvkm_bus *bus)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
 
 	if (stat & 0x00000001) {
-		nv_error(pbus, "BUS ERROR\n");
+		nvkm_error(subdev, "BUS ERROR\n");
 		stat &= ~0x00000001;
-		nv_wr32(pbus, 0x001100, 0x00000001);
+		nvkm_wr32(device, 0x001100, 0x00000001);
 	}
 
 	if (stat & 0x00000110) {
-		subdev = nvkm_subdev(subdev, NVDEV_SUBDEV_GPIO);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
+		struct nvkm_gpio *gpio = device->gpio;
+		if (gpio)
+			nvkm_subdev_intr(&gpio->subdev);
 		stat &= ~0x00000110;
-		nv_wr32(pbus, 0x001100, 0x00000110);
+		nvkm_wr32(device, 0x001100, 0x00000110);
 	}
 
 	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0x00000000);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0x00000000);
 	}
 }
 
-static int
-nv04_bus_init(struct nvkm_object *object)
+static void
+nv04_bus_init(struct nvkm_bus *bus)
 {
-	struct nv04_bus_priv *priv = (void *)object;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x00000111);
-
-	return nvkm_bus_init(&priv->base);
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x00000111);
 }
 
+static const struct nvkm_bus_func
+nv04_bus = {
+	.init = nv04_bus_init,
+	.intr = nv04_bus_intr,
+};
+
 int
-nv04_bus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
 {
-	struct nv04_bus_impl *impl = (void *)oclass;
-	struct nv04_bus_priv *priv;
-	int ret;
-
-	ret = nvkm_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = impl->intr;
-	priv->base.hwsq_exec = impl->hwsq_exec;
-	priv->base.hwsq_size = impl->hwsq_size;
-	return 0;
+	return nvkm_bus_new_(&nv04_bus, device, index, pbus);
 }
-
-struct nvkm_oclass *
-nv04_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv04_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
-	.intr = nv04_bus_intr,
-}.base;
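
All the bus interrupt handlers in this series share one shape: stat = pending & enabled, service and acknowledge the bits they recognise, then disable whatever is left so an unknown source cannot storm. A userspace model of that demux with plain variables standing in for the 0x001100/0x001140 registers:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t intr_pending = 0x00000111;	/* 0x001100 stand-in */
	static uint32_t intr_enable  = 0x00000111;	/* 0x001140 stand-in */

	static void bus_intr(void)
	{
		uint32_t stat = intr_pending & intr_enable;

		if (stat & 0x00000001) {	/* known source: handle, then ack */
			printf("BUS ERROR\n");
			stat &= ~0x00000001;
			intr_pending &= ~0x00000001;
		}
		if (stat & 0x00000110) {	/* forward to the GPIO handler */
			printf("gpio intr\n");
			stat &= ~0x00000110;
			intr_pending &= ~0x00000110;
		}
		if (stat) {			/* unknown source: mask it off */
			printf("intr %08x\n", (unsigned)stat);
			intr_enable &= ~stat;
		}
	}

	int main(void)
	{
		bus_intr();
		return 0;
	}
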
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
deleted file mode 100644
index 3ddc8f91b1e3..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __NVKM_BUS_NV04_H__
-#define __NVKM_BUS_NV04_H__
-#include <subdev/bus.h>
-
-struct nv04_bus_priv {
-	struct nvkm_bus base;
-};
-
-int  nv04_bus_ctor(struct nvkm_object *, struct nvkm_object *,
-		   struct nvkm_oclass *, void *, u32,
-		   struct nvkm_object **);
-int  nv50_bus_init(struct nvkm_object *);
-void nv50_bus_intr(struct nvkm_subdev *);
-
-struct nv04_bus_impl {
-	struct nvkm_oclass base;
-	void (*intr)(struct nvkm_subdev *);
-	int  (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
-	u32  hwsq_size;
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
index c5739bce8052..5153d89e1f0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
@@ -22,70 +22,67 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+
+#include <subdev/gpio.h>
+#include <subdev/therm.h>
 
 static void
-nv31_bus_intr(struct nvkm_subdev *subdev)
+nv31_bus_intr(struct nvkm_bus *bus)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
-	u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144);
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
+	u32 gpio = nvkm_rd32(device, 0x001104) & nvkm_rd32(device, 0x001144);
 
 	if (gpio) {
-		subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_GPIO);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
+		struct nvkm_gpio *gpio = device->gpio;
+		if (gpio)
+			nvkm_subdev_intr(&gpio->subdev);
 	}
 
 	if (stat & 0x00000008) {  /* NV41- */
-		u32 addr = nv_rd32(pbus, 0x009084);
-		u32 data = nv_rd32(pbus, 0x009088);
+		u32 addr = nvkm_rd32(device, 0x009084);
+		u32 data = nvkm_rd32(device, 0x009088);
 
-		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
-			 (addr & 0x00000002) ? "write" : "read", data,
-			 (addr & 0x00fffffc));
+		nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
+			   (addr & 0x00000002) ? "write" : "read", data,
+			   (addr & 0x00fffffc));
 
 		stat &= ~0x00000008;
-		nv_wr32(pbus, 0x001100, 0x00000008);
+		nvkm_wr32(device, 0x001100, 0x00000008);
 	}
 
 	if (stat & 0x00070000) {
-		subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
+		struct nvkm_therm *therm = device->therm;
+		if (therm)
+			nvkm_subdev_intr(&therm->subdev);
 		stat &= ~0x00070000;
-		nv_wr32(pbus, 0x001100, 0x00070000);
+		nvkm_wr32(device, 0x001100, 0x00070000);
 	}
 
 	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0x00000000);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0x00000000);
 	}
 }
 
-static int
-nv31_bus_init(struct nvkm_object *object)
+static void
+nv31_bus_init(struct nvkm_bus *bus)
 {
-	struct nv04_bus_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_bus_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x00070008);
-	return 0;
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x00070008);
 }
 
-struct nvkm_oclass *
-nv31_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x31),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv31_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+static const struct nvkm_bus_func
+nv31_bus = {
+	.init = nv31_bus_init,
 	.intr = nv31_bus_intr,
-}.base;
+};
+
+int
+nv31_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&nv31_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
index 1987863d71ee..19e10fdc9291 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
@@ -22,83 +22,84 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
+#include <subdev/therm.h>
 #include <subdev/timer.h>
 
 static int
-nv50_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size)
+nv50_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
 {
-	struct nv50_bus_priv *priv = (void *)pbus;
+	struct nvkm_device *device = bus->subdev.device;
 	int i;
 
-	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
-	nv_wr32(pbus, 0x001304, 0x00000000);
+	nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
+	nvkm_wr32(device, 0x001304, 0x00000000);
 	for (i = 0; i < size; i++)
-		nv_wr32(priv, 0x001400 + (i * 4), data[i]);
-	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
-	nv_wr32(pbus, 0x00130c, 0x00000003);
+		nvkm_wr32(device, 0x001400 + (i * 4), data[i]);
+	nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
+	nvkm_wr32(device, 0x00130c, 0x00000003);
 
-	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
+			break;
+	) < 0)
+		return -ETIMEDOUT;
+
+	return 0;
 }
 
 void
-nv50_bus_intr(struct nvkm_subdev *subdev)
+nv50_bus_intr(struct nvkm_bus *bus)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
 
 	if (stat & 0x00000008) {
-		u32 addr = nv_rd32(pbus, 0x009084);
-		u32 data = nv_rd32(pbus, 0x009088);
+		u32 addr = nvkm_rd32(device, 0x009084);
+		u32 data = nvkm_rd32(device, 0x009088);
 
-		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
-			 (addr & 0x00000002) ? "write" : "read", data,
-			 (addr & 0x00fffffc));
+		nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
+			   (addr & 0x00000002) ? "write" : "read", data,
+			   (addr & 0x00fffffc));
 
 		stat &= ~0x00000008;
-		nv_wr32(pbus, 0x001100, 0x00000008);
+		nvkm_wr32(device, 0x001100, 0x00000008);
 	}
 
 	if (stat & 0x00010000) {
-		subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
+		struct nvkm_therm *therm = device->therm;
+		if (therm)
+			nvkm_subdev_intr(&therm->subdev);
 		stat &= ~0x00010000;
-		nv_wr32(pbus, 0x001100, 0x00010000);
+		nvkm_wr32(device, 0x001100, 0x00010000);
 	}
 
 	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0);
 	}
 }
 
-int
-nv50_bus_init(struct nvkm_object *object)
+void
+nv50_bus_init(struct nvkm_bus *bus)
 {
-	struct nv04_bus_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_bus_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x00010008);
-	return 0;
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x00010008);
 }
 
-struct nvkm_oclass *
-nv50_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv50_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+static const struct nvkm_bus_func
+nv50_bus = {
+	.init = nv50_bus_init,
 	.intr = nv50_bus_intr,
 	.hwsq_exec = nv50_bus_hwsq_exec,
 	.hwsq_size = 64,
-}.base;
+};
+
+int
+nv50_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&nv50_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
new file mode 100644
index 000000000000..a130f2c642d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_BUS_PRIV_H__
+#define __NVKM_BUS_PRIV_H__
+#define nvkm_bus(p) container_of((p), struct nvkm_bus, subdev)
+#include <subdev/bus.h>
+
+struct nvkm_bus_func {
+	void (*init)(struct nvkm_bus *);
+	void (*intr)(struct nvkm_bus *);
+	int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
+	u32 hwsq_size;
+};
+
+int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, int,
+		  struct nvkm_bus **);
+
+void nv50_bus_init(struct nvkm_bus *);
+void nv50_bus_intr(struct nvkm_bus *);
+#endif
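
The nvkm_bus(p) macro in priv.h recovers the containing nvkm_bus from a pointer to its embedded subdev, which is what lets the generic hooks above downcast safely. A self-contained demo of the same container_of construction with stand-in types:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct subdev { int index; };
	struct bus {
		struct subdev subdev;	/* must be embedded, not pointed to */
		const char *name;
	};

	int main(void)
	{
		struct bus b = { .subdev = { .index = 7 }, .name = "pbus" };
		struct subdev *sd = &b.subdev;	/* what the callbacks receive */
		struct bus *back = container_of(sd, struct bus, subdev);
		printf("%s index %d\n", back->name, back->subdev.index);
		return 0;
	}
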
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index 9c2f688c9602..ed7717bcc3a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -8,5 +8,6 @@ nvkm-y += nvkm/subdev/clk/mcp77.o
 nvkm-y += nvkm/subdev/clk/gf100.o
 nvkm-y += nvkm/subdev/clk/gk104.o
 nvkm-y += nvkm/subdev/clk/gk20a.o
+
 nvkm-y += nvkm/subdev/clk/pllnv04.o
 nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 39a83d82e0cd..dc8682c91cc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -21,7 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#include "priv.h"
+
 #include <subdev/bios.h>
 #include <subdev/bios/boost.h>
 #include <subdev/bios/cstep.h>
@@ -30,7 +31,6 @@
 #include <subdev/therm.h>
 #include <subdev/volt.h>
 
-#include <core/device.h>
 #include <core/option.h>
 
 /******************************************************************************
@@ -40,7 +40,7 @@ static u32
 nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
 		u8 pstate, u8 domain, u32 input)
 {
-	struct nvkm_bios *bios = nvkm_bios(clk);
+	struct nvkm_bios *bios = clk->subdev.device->bios;
 	struct nvbios_boostE boostE;
 	u8  ver, hdr, cnt, len;
 	u16 data;
@@ -77,8 +77,10 @@ nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
 static int
 nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
 {
-	struct nvkm_therm *ptherm = nvkm_therm(clk);
-	struct nvkm_volt *volt = nvkm_volt(clk);
+	struct nvkm_subdev *subdev = &clk->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_therm *therm = device->therm;
+	struct nvkm_volt *volt = device->volt;
 	struct nvkm_cstate *cstate;
 	int ret;
 
@@ -88,41 +90,41 @@ nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
 		cstate = &pstate->base;
 	}
 
-	if (ptherm) {
-		ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, +1);
+	if (therm) {
+		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
 		if (ret && ret != -ENODEV) {
-			nv_error(clk, "failed to raise fan speed: %d\n", ret);
+			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
 			return ret;
 		}
 	}
 
 	if (volt) {
-		ret = volt->set_id(volt, cstate->voltage, +1);
+		ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
 		if (ret && ret != -ENODEV) {
-			nv_error(clk, "failed to raise voltage: %d\n", ret);
+			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
 			return ret;
 		}
 	}
 
-	ret = clk->calc(clk, cstate);
+	ret = clk->func->calc(clk, cstate);
 	if (ret == 0) {
-		ret = clk->prog(clk);
-		clk->tidy(clk);
+		ret = clk->func->prog(clk);
+		clk->func->tidy(clk);
 	}
 
 	if (volt) {
-		ret = volt->set_id(volt, cstate->voltage, -1);
+		ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
 		if (ret && ret != -ENODEV)
-			nv_error(clk, "failed to lower voltage: %d\n", ret);
+			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
 	}
 
-	if (ptherm) {
-		ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, -1);
+	if (therm) {
+		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
 		if (ret && ret != -ENODEV)
-			nv_error(clk, "failed to lower fan speed: %d\n", ret);
+			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
 	}
 
-	return 0;
+	return ret;
 }
 
 static void
@@ -135,8 +137,8 @@ nvkm_cstate_del(struct nvkm_cstate *cstate)
 static int
 nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
 {
-	struct nvkm_bios *bios = nvkm_bios(clk);
-	struct nvkm_domain *domain = clk->domains;
+	struct nvkm_bios *bios = clk->subdev.device->bios;
+	const struct nvkm_domain *domain = clk->domains;
 	struct nvkm_cstate *cstate = NULL;
 	struct nvbios_cstepX cstepX;
 	u8  ver, hdr;
@@ -172,7 +174,8 @@ nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
 static int
 nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
 {
-	struct nvkm_fb *pfb = nvkm_fb(clk);
+	struct nvkm_subdev *subdev = &clk->subdev;
+	struct nvkm_ram *ram = subdev->device->fb->ram;
 	struct nvkm_pstate *pstate;
 	int ret, idx = 0;
 
@@ -181,17 +184,17 @@ nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
 			break;
 	}
 
-	nv_debug(clk, "setting performance state %d\n", pstatei);
+	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
 	clk->pstate = pstatei;
 
-	if (pfb->ram && pfb->ram->calc) {
+	if (ram && ram->func->calc) {
 		int khz = pstate->base.domain[nv_clk_src_mem];
 		do {
-			ret = pfb->ram->calc(pfb, khz);
+			ret = ram->func->calc(ram, khz);
 			if (ret == 0)
-				ret = pfb->ram->prog(pfb);
+				ret = ram->func->prog(ram);
 		} while (ret > 0);
-		pfb->ram->tidy(pfb);
+		ram->func->tidy(ram);
 	}
 
 	return nvkm_cstate_prog(clk, pstate, 0);
@@ -201,31 +204,32 @@ static void
 nvkm_pstate_work(struct work_struct *work)
 {
 	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
+	struct nvkm_subdev *subdev = &clk->subdev;
 	int pstate;
 
 	if (!atomic_xchg(&clk->waiting, 0))
 		return;
 	clk->pwrsrc = power_supply_is_system_supplied();
 
-	nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
-		 clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
-		 clk->astate, clk->tstate, clk->dstate);
+	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
+		   clk->astate, clk->tstate, clk->dstate);
 
 	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
 	if (clk->state_nr && pstate != -1) {
 		pstate = (pstate < 0) ? clk->astate : pstate;
 		pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
 		pstate = max(pstate, clk->dstate);
 	} else {
 		pstate = clk->pstate = -1;
 	}
 
-	nv_trace(clk, "-> %d\n", pstate);
+	nvkm_trace(subdev, "-> %d\n", pstate);
 	if (pstate != clk->pstate) {
 		int ret = nvkm_pstate_prog(clk, pstate);
 		if (ret) {
-			nv_error(clk, "error setting pstate %d: %d\n",
-				 pstate, ret);
+			nvkm_error(subdev, "error setting pstate %d: %d\n",
+				   pstate, ret);
 		}
 	}
 
@@ -246,8 +250,9 @@ nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
 static void
 nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
 {
-	struct nvkm_domain *clock = clk->domains - 1;
+	const struct nvkm_domain *clock = clk->domains - 1;
 	struct nvkm_cstate *cstate;
+	struct nvkm_subdev *subdev = &clk->subdev;
 	char info[3][32] = { "", "", "" };
 	char name[4] = "--";
 	int i = -1;
@@ -261,12 +266,12 @@ nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
 		if (hi == 0)
 			continue;
 
-		nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo);
+		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
 		list_for_each_entry(cstate, &pstate->list, head) {
 			u32 freq = cstate->domain[clock->name];
 			lo = min(lo, freq);
 			hi = max(hi, freq);
-			nv_debug(clk, "%10d KHz\n", freq);
+			nvkm_debug(subdev, "%10d KHz\n", freq);
 		}
 
 		if (clock->mname && ++i < ARRAY_SIZE(info)) {
@@ -282,7 +287,7 @@ nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
 		}
 	}
 
-	nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
+	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
 }
 
 static void
@@ -301,8 +306,8 @@ nvkm_pstate_del(struct nvkm_pstate *pstate)
 static int
 nvkm_pstate_new(struct nvkm_clk *clk, int idx)
 {
-	struct nvkm_bios *bios = nvkm_bios(clk);
-	struct nvkm_domain *domain = clk->domains - 1;
+	struct nvkm_bios *bios = clk->subdev.device->bios;
+	const struct nvkm_domain *domain = clk->domains - 1;
 	struct nvkm_pstate *pstate;
 	struct nvkm_cstate *cstate;
 	struct nvbios_cstepE cstepE;
@@ -471,32 +476,37 @@ nvkm_clk_pwrsrc(struct nvkm_notify *notify)
  *****************************************************************************/
 
 int
-_nvkm_clk_fini(struct nvkm_object *object, bool suspend)
+nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+{
+	return clk->func->read(clk, src);
+}
+
+static int
+nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_clk *clk = (void *)object;
+	struct nvkm_clk *clk = nvkm_clk(subdev);
 	nvkm_notify_put(&clk->pwrsrc_ntfy);
-	return nvkm_subdev_fini(&clk->base, suspend);
+	flush_work(&clk->work);
+	if (clk->func->fini)
+		clk->func->fini(clk);
+	return 0;
 }
 
-int
-_nvkm_clk_init(struct nvkm_object *object)
+static int
+nvkm_clk_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_clk *clk = (void *)object;
-	struct nvkm_domain *clock = clk->domains;
+	struct nvkm_clk *clk = nvkm_clk(subdev);
+	const struct nvkm_domain *clock = clk->domains;
 	int ret;
 
-	ret = nvkm_subdev_init(&clk->base);
-	if (ret)
-		return ret;
-
 	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
 	INIT_LIST_HEAD(&clk->bstate.list);
 	clk->bstate.pstate = 0xff;
 
 	while (clock->name != nv_clk_src_max) {
-		ret = clk->read(clk, clock->name);
+		ret = nvkm_clk_read(clk, clock->name);
 		if (ret < 0) {
-			nv_error(clk, "%02x freq unknown\n", clock->name);
+			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
 			return ret;
 		}
 		clk->bstate.base.domain[clock->name] = ret;
@@ -505,6 +515,9 @@ _nvkm_clk_init(struct nvkm_object *object)
 
 	nvkm_pstate_info(clk, &clk->bstate);
 
+	if (clk->func->init)
+		return clk->func->init(clk);
+
 	clk->astate = clk->state_nr - 1;
 	clk->tstate = 0;
 	clk->dstate = 0;
@@ -513,61 +526,63 @@ _nvkm_clk_init(struct nvkm_object *object)
 	return 0;
 }
 
-void
-_nvkm_clk_dtor(struct nvkm_object *object)
+static void *
+nvkm_clk_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_clk *clk = (void *)object;
+	struct nvkm_clk *clk = nvkm_clk(subdev);
 	struct nvkm_pstate *pstate, *temp;
 
 	nvkm_notify_fini(&clk->pwrsrc_ntfy);
 
+	/* Early return if the pstates have been provided statically */
+	if (clk->func->pstates)
+		return clk;
+
 	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
 		nvkm_pstate_del(pstate);
 	}
 
-	nvkm_subdev_destroy(&clk->base);
+	return clk;
 }
 
+static const struct nvkm_subdev_func
+nvkm_clk = {
+	.dtor = nvkm_clk_dtor,
+	.init = nvkm_clk_init,
+	.fini = nvkm_clk_fini,
+};
+
 int
-nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, struct nvkm_domain *clocks,
-		 struct nvkm_pstate *pstates, int nb_pstates,
-		 bool allow_reclock, int length, void **object)
+nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
+	      int index, bool allow_reclock, struct nvkm_clk *clk)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_clk *clk;
 	int ret, idx, arglen;
 	const char *mode;
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "CLK",
-				  "clock", length, object);
-	clk = *object;
-	if (ret)
-		return ret;
-
+	nvkm_subdev_ctor(&nvkm_clk, device, index, 0, &clk->subdev);
+	clk->func = func;
 	INIT_LIST_HEAD(&clk->states);
-	clk->domains = clocks;
+	clk->domains = func->domains;
 	clk->ustate_ac = -1;
 	clk->ustate_dc = -1;
+	clk->allow_reclock = allow_reclock;
 
 	INIT_WORK(&clk->work, nvkm_pstate_work);
 	init_waitqueue_head(&clk->wait);
 	atomic_set(&clk->waiting, 0);
 
 	/* If no pstates are provided, try and fetch them from the BIOS */
-	if (!pstates) {
+	if (!func->pstates) {
 		idx = 0;
 		do {
 			ret = nvkm_pstate_new(clk, idx++);
 		} while (ret == 0);
 	} else {
-		for (idx = 0; idx < nb_pstates; idx++)
-			list_add_tail(&pstates[idx].head, &clk->states);
-		clk->state_nr = nb_pstates;
+		for (idx = 0; idx < func->nr_pstates; idx++)
+			list_add_tail(&func->pstates[idx].head, &clk->states);
+		clk->state_nr = func->nr_pstates;
 	}
 
-	clk->allow_reclock = allow_reclock;
-
 	ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
 			       NULL, 0, 0, &clk->pwrsrc_ntfy);
 	if (ret)
@@ -589,3 +604,12 @@ nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 
 	return 0;
 }
+
+int
+nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
+	      int index, bool allow_reclock, struct nvkm_clk **pclk)
+{
+	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
+}
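
The nvkm_clk_ctor()/nvkm_clk_new_() split mirrors the pattern used across this series: new_() allocates a bare base object, while implementations that embed the base (gf100_clk below, for instance) allocate their own struct and run the ctor on the embedded member. A stand-in sketch with error handling trimmed:

	#include <stdio.h>
	#include <stdlib.h>

	struct clk { const char *name; };

	static int clk_ctor(const char *name, struct clk *clk)
	{
		clk->name = name;	/* shared init used by both paths */
		return 0;
	}

	static int clk_new_(const char *name, struct clk **pclk)
	{
		if (!(*pclk = calloc(1, sizeof(**pclk))))
			return -1;	/* stands in for -ENOMEM */
		return clk_ctor(name, *pclk);
	}

	struct gf100_clk {
		struct clk base;	/* ctor runs on this embedded member */
		int eng[16];
	};

	int main(void)
	{
		struct clk *plain = NULL;
		struct gf100_clk *gf = calloc(1, sizeof(*gf));
		if (clk_new_("plain", &plain) || !gf)
			return 1;
		clk_ctor("gf100", &gf->base);
		printf("%s / %s\n", plain->name, gf->base.name);
		free(plain);
		free(gf);
		return 0;
	}
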
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index 4c90b9769d64..347da9ee20f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -23,25 +23,26 @@
  */
 #include "nv50.h"
 
-static struct nvkm_domain
-g84_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
-	{ nv_clk_src_vdec   , 0xff },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+g84_clk = {
+	.read = nv50_clk_read,
+	.calc = nv50_clk_calc,
+	.prog = nv50_clk_prog,
+	.tidy = nv50_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+		{ nv_clk_src_vdec   , 0xff },
+		{ nv_clk_src_max }
+	}
 };
 
-struct nvkm_oclass *
-g84_clk_oclass = &(struct nv50_clk_oclass) {
-	.base.handle = NV_SUBDEV(CLK, 0x84),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-	.domains = g84_domains,
-}.base;
+int
+g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+	return nv50_clk_new_(&g84_clk, device, index,
+			     (device->chipset == 0xa0), pclk);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 3d7330d54b02..a52b7e7fce41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#define gf100_clk(p) container_of((p), struct gf100_clk, base)
+#include "priv.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/timer.h>
@@ -38,29 +38,29 @@ struct gf100_clk_info {
 	u32 coef;
 };
 
-struct gf100_clk_priv {
+struct gf100_clk {
 	struct nvkm_clk base;
 	struct gf100_clk_info eng[16];
 };
 
-static u32 read_div(struct gf100_clk_priv *, int, u32, u32);
+static u32 read_div(struct gf100_clk *, int, u32, u32);
 
 static u32
-read_vco(struct gf100_clk_priv *priv, u32 dsrc)
+read_vco(struct gf100_clk *clk, u32 dsrc)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 ssrc = nv_rd32(priv, dsrc);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc);
 	if (!(ssrc & 0x00000100))
-		return clk->read(clk, nv_clk_src_sppll0);
-	return clk->read(clk, nv_clk_src_sppll1);
+		return nvkm_clk_read(&clk->base, nv_clk_src_sppll0);
+	return nvkm_clk_read(&clk->base, nv_clk_src_sppll1);
 }
 
 static u32
-read_pll(struct gf100_clk_priv *priv, u32 pll)
+read_pll(struct gf100_clk *clk, u32 pll)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 ctrl = nv_rd32(priv, pll + 0x00);
-	u32 coef = nv_rd32(priv, pll + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0x00);
+	u32 coef = nvkm_rd32(device, pll + 0x04);
 	u32 P = (coef & 0x003f0000) >> 16;
 	u32 N = (coef & 0x0000ff00) >> 8;
 	u32 M = (coef & 0x000000ff) >> 0;
@@ -72,20 +72,20 @@ read_pll(struct gf100_clk_priv *priv, u32 pll)
 	switch (pll) {
 	case 0x00e800:
 	case 0x00e820:
-		sclk = nv_device(priv)->crystal;
+		sclk = device->crystal;
 		P = 1;
 		break;
 	case 0x132000:
-		sclk = clk->read(clk, nv_clk_src_mpllsrc);
+		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
 		break;
 	case 0x132020:
-		sclk = clk->read(clk, nv_clk_src_mpllsrcref);
+		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
 		break;
 	case 0x137000:
 	case 0x137020:
 	case 0x137040:
 	case 0x1370e0:
-		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
 		break;
 	default:
 		return 0;
@@ -95,46 +95,48 @@ read_pll(struct gf100_clk_priv *priv, u32 pll)
 }
 
 static u32
-read_div(struct gf100_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
 
 	switch (ssrc & 0x00000003) {
 	case 0:
 		if ((ssrc & 0x00030000) != 0x00030000)
-			return nv_device(priv)->crystal;
+			return device->crystal;
 		return 108000;
 	case 2:
 		return 100000;
 	case 3:
 		if (sctl & 0x80000000) {
-			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sclk = read_vco(clk, dsrc + (doff * 4));
 			u32 sdiv = (sctl & 0x0000003f) + 2;
 			return (sclk * 2) / sdiv;
 		}
 
-		return read_vco(priv, dsrc + (doff * 4));
+		return read_vco(clk, dsrc + (doff * 4));
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_clk(struct gf100_clk_priv *priv, int clk)
+read_clk(struct gf100_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
-	u32 ssel = nv_rd32(priv, 0x137100);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
+	u32 ssel = nvkm_rd32(device, 0x137100);
 	u32 sclk, sdiv;
 
-	if (ssel & (1 << clk)) {
-		if (clk < 7)
-			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+	if (ssel & (1 << idx)) {
+		if (idx < 7)
+			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
 		else
-			sclk = read_pll(priv, 0x1370e0);
+			sclk = read_pll(clk, 0x1370e0);
 		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
 	} else {
-		sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+		sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
 	}
 
@@ -145,10 +147,11 @@ read_clk(struct gf100_clk_priv *priv, int clk)
 }
 
 static int
-gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nvkm_device *device = nv_device(clk);
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = gf100_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
 	switch (src) {
 	case nv_clk_src_crystal:
@@ -156,47 +159,47 @@ gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 	case nv_clk_src_href:
 		return 100000;
 	case nv_clk_src_sppll0:
-		return read_pll(priv, 0x00e800);
+		return read_pll(clk, 0x00e800);
 	case nv_clk_src_sppll1:
-		return read_pll(priv, 0x00e820);
+		return read_pll(clk, 0x00e820);
 
 	case nv_clk_src_mpllsrcref:
-		return read_div(priv, 0, 0x137320, 0x137330);
+		return read_div(clk, 0, 0x137320, 0x137330);
 	case nv_clk_src_mpllsrc:
-		return read_pll(priv, 0x132020);
+		return read_pll(clk, 0x132020);
 	case nv_clk_src_mpll:
-		return read_pll(priv, 0x132000);
+		return read_pll(clk, 0x132000);
 	case nv_clk_src_mdiv:
-		return read_div(priv, 0, 0x137300, 0x137310);
+		return read_div(clk, 0, 0x137300, 0x137310);
 	case nv_clk_src_mem:
-		if (nv_rd32(priv, 0x1373f0) & 0x00000002)
-			return clk->read(clk, nv_clk_src_mpll);
-		return clk->read(clk, nv_clk_src_mdiv);
+		if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
+			return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
+		return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);
 
 	case nv_clk_src_gpc:
-		return read_clk(priv, 0x00);
+		return read_clk(clk, 0x00);
 	case nv_clk_src_rop:
-		return read_clk(priv, 0x01);
+		return read_clk(clk, 0x01);
 	case nv_clk_src_hubk07:
-		return read_clk(priv, 0x02);
+		return read_clk(clk, 0x02);
 	case nv_clk_src_hubk06:
-		return read_clk(priv, 0x07);
+		return read_clk(clk, 0x07);
 	case nv_clk_src_hubk01:
-		return read_clk(priv, 0x08);
+		return read_clk(clk, 0x08);
 	case nv_clk_src_copy:
-		return read_clk(priv, 0x09);
+		return read_clk(clk, 0x09);
 	case nv_clk_src_daemon:
-		return read_clk(priv, 0x0c);
+		return read_clk(clk, 0x0c);
 	case nv_clk_src_vdec:
-		return read_clk(priv, 0x0e);
+		return read_clk(clk, 0x0e);
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 }
 
 static u32
-calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
 {
 	u32 div = min((ref * 2) / freq, (u32)65);
 	if (div < 2)
@@ -207,7 +210,7 @@ calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
 }
 
 static u32
-calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
 {
 	u32 sclk;
 
@@ -229,28 +232,29 @@ calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
 	}
 
 	/* otherwise, calculate the closest divider */
-	sclk = read_vco(priv, 0x137160 + (clk * 4));
-	if (clk < 7)
-		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	sclk = read_vco(clk, 0x137160 + (idx * 4));
+	if (idx < 7)
+		sclk = calc_div(clk, idx, sclk, freq, ddiv);
 	return sclk;
 }
 
 static u32
-calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pll limits;
 	int N, M, P, ret;
 
-	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
 	if (ret)
 		return 0;
 
-	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
 	if (!limits.refclk)
 		return 0;
 
-	ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
 	if (ret <= 0)
 		return 0;
 
@@ -259,10 +263,9 @@ calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
 }
 
 static int
-calc_clk(struct gf100_clk_priv *priv,
-	 struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
+	struct gf100_clk_info *info = &clk->eng[idx];
 	u32 freq = cstate->domain[dom];
 	u32 src0, div0, div1D, div1P = 0;
 	u32 clk0, clk1 = 0;
@@ -272,16 +275,16 @@ calc_clk(struct gf100_clk_priv *priv,
 		return 0;
 
 	/* first possible path, using only dividers */
-	clk0 = calc_src(priv, clk, freq, &src0, &div0);
-	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+	clk0 = calc_src(clk, idx, freq, &src0, &div0);
+	clk0 = calc_div(clk, idx, clk0, freq, &div1D);
 
 	/* see if we can get any closer using PLLs */
-	if (clk0 != freq && (0x00004387 & (1 << clk))) {
-		if (clk <= 7)
-			clk1 = calc_pll(priv, clk, freq, &info->coef);
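+	/* 0x00004387: only these clock indices have an alternate PLL (idx <= 7)
+	 * or hubk06-derived source */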
+	if (clk0 != freq && (0x00004387 & (1 << idx))) {
+		if (idx <= 7)
+			clk1 = calc_pll(clk, idx, freq, &info->coef);
 		else
 			clk1 = cstate->domain[nv_clk_src_hubk06];
-		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
 	}
 
 	/* select the method which gets closest to target freq */
@@ -303,7 +306,7 @@ calc_clk(struct gf100_clk_priv *priv,
 			info->mdiv |= 0x80000000;
 			info->mdiv |= div1P << 8;
 		}
-		info->ssel = (1 << clk);
+		info->ssel = (1 << idx);
 		info->freq = clk1;
 	}
 
@@ -311,81 +314,96 @@ calc_clk(struct gf100_clk_priv *priv,
 }
 
 static int
-gf100_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = gf100_clk(base);
 	int ret;
 
-	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
-	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
-	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
-	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
-	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
-	    (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
-	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
-	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
+	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
 		return ret;
 
 	return 0;
 }
 
 static void
-gf100_clk_prog_0(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_0(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	if (clk < 7 && !info->ssel) {
-		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
-		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	if (idx < 7 && !info->ssel) {
+		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
+		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
 	}
 }
 
 static void
-gf100_clk_prog_1(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_1(struct gf100_clk *clk, int idx)
 {
-	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
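+	/* poll up to 2ms for the mux to switch the clock onto its divider */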
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+			break;
+	);
 }
 
 static void
-gf100_clk_prog_2(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_2(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	const u32 addr = 0x137000 + (clk * 0x20);
-	if (clk <= 7) {
-		nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
-		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	const u32 addr = 0x137000 + (idx * 0x20);
+	if (idx <= 7) {
+		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
 		if (info->coef) {
-			nv_wr32(priv, addr + 0x04, info->coef);
-			nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
-			nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
-			nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+			nvkm_wr32(device, addr + 0x04, info->coef);
+			nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
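+			/* poll up to 2ms for the PLL to report lock (bit 17) */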
+			nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+					break;
+			);
+			nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
 		}
 	}
 }
 
 static void
-gf100_clk_prog_3(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_3(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
-		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
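+		/* poll up to 2ms for the mux to ack the switch back to the PLL */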
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+			if (tmp == info->ssel)
+				break;
+		);
 	}
 }
 
 static void
-gf100_clk_prog_4(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_4(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
 }
 
 static int
-gf100_clk_prog(struct nvkm_clk *clk)
+gf100_clk_prog(struct nvkm_clk *base)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = gf100_clk(base);
 	struct {
-		void (*exec)(struct gf100_clk_priv *, int);
+		void (*exec)(struct gf100_clk *, int);
 	} stage[] = {
 		{ gf100_clk_prog_0 }, /* div programming */
 		{ gf100_clk_prog_1 }, /* select div mode */
@@ -396,10 +414,10 @@ gf100_clk_prog(struct nvkm_clk *clk)
 	int i, j;
 
 	for (i = 0; i < ARRAY_SIZE(stage); i++) {
-		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
-			if (!priv->eng[j].freq)
+		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
+			if (!clk->eng[j].freq)
 				continue;
-			stage[i].exec(priv, j);
+			stage[i].exec(clk, j);
 		}
 	}
 
@@ -407,56 +425,42 @@ gf100_clk_prog(struct nvkm_clk *clk)
 }
 
 static void
-gf100_clk_tidy(struct nvkm_clk *clk)
+gf100_clk_tidy(struct nvkm_clk *base)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
-	memset(priv->eng, 0x00, sizeof(priv->eng));
+	struct gf100_clk *clk = gf100_clk(base);
+	memset(clk->eng, 0x00, sizeof(clk->eng));
 }
 
-static struct nvkm_domain
-gf100_domain[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_hubk06 , 0x00 },
-	{ nv_clk_src_hubk01 , 0x01 },
-	{ nv_clk_src_copy   , 0x02 },
-	{ nv_clk_src_gpc    , 0x03, 0, "core", 2000 },
-	{ nv_clk_src_rop    , 0x04 },
-	{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
-	{ nv_clk_src_vdec   , 0x06 },
-	{ nv_clk_src_daemon , 0x0a },
-	{ nv_clk_src_hubk07 , 0x0b },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+gf100_clk = {
+	.read = gf100_clk_read,
+	.calc = gf100_clk_calc,
+	.prog = gf100_clk_prog,
+	.tidy = gf100_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_hubk06 , 0x00 },
+		{ nv_clk_src_hubk01 , 0x01 },
+		{ nv_clk_src_copy   , 0x02 },
+		{ nv_clk_src_gpc    , 0x03, 0, "core", 2000 },
+		{ nv_clk_src_rop    , 0x04 },
+		{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
+		{ nv_clk_src_vdec   , 0x06 },
+		{ nv_clk_src_daemon , 0x0a },
+		{ nv_clk_src_hubk07 , 0x0b },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-gf100_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gf100_clk_priv *priv;
-	int ret;
+	struct gf100_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, gf100_domain,
-			      NULL, 0, false, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = gf100_clk_read;
-	priv->base.calc = gf100_clk_calc;
-	priv->base.prog = gf100_clk_prog;
-	priv->base.tidy = gf100_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
 }
-
-struct nvkm_oclass
-gf100_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index e9b2310bdfbb..396f7e4dad0a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#define gk104_clk(p) container_of((p), struct gk104_clk, base)
+#include "priv.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/timer.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
@@ -38,28 +38,30 @@ struct gk104_clk_info {
 	u32 coef;
 };
 
-struct gk104_clk_priv {
+struct gk104_clk {
 	struct nvkm_clk base;
 	struct gk104_clk_info eng[16];
 };
 
-static u32 read_div(struct gk104_clk_priv *, int, u32, u32);
-static u32 read_pll(struct gk104_clk_priv *, u32);
+static u32 read_div(struct gk104_clk *, int, u32, u32);
+static u32 read_pll(struct gk104_clk *, u32);
 
 static u32
-read_vco(struct gk104_clk_priv *priv, u32 dsrc)
+read_vco(struct gk104_clk *clk, u32 dsrc)
 {
-	u32 ssrc = nv_rd32(priv, dsrc);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc);
 	if (!(ssrc & 0x00000100))
-		return read_pll(priv, 0x00e800);
-	return read_pll(priv, 0x00e820);
+		return read_pll(clk, 0x00e800);
+	return read_pll(clk, 0x00e820);
 }
 
 static u32
-read_pll(struct gk104_clk_priv *priv, u32 pll)
+read_pll(struct gk104_clk *clk, u32 pll)
 {
-	u32 ctrl = nv_rd32(priv, pll + 0x00);
-	u32 coef = nv_rd32(priv, pll + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0x00);
+	u32 coef = nvkm_rd32(device, pll + 0x04);
 	u32 P = (coef & 0x003f0000) >> 16;
 	u32 N = (coef & 0x0000ff00) >> 8;
 	u32 M = (coef & 0x000000ff) >> 0;
@@ -72,22 +74,22 @@ read_pll(struct gk104_clk_priv *priv, u32 pll)
 	switch (pll) {
 	case 0x00e800:
 	case 0x00e820:
-		sclk = nv_device(priv)->crystal;
+		sclk = device->crystal;
 		P = 1;
 		break;
 	case 0x132000:
-		sclk = read_pll(priv, 0x132020);
+		sclk = read_pll(clk, 0x132020);
 		P = (coef & 0x10000000) ? 2 : 1;
 		break;
 	case 0x132020:
-		sclk = read_div(priv, 0, 0x137320, 0x137330);
-		fN   = nv_rd32(priv, pll + 0x10) >> 16;
+		sclk = read_div(clk, 0, 0x137320, 0x137330);
+		fN   = nvkm_rd32(device, pll + 0x10) >> 16;
 		break;
 	case 0x137000:
 	case 0x137020:
 	case 0x137040:
 	case 0x1370e0:
-		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
 		break;
 	default:
 		return 0;
@@ -101,70 +103,73 @@ read_pll(struct gk104_clk_priv *priv, u32 pll)
 }
 
 static u32
-read_div(struct gk104_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
 
 	switch (ssrc & 0x00000003) {
 	case 0:
 		if ((ssrc & 0x00030000) != 0x00030000)
-			return nv_device(priv)->crystal;
+			return device->crystal;
 		return 108000;
 	case 2:
 		return 100000;
 	case 3:
 		if (sctl & 0x80000000) {
-			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sclk = read_vco(clk, dsrc + (doff * 4));
 			u32 sdiv = (sctl & 0x0000003f) + 2;
 			return (sclk * 2) / sdiv;
 		}
 
-		return read_vco(priv, dsrc + (doff * 4));
+		return read_vco(clk, dsrc + (doff * 4));
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_mem(struct gk104_clk_priv *priv)
+read_mem(struct gk104_clk *clk)
 {
-	switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
-	case 1: return read_pll(priv, 0x132020);
-	case 2: return read_pll(priv, 0x132000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
+	case 1: return read_pll(clk, 0x132020);
+	case 2: return read_pll(clk, 0x132000);
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_clk(struct gk104_clk_priv *priv, int clk)
+read_clk(struct gk104_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
 	u32 sclk, sdiv;
 
-	if (clk < 7) {
-		u32 ssel = nv_rd32(priv, 0x137100);
-		if (ssel & (1 << clk)) {
-			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+	if (idx < 7) {
+		u32 ssel = nvkm_rd32(device, 0x137100);
+		if (ssel & (1 << idx)) {
+			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
 			sdiv = 1;
 		} else {
-			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 			sdiv = 0;
 		}
 	} else {
-		u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
+		u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
 		if ((ssrc & 0x00000003) == 0x00000003) {
-			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 			if (ssrc & 0x00000100) {
 				if (ssrc & 0x40000000)
-					sclk = read_pll(priv, 0x1370e0);
+					sclk = read_pll(clk, 0x1370e0);
 				sdiv = 1;
 			} else {
 				sdiv = 0;
 			}
 		} else {
-			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 			sdiv = 0;
 		}
 	}
@@ -181,10 +186,11 @@ read_clk(struct gk104_clk_priv *priv, int clk)
 }
 
 static int
-gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nvkm_device *device = nv_device(clk);
-	struct gk104_clk_priv *priv = (void *)clk;
+	struct gk104_clk *clk = gk104_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
 	switch (src) {
 	case nv_clk_src_crystal:
@@ -192,29 +198,29 @@ gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 	case nv_clk_src_href:
 		return 100000;
 	case nv_clk_src_mem:
-		return read_mem(priv);
+		return read_mem(clk);
 	case nv_clk_src_gpc:
-		return read_clk(priv, 0x00);
+		return read_clk(clk, 0x00);
 	case nv_clk_src_rop:
-		return read_clk(priv, 0x01);
+		return read_clk(clk, 0x01);
 	case nv_clk_src_hubk07:
-		return read_clk(priv, 0x02);
+		return read_clk(clk, 0x02);
 	case nv_clk_src_hubk06:
-		return read_clk(priv, 0x07);
+		return read_clk(clk, 0x07);
 	case nv_clk_src_hubk01:
-		return read_clk(priv, 0x08);
+		return read_clk(clk, 0x08);
 	case nv_clk_src_daemon:
-		return read_clk(priv, 0x0c);
+		return read_clk(clk, 0x0c);
 	case nv_clk_src_vdec:
-		return read_clk(priv, 0x0e);
+		return read_clk(clk, 0x0e);
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 }
 
 static u32
-calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
 {
 	u32 div = min((ref * 2) / freq, (u32)65);
 	if (div < 2)
@@ -225,7 +231,7 @@ calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
 }
 
 static u32
-calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
 {
 	u32 sclk;
 
@@ -247,28 +253,29 @@ calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
 	}
 
 	/* otherwise, calculate the closest divider */
-	sclk = read_vco(priv, 0x137160 + (clk * 4));
-	if (clk < 7)
-		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	sclk = read_vco(clk, 0x137160 + (idx * 4));
+	if (idx < 7)
+		sclk = calc_div(clk, idx, sclk, freq, ddiv);
 	return sclk;
 }
 
 static u32
-calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pll limits;
 	int N, M, P, ret;
 
-	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
 	if (ret)
 		return 0;
 
-	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
 	if (!limits.refclk)
 		return 0;
 
-	ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
 	if (ret <= 0)
 		return 0;
 
@@ -277,10 +284,10 @@ calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
 }
 
 static int
-calc_clk(struct gk104_clk_priv *priv,
-	 struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gk104_clk *clk,
+	 struct nvkm_cstate *cstate, int idx, int dom)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
 	u32 freq = cstate->domain[dom];
 	u32 src0, div0, div1D, div1P = 0;
 	u32 clk0, clk1 = 0;
@@ -290,16 +297,16 @@ calc_clk(struct gk104_clk_priv *priv,
 		return 0;
 
 	/* first possible path, using only dividers */
-	clk0 = calc_src(priv, clk, freq, &src0, &div0);
-	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+	clk0 = calc_src(clk, idx, freq, &src0, &div0);
+	clk0 = calc_div(clk, idx, clk0, freq, &div1D);
 
 	/* see if we can get any closer using PLLs */
-	if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
-		if (clk <= 7)
-			clk1 = calc_pll(priv, clk, freq, &info->coef);
+	if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
+		if (idx <= 7)
+			clk1 = calc_pll(clk, idx, freq, &info->coef);
 		else
 			clk1 = cstate->domain[nv_clk_src_hubk06];
-		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
 	}
 
 	/* select the method which gets closest to target freq */
@@ -320,7 +327,7 @@ calc_clk(struct gk104_clk_priv *priv,
 			info->mdiv |= 0x80000000;
 			info->mdiv |= div1P << 8;
 		}
-		info->ssel = (1 << clk);
+		info->ssel = (1 << idx);
 		info->dsrc = 0x40000100;
 		info->freq = clk1;
 	}
@@ -329,98 +336,115 @@ calc_clk(struct gk104_clk_priv *priv,
 }
 
 static int
-gk104_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gk104_clk_priv *priv = (void *)clk;
+	struct gk104_clk *clk = gk104_clk(base);
 	int ret;
 
-	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
-	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
-	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
-	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
-	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
-	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
-	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
 		return ret;
 
 	return 0;
 }
 
 static void
-gk104_clk_prog_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_0(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (!info->ssel) {
-		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
-		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
+		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
 	}
 }
 
 static void
-gk104_clk_prog_1_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
 {
-	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+			break;
+	);
 }
 
 static void
-gk104_clk_prog_1_1(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
 {
-	nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
 }
 
 static void
-gk104_clk_prog_2(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_2(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
-	const u32 addr = 0x137000 + (clk * 0x20);
-	nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
-	nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	const u32 addr = 0x137000 + (idx * 0x20);
+	nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+	nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
 	if (info->coef) {
-		nv_wr32(priv, addr + 0x04, info->coef);
-		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
-		nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
-		nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+		nvkm_wr32(device, addr + 0x04, info->coef);
+		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+				break;
+		);
+		nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
 	}
 }
 
 static void
-gk104_clk_prog_3(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_3(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel)
-		nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
+		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
 	else
-		nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
+		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
 }
 
 static void
-gk104_clk_prog_4_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
-		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+			if (tmp == info->ssel)
+				break;
+		);
 	}
 }
 
 static void
-gk104_clk_prog_4_1(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
-		nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
+		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
+		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
 	}
 }
 
 static int
-gk104_clk_prog(struct nvkm_clk *clk)
+gk104_clk_prog(struct nvkm_clk *base)
 {
-	struct gk104_clk_priv *priv = (void *)clk;
+	struct gk104_clk *clk = gk104_clk(base);
 	struct {
 		u32 mask;
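+		/* bitmask of the clock indices each stage applies to */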
-		void (*exec)(struct gk104_clk_priv *, int);
+		void (*exec)(struct gk104_clk *, int);
 	} stage[] = {
 		{ 0x007f, gk104_clk_prog_0   }, /* div programming */
 		{ 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
@@ -433,12 +457,12 @@ gk104_clk_prog(struct nvkm_clk *clk)
 	int i, j;
 
 	for (i = 0; i < ARRAY_SIZE(stage); i++) {
-		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
 			if (!(stage[i].mask & (1 << j)))
 				continue;
-			if (!priv->eng[j].freq)
+			if (!clk->eng[j].freq)
 				continue;
-			stage[i].exec(priv, j);
+			stage[i].exec(clk, j);
 		}
 	}
 
@@ -446,55 +470,41 @@ gk104_clk_prog(struct nvkm_clk *clk)
 }
 
 static void
-gk104_clk_tidy(struct nvkm_clk *clk)
+gk104_clk_tidy(struct nvkm_clk *base)
 {
-	struct gk104_clk_priv *priv = (void *)clk;
-	memset(priv->eng, 0x00, sizeof(priv->eng));
+	struct gk104_clk *clk = gk104_clk(base);
+	memset(clk->eng, 0x00, sizeof(clk->eng));
 }
 
-static struct nvkm_domain
-gk104_domain[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
-	{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
-	{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
-	{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
-	{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
-	{ nv_clk_src_hubk01 , 0x05 },
-	{ nv_clk_src_vdec   , 0x06 },
-	{ nv_clk_src_daemon , 0x07 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+gk104_clk = {
+	.read = gk104_clk_read,
+	.calc = gk104_clk_calc,
+	.prog = gk104_clk_prog,
+	.tidy = gk104_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+		{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
+		{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
+		{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
+		{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
+		{ nv_clk_src_hubk01 , 0x05 },
+		{ nv_clk_src_vdec   , 0x06 },
+		{ nv_clk_src_daemon , 0x07 },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-gk104_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gk104_clk_priv *priv;
-	int ret;
+	struct gk104_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, gk104_domain,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = gk104_clk_read;
-	priv->base.calc = gk104_clk_calc;
-	priv->base.prog = gk104_clk_prog;
-	priv->base.tidy = gk104_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
 }
-
-struct nvkm_oclass
-gk104_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 65c532742b08..254094ab7fb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -22,14 +22,11 @@
  * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
  *
  */
-#include <subdev/clk.h>
-#include <subdev/timer.h>
-
-#include <core/device.h>
+#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
+#include "priv.h"
 
-#ifdef __KERNEL__
-#include <nouveau_platform.h>
-#endif
+#include <core/tegra.h>
+#include <subdev/timer.h>
 
 #define MHZ (1000 * 1000)
 
@@ -117,41 +114,42 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
 	.min_pl = 1, .max_pl = 32,
 };
 
-struct gk20a_clk_priv {
+struct gk20a_clk {
 	struct nvkm_clk base;
 	const struct gk20a_clk_pllg_params *params;
 	u32 m, n, pl;
 	u32 parent_rate;
 };
-#define to_gk20a_clk(base) container_of(base, struct gk20a_clk_priv, base)
 
 static void
-gk20a_pllg_read_mnp(struct gk20a_clk_priv *priv)
+gk20a_pllg_read_mnp(struct gk20a_clk *clk)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val;
 
-	val = nv_rd32(priv, GPCPLL_COEFF);
-	priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
-	priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
-	priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
+	clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+	clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
+	clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
 }
 
 static u32
-gk20a_pllg_calc_rate(struct gk20a_clk_priv *priv)
+gk20a_pllg_calc_rate(struct gk20a_clk *clk)
 {
 	u32 rate;
 	u32 divider;
 
-	rate = priv->parent_rate * priv->n;
-	divider = priv->m * pl_to_div[priv->pl];
+	rate = clk->parent_rate * clk->n;
+	divider = clk->m * pl_to_div[clk->pl];
 	do_div(rate, divider);
 
 	return rate / 2;
 }
 
 static int
-gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
+gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
 {
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	u32 target_clk_f, ref_clk_f, target_freq;
 	u32 min_vco_f, max_vco_f;
 	u32 low_pl, high_pl, best_pl;
@@ -163,13 +161,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
 	u32 pl;
 
 	target_clk_f = rate * 2 / MHZ;
-	ref_clk_f = priv->parent_rate / MHZ;
+	ref_clk_f = clk->parent_rate / MHZ;
 
-	max_vco_f = priv->params->max_vco;
-	min_vco_f = priv->params->min_vco;
-	best_m = priv->params->max_m;
-	best_n = priv->params->min_n;
-	best_pl = priv->params->min_pl;
+	max_vco_f = clk->params->max_vco;
+	min_vco_f = clk->params->min_vco;
+	best_m = clk->params->max_m;
+	best_n = clk->params->min_n;
+	best_pl = clk->params->min_pl;
 
 	target_vco_f = target_clk_f + target_clk_f / 50;
 	if (max_vco_f < target_vco_f)
@@ -177,13 +175,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
 
 	/* min_pl <= high_pl <= max_pl */
 	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
-	high_pl = min(high_pl, priv->params->max_pl);
-	high_pl = max(high_pl, priv->params->min_pl);
+	high_pl = min(high_pl, clk->params->max_pl);
+	high_pl = max(high_pl, clk->params->min_pl);
 
 	/* min_pl <= low_pl <= max_pl */
 	low_pl = min_vco_f / target_vco_f;
-	low_pl = min(low_pl, priv->params->max_pl);
-	low_pl = max(low_pl, priv->params->min_pl);
+	low_pl = min(low_pl, clk->params->max_pl);
+	low_pl = max(low_pl, clk->params->min_pl);
 
 	/* Find Indices of high_pl and low_pl */
 	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
@@ -199,30 +197,30 @@ gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
 		}
 	}
 
-	nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
-		 pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
+	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
+		   pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
 
 	/* Select lowest possible VCO */
 	for (pl = low_pl; pl <= high_pl; pl++) {
 		target_vco_f = target_clk_f * pl_to_div[pl];
-		for (m = priv->params->min_m; m <= priv->params->max_m; m++) {
+		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
 			u_f = ref_clk_f / m;
 
-			if (u_f < priv->params->min_u)
+			if (u_f < clk->params->min_u)
 				break;
-			if (u_f > priv->params->max_u)
+			if (u_f > clk->params->max_u)
 				continue;
 
 			n = (target_vco_f * m) / ref_clk_f;
 			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
 
-			if (n > priv->params->max_n)
+			if (n > clk->params->max_n)
 				break;
 
 			for (; n <= n2; n++) {
-				if (n < priv->params->min_n)
+				if (n < clk->params->min_n)
 					continue;
-				if (n > priv->params->max_n)
+				if (n > clk->params->max_n)
 					break;
 
 				vco_f = ref_clk_f * n / m;
@@ -250,71 +248,75 @@ found_match:
 	WARN_ON(best_delta == ~0);
 
 	if (best_delta != 0)
-		nv_debug(priv, "no best match for target @ %dMHz on gpc_pll",
-			 target_clk_f);
+		nvkm_debug(subdev,
+			   "no best match for target @ %dMHz on gpc_pll",
+			   target_clk_f);
 
-	priv->m = best_m;
-	priv->n = best_n;
-	priv->pl = best_pl;
+	clk->m = best_m;
+	clk->n = best_n;
+	clk->pl = best_pl;
 
-	target_freq = gk20a_pllg_calc_rate(priv) / MHZ;
+	target_freq = gk20a_pllg_calc_rate(clk) / MHZ;
 
-	nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
-		 target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]);
+	nvkm_debug(subdev,
+		   "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
+		   target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);
 	return 0;
 }
 
 static int
-gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
+gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 {
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 val;
 	int ramp_timeout;
 
 	/* get old coefficients */
-	val = nv_rd32(priv, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	/* do nothing if NDIV is the same */
 	if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
 		return 0;
 
 	/* setup */
-	nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
 		0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
-	nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
 		0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
 
 	/* pll slowdown mode */
-	nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
 
 	/* new ndiv ready for ramp */
-	val = nv_rd32(priv, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
 	val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
 	udelay(1);
-	nv_wr32(priv, GPCPLL_COEFF, val);
+	nvkm_wr32(device, GPCPLL_COEFF, val);
 
 	/* dynamic ramp to new ndiv */
-	val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+	val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
 	val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
 	udelay(1);
-	nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val);
+	nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
 
 	for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
 		udelay(1);
-		val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
+		val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
 		if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
 			break;
 	}
 
 	/* exit slowdown mode */
-	nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
 		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
-	nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
 
 	if (ramp_timeout <= 0) {
-		nv_error(priv, "gpcpll dynamic ramp timeout\n");
+		nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
 		return -ETIMEDOUT;
 	}
 
@@ -322,149 +324,147 @@ gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
 }
 
 static void
-_gk20a_pllg_enable(struct gk20a_clk_priv *priv)
+_gk20a_pllg_enable(struct gk20a_clk *clk)
 {
-	nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
-	nv_rd32(priv, GPCPLL_CFG);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+	nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static void
-_gk20a_pllg_disable(struct gk20a_clk_priv *priv)
+_gk20a_pllg_disable(struct gk20a_clk *clk)
 {
-	nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
-	nv_rd32(priv, GPCPLL_CFG);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+	nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static int
-_gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv, bool allow_slide)
+_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
 {
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 val, cfg;
 	u32 m_old, pl_old, n_lo;
 
 	/* get old coefficients */
-	val = nv_rd32(priv, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
 	pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
 
 	/* do NDIV slide if there is no change in M and PL */
-	cfg = nv_rd32(priv, GPCPLL_CFG);
-	if (allow_slide && priv->m == m_old && priv->pl == pl_old &&
+	cfg = nvkm_rd32(device, GPCPLL_CFG);
+	if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
 	    (cfg & GPCPLL_CFG_ENABLE)) {
-		return gk20a_pllg_slide(priv, priv->n);
+		return gk20a_pllg_slide(clk, clk->n);
 	}
 
 	/* slide down to NDIV_LO */
-	n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco,
-			    priv->parent_rate / MHZ);
+	n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
+			    clk->parent_rate / MHZ);
 	if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
-		int ret = gk20a_pllg_slide(priv, n_lo);
+		int ret = gk20a_pllg_slide(clk, n_lo);
 
 		if (ret)
 			return ret;
 	}
 
 	/* split FO-to-bypass jump in halves by setting out divider 1:2 */
-	nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
 		0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
 
 	/* put PLL in bypass before programming it */
-	val = nv_rd32(priv, SEL_VCO);
+	val = nvkm_rd32(device, SEL_VCO);
 	val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
 	udelay(2);
-	nv_wr32(priv, SEL_VCO, val);
+	nvkm_wr32(device, SEL_VCO, val);
 
 	/* get out from IDDQ */
-	val = nv_rd32(priv, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_IDDQ) {
 		val &= ~GPCPLL_CFG_IDDQ;
-		nv_wr32(priv, GPCPLL_CFG, val);
-		nv_rd32(priv, GPCPLL_CFG);
+		nvkm_wr32(device, GPCPLL_CFG, val);
+		nvkm_rd32(device, GPCPLL_CFG);
 		udelay(2);
 	}
 
-	_gk20a_pllg_disable(priv);
+	_gk20a_pllg_disable(clk);
 
-	nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n,
-		 priv->pl);
+	nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
+		   clk->m, clk->n, clk->pl);
 
-	n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco,
-			    priv->parent_rate / MHZ);
-	val = priv->m << GPCPLL_COEFF_M_SHIFT;
-	val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT;
-	val |= priv->pl << GPCPLL_COEFF_P_SHIFT;
-	nv_wr32(priv, GPCPLL_COEFF, val);
+	n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
+			    clk->parent_rate / MHZ);
+	val = clk->m << GPCPLL_COEFF_M_SHIFT;
+	val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
+	val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
+	nvkm_wr32(device, GPCPLL_COEFF, val);
 
-	_gk20a_pllg_enable(priv);
+	_gk20a_pllg_enable(clk);
 
-	val = nv_rd32(priv, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
 		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
-		nv_wr32(priv, GPCPLL_CFG, val);
+		nvkm_wr32(device, GPCPLL_CFG, val);
 	}
 
-	if (!nvkm_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
-				GPCPLL_CFG_LOCK)) {
-		nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__);
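+	/* poll up to 300us for GPCPLL lock before giving up */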
+	if (nvkm_usec(device, 300,
+		if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
+			break;
+	) < 0)
 		return -ETIMEDOUT;
-	}
 
 	/* switch to VCO mode */
-	nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+	nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
 
 	/* restore out divider 1:1 */
-	val = nv_rd32(priv, GPC2CLK_OUT);
+	val = nvkm_rd32(device, GPC2CLK_OUT);
 	val &= ~GPC2CLK_OUT_VCODIV_MASK;
 	udelay(2);
-	nv_wr32(priv, GPC2CLK_OUT, val);
+	nvkm_wr32(device, GPC2CLK_OUT, val);
 
 	/* slide up to new NDIV */
-	return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0;
+	return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
 }
 
 static int
-gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv)
+gk20a_pllg_program_mnp(struct gk20a_clk *clk)
 {
 	int err;
 
-	err = _gk20a_pllg_program_mnp(priv, true);
+	err = _gk20a_pllg_program_mnp(clk, true);
 	if (err)
-		err = _gk20a_pllg_program_mnp(priv, false);
+		err = _gk20a_pllg_program_mnp(clk, false);
 
 	return err;
 }
 
 static void
-gk20a_pllg_disable(struct gk20a_clk_priv *priv)
+gk20a_pllg_disable(struct gk20a_clk *clk)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val;
 
 	/* slide to VCO min */
-	val = nv_rd32(priv, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_ENABLE) {
 		u32 coeff, m, n_lo;
 
-		coeff = nv_rd32(priv, GPCPLL_COEFF);
+		coeff = nvkm_rd32(device, GPCPLL_COEFF);
 		m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
-		n_lo = DIV_ROUND_UP(m * priv->params->min_vco,
-				    priv->parent_rate / MHZ);
-		gk20a_pllg_slide(priv, n_lo);
+		n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
+				    clk->parent_rate / MHZ);
+		gk20a_pllg_slide(clk, n_lo);
 	}
 
 	/* put PLL in bypass before disabling it */
-	nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
 
-	_gk20a_pllg_disable(priv);
+	_gk20a_pllg_disable(clk);
 }
 
 #define GK20A_CLK_GPC_MDIV 1000
 
-static struct nvkm_domain
-gk20a_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
-	{ nv_clk_src_max }
-};
-
 static struct nvkm_pstate
 gk20a_pstates[] = {
 	{
@@ -560,87 +560,99 @@ gk20a_pstates[] = {
 };
 
 static int
-gk20a_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct gk20a_clk_priv *priv = (void *)clk;
+	struct gk20a_clk *clk = gk20a_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(clk)->crystal;
+		return device->crystal;
 	case nv_clk_src_gpc:
-		gk20a_pllg_read_mnp(priv);
-		return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV;
+		gk20a_pllg_read_mnp(clk);
+		return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 }
 
 static int
-gk20a_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gk20a_clk_priv *priv = (void *)clk;
+	struct gk20a_clk *clk = gk20a_clk(base);
 
-	return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] *
+	return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
 					 GK20A_CLK_GPC_MDIV);
 }
 
 static int
-gk20a_clk_prog(struct nvkm_clk *clk)
+gk20a_clk_prog(struct nvkm_clk *base)
 {
-	struct gk20a_clk_priv *priv = (void *)clk;
+	struct gk20a_clk *clk = gk20a_clk(base);
 
-	return gk20a_pllg_program_mnp(priv);
+	return gk20a_pllg_program_mnp(clk);
 }
 
 static void
-gk20a_clk_tidy(struct nvkm_clk *clk)
+gk20a_clk_tidy(struct nvkm_clk *base)
 {
 }
 
-static int
-gk20a_clk_fini(struct nvkm_object *object, bool suspend)
+static void
+gk20a_clk_fini(struct nvkm_clk *base)
 {
-	struct gk20a_clk_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_clk_fini(&priv->base, false);
-
-	gk20a_pllg_disable(priv);
-
-	return ret;
+	struct gk20a_clk *clk = gk20a_clk(base);
+	gk20a_pllg_disable(clk);
 }
 
 static int
-gk20a_clk_init(struct nvkm_object *object)
+gk20a_clk_init(struct nvkm_clk *base)
 {
-	struct gk20a_clk_priv *priv = (void *)object;
+	struct gk20a_clk *clk = gk20a_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	int ret;
 
-	nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
-
-	ret = nvkm_clk_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
 
-	ret = gk20a_clk_prog(&priv->base);
+	ret = gk20a_clk_prog(&clk->base);
 	if (ret) {
-		nv_error(priv, "cannot initialize clock\n");
+		nvkm_error(subdev, "cannot initialize clock\n");
 		return ret;
 	}
 
 	return 0;
 }
 
-static int
-gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_clk_func
+gk20a_clk = {
+	.init = gk20a_clk_init,
+	.fini = gk20a_clk_fini,
+	.read = gk20a_clk_read,
+	.calc = gk20a_clk_calc,
+	.prog = gk20a_clk_prog,
+	.tidy = gk20a_clk_tidy,
+	.pstates = gk20a_pstates,
+	.nr_pstates = ARRAY_SIZE(gk20a_pstates),
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+		{ nv_clk_src_max }
+	}
+};
+
+int
+gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gk20a_clk_priv *priv;
-	struct nouveau_platform_device *plat;
-	int ret;
-	int i;
+	struct nvkm_device_tegra *tdev = device->func->tegra(device);
+	struct gk20a_clk *clk;
+	int ret, i;
+
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
 	/* Finish initializing the pstates */
 	for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
@@ -648,33 +660,11 @@ gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		gk20a_pstates[i].pstate = i + 1;
 	}
 
-	ret = nvkm_clk_create(parent, engine, oclass, gk20a_domains,
-			      gk20a_pstates, ARRAY_SIZE(gk20a_pstates),
-			      true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	clk->params = &gk20a_pllg_params;
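+	/* the GPU reference clock is owned by the Tegra platform device */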
+	clk->parent_rate = clk_get_rate(tdev->clk);
 
-	priv->params = &gk20a_pllg_params;
-
-	plat = nv_device_to_platform(nv_device(parent));
-	priv->parent_rate = clk_get_rate(plat->gpu->clk);
-	nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ);
-
-	priv->base.read = gk20a_clk_read;
-	priv->base.calc = gk20a_clk_calc;
-	priv->base.prog = gk20a_clk_prog;
-	priv->base.tidy = gk20a_clk_tidy;
-	return 0;
+	ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
+	nvkm_info(&clk->base.subdev, "parent clock rate: %d MHz\n",
+		  clk->parent_rate / MHZ);
+	return ret;
 }
-
-struct nvkm_oclass
-gk20a_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_clk_ctor,
-		.dtor = _nvkm_subdev_dtor,
-		.init = gk20a_clk_init,
-		.fini = gk20a_clk_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 065e9f5c8db9..07feae620c8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -22,56 +22,58 @@
  * Authors: Ben Skeggs
  *          Roy Spliet
  */
+#define gt215_clk(p) container_of((p), struct gt215_clk, base)
 #include "gt215.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <engine/fifo.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/timer.h>
 
-struct gt215_clk_priv {
+struct gt215_clk {
 	struct nvkm_clk base;
 	struct gt215_clk_info eng[nv_clk_src_max];
 };
 
-static u32 read_clk(struct gt215_clk_priv *, int, bool);
-static u32 read_pll(struct gt215_clk_priv *, int, u32);
+static u32 read_clk(struct gt215_clk *, int, bool);
+static u32 read_pll(struct gt215_clk *, int, u32);
 
 static u32
-read_vco(struct gt215_clk_priv *priv, int clk)
+read_vco(struct gt215_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
 
 	switch (sctl & 0x00000030) {
 	case 0x00000000:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case 0x00000020:
-		return read_pll(priv, 0x41, 0x00e820);
+		return read_pll(clk, 0x41, 0x00e820);
 	case 0x00000030:
-		return read_pll(priv, 0x42, 0x00e8a0);
+		return read_pll(clk, 0x42, 0x00e8a0);
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
+read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 sctl, sdiv, sclk;
 
 	/* refclk for the 0xe8xx plls is a fixed frequency */
-	if (clk >= 0x40) {
-		if (nv_device(priv)->chipset == 0xaf) {
+	if (idx >= 0x40) {
+		if (device->chipset == 0xaf) {
 			/* no joke.. seriously.. sigh.. */
-			return nv_rd32(priv, 0x00471c) * 1000;
+			return nvkm_rd32(device, 0x00471c) * 1000;
 		}
 
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	}
 
-	sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+	sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
 	if (!ignore_en && !(sctl & 0x00000100))
 		return 0;
 
@@ -83,7 +85,7 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
 	switch (sctl & 0x00003000) {
 	case 0x00000000:
 		if (!(sctl & 0x00000200))
-			return nv_device(priv)->crystal;
+			return device->crystal;
 		return 0;
 	case 0x00002000:
 		if (sctl & 0x00000040)
@@ -94,7 +96,7 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
 		if (!(sctl & 0x00000001))
 			return 0;
 
-		sclk = read_vco(priv, clk);
+		sclk = read_vco(clk, idx);
 		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
 		return (sclk * 2) / sdiv;
 	default:
@@ -103,14 +105,15 @@ read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
 }
 
 static u32
-read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
+read_pll(struct gt215_clk *clk, int idx, u32 pll)
 {
-	u32 ctrl = nv_rd32(priv, pll + 0);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0);
 	u32 sclk = 0, P = 1, N = 1, M = 1;
 
 	if (!(ctrl & 0x00000008)) {
 		if (ctrl & 0x00000001) {
-			u32 coef = nv_rd32(priv, pll + 4);
+			u32 coef = nvkm_rd32(device, pll + 4);
 			M = (coef & 0x000000ff) >> 0;
 			N = (coef & 0x0000ff00) >> 8;
 			P = (coef & 0x003f0000) >> 16;
@@ -121,10 +124,10 @@ read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
 			if ((pll & 0x00ff00) == 0x00e800)
 				P = 1;
 
-			sclk = read_clk(priv, 0x00 + clk, false);
+			sclk = read_clk(clk, 0x00 + idx, false);
 		}
 	} else {
-		sclk = read_clk(priv, 0x10 + clk, false);
+		sclk = read_clk(clk, 0x10 + idx, false);
 	}
 
 	if (M * P)
@@ -134,41 +137,43 @@ read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
 }
 
 static int
-gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct gt215_clk_priv *priv = (void *)clk;
+	struct gt215_clk *clk = gt215_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 hsrc;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_core:
 	case nv_clk_src_core_intm:
-		return read_pll(priv, 0x00, 0x4200);
+		return read_pll(clk, 0x00, 0x4200);
 	case nv_clk_src_shader:
-		return read_pll(priv, 0x01, 0x4220);
+		return read_pll(clk, 0x01, 0x4220);
 	case nv_clk_src_mem:
-		return read_pll(priv, 0x02, 0x4000);
+		return read_pll(clk, 0x02, 0x4000);
 	case nv_clk_src_disp:
-		return read_clk(priv, 0x20, false);
+		return read_clk(clk, 0x20, false);
 	case nv_clk_src_vdec:
-		return read_clk(priv, 0x21, false);
+		return read_clk(clk, 0x21, false);
 	case nv_clk_src_daemon:
-		return read_clk(priv, 0x25, false);
+		return read_clk(clk, 0x25, false);
 	case nv_clk_src_host:
-		hsrc = (nv_rd32(priv, 0xc040) & 0x30000000) >> 28;
+		hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
 		switch (hsrc) {
 		case 0:
-			return read_clk(priv, 0x1d, false);
+			return read_clk(clk, 0x1d, false);
 		case 2:
 		case 3:
 			return 277000;
 		default:
-			nv_error(clk, "unknown HOST clock source %d\n", hsrc);
+			nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc);
 			return -EINVAL;
 		}
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 
@@ -176,10 +181,10 @@ gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 }
 
 int
-gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
+gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
 	       struct gt215_clk_info *info)
 {
-	struct gt215_clk_priv *priv = (void *)clock;
+	struct gt215_clk *clk = gt215_clk(base);
 	u32 oclk, sclk, sdiv;
 	s32 diff;
 
@@ -196,7 +201,7 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
 		info->clk = 0x00002140;
 		return khz;
 	default:
-		sclk = read_vco(priv, clk);
+		sclk = read_vco(clk, idx);
 		sdiv = min((sclk * 2) / khz, (u32)65);
 		oclk = (sclk * 2) / sdiv;
 		diff = ((khz + 3000) - oclk);
@@ -224,11 +229,11 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
 }
 
 int
-gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
+gt215_pll_info(struct nvkm_clk *base, int idx, u32 pll, u32 khz,
 	       struct gt215_clk_info *info)
 {
-	struct nvkm_bios *bios = nvkm_bios(clock);
-	struct gt215_clk_priv *priv = (void *)clock;
+	struct gt215_clk *clk = gt215_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll limits;
 	int P, N, M, diff;
 	int ret;
@@ -237,22 +242,22 @@ gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
 
 	/* If we can get within [-2, 3) MHz of a divider, we'll disable the
 	 * PLL and use the divider instead. */
-	ret = gt215_clk_info(clock, clk, khz, info);
+	ret = gt215_clk_info(&clk->base, idx, khz, info);
 	diff = khz - ret;
 	if (!pll || (diff >= -2000 && diff < 3000)) {
 		goto out;
 	}
 
 	/* Try with PLL */
-	ret = nvbios_pll_parse(bios, pll, &limits);
+	ret = nvbios_pll_parse(subdev->device->bios, pll, &limits);
 	if (ret)
 		return ret;
 
-	ret = gt215_clk_info(clock, clk - 0x10, limits.refclk, info);
+	ret = gt215_clk_info(&clk->base, idx - 0x10, limits.refclk, info);
 	if (ret != limits.refclk)
 		return -EINVAL;
 
-	ret = gt215_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
+	ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P);
 	if (ret >= 0) {
 		info->pll = (P << 16) | (N << 8) | M;
 	}
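
gt215_pll_info first asks gt215_clk_info for a plain divider setting and only falls back to the PLL when the divider misses the target by more than the window in the comment above; when the PLL is used, its coefficients are packed into a single word as (P << 16) | (N << 8) | M. A small sketch of both pieces:

#include <stdint.h>
#include <stdio.h>

/* Acceptance window from the comment above: a divider result within
 * [-2, 3) MHz of the target is good enough to skip the PLL. */
static int
divider_ok(int target_khz, int divider_khz)
{
	int diff = target_khz - divider_khz;
	return diff >= -2000 && diff < 3000;
}

/* PLL coefficients packed the way info->pll stores them. */
static uint32_t
pack_pll(int P, int N, int M)
{
	return ((uint32_t)P << 16) | ((uint32_t)N << 8) | (uint32_t)M;
}

int
main(void)
{
	printf("ok=%d pll=%08x\n", divider_ok(405000, 403500),
	       pack_pll(2, 100, 2));
	return 0;
}
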
@@ -263,22 +268,22 @@ out:
 }
 
 static int
-calc_clk(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate,
-	 int clk, u32 pll, int idx)
+calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
+	 int idx, u32 pll, int dom)
 {
-	int ret = gt215_pll_info(&priv->base, clk, pll, cstate->domain[idx],
-				 &priv->eng[idx]);
+	int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom],
+				 &clk->eng[dom]);
 	if (ret >= 0)
 		return 0;
 	return ret;
 }
 
 static int
-calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
+calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
 {
 	int ret = 0;
 	u32 kHz = cstate->domain[nv_clk_src_host];
-	struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
+	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
 
 	if (kHz == 277000) {
 		info->clk = 0;
@@ -288,7 +293,7 @@ calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
 
 	info->host_out = NVA3_HOST_CLK;
 
-	ret = gt215_clk_info(&priv->base, 0x1d, kHz, info);
+	ret = gt215_clk_info(&clk->base, 0x1d, kHz, info);
 	if (ret >= 0)
 		return 0;
 
@@ -298,21 +303,33 @@ calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
 int
 gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(clk);
+	struct nvkm_device *device = clk->subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 
 	/* halt and idle execution engines */
-	nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
-	nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
+	nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
 	/* Wait until the interrupt handler is finished */
-	if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
+	if (nvkm_msec(device, 2000,
+		if (!nvkm_rd32(device, 0x000100))
+			break;
+	) < 0)
 		return -EBUSY;
 
-	if (pfifo)
-		pfifo->pause(pfifo, flags);
+	if (fifo)
+		nvkm_fifo_pause(fifo, flags);
 
-	if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x002504) & 0x00000010)
+			break;
+	) < 0)
 		return -EIO;
-	if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
+
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x002504) & 0x0000003f;
+		if (tmp == 0x0000003f)
+			break;
+	) < 0)
 		return -EIO;
 
 	return 0;
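
The conversion above replaces nv_wait() with nvkm_msec(), a macro whose body is re-evaluated until it executes break or the millisecond budget runs out, in which case the expression is negative. A userspace stand-in for the pattern, using a callback where the kernel macro takes an inline body:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the nvkm_msec() polling idiom: poll a condition until it
 * holds or the budget expires; a negative return means timeout. */
static int
poll_msec(int timeout_ms, int (*cond)(void *), void *priv)
{
	while (timeout_ms--) {
		if (cond(priv))
			return timeout_ms; /* >= 0: condition met */
		/* a real implementation would sleep ~1 ms here */
	}
	return -1; /* timed out, like nvkm_msec() < 0 */
}

static int
intr_idle(void *priv)
{
	uint32_t *reg = priv;
	return *reg == 0; /* mimics: !nvkm_rd32(device, 0x000100) */
}

int
main(void)
{
	uint32_t intr = 0;
	printf("%s\n", poll_msec(2000, intr_idle, &intr) < 0 ?
	       "-EBUSY" : "idle");
	return 0;
}
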
@@ -321,86 +338,94 @@ gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 void
 gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(clk);
+	struct nvkm_device *device = clk->subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 
-	if (pfifo && flags)
-		pfifo->start(pfifo, flags);
+	if (fifo && flags)
+		nvkm_fifo_start(fifo, flags);
 
-	nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
-	nv_mask(clk, 0x020060, 0x00070000, 0x00040000);
+	nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
 }
 
 static void
-disable_clk_src(struct gt215_clk_priv *priv, u32 src)
+disable_clk_src(struct gt215_clk *clk, u32 src)
 {
-	nv_mask(priv, src, 0x00000100, 0x00000000);
-	nv_mask(priv, src, 0x00000001, 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, src, 0x00000100, 0x00000000);
+	nvkm_mask(device, src, 0x00000001, 0x00000000);
 }
 
 static void
-prog_pll(struct gt215_clk_priv *priv, int clk, u32 pll, int idx)
+prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
 {
-	struct gt215_clk_info *info = &priv->eng[idx];
-	const u32 src0 = 0x004120 + (clk * 4);
-	const u32 src1 = 0x004160 + (clk * 4);
+	struct gt215_clk_info *info = &clk->eng[dom];
+	struct nvkm_device *device = clk->base.subdev.device;
+	const u32 src0 = 0x004120 + (idx * 4);
+	const u32 src1 = 0x004160 + (idx * 4);
 	const u32 ctrl = pll + 0;
 	const u32 coef = pll + 4;
 	u32 bypass;
 
 	if (info->pll) {
 		/* Always start from a non-PLL clock */
-		bypass = nv_rd32(priv, ctrl)  & 0x00000008;
+		bypass = nvkm_rd32(device, ctrl)  & 0x00000008;
 		if (!bypass) {
-			nv_mask(priv, src1, 0x00000101, 0x00000101);
-			nv_mask(priv, ctrl, 0x00000008, 0x00000008);
+			nvkm_mask(device, src1, 0x00000101, 0x00000101);
+			nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
 			udelay(20);
 		}
 
-		nv_mask(priv, src0, 0x003f3141, 0x00000101 | info->clk);
-		nv_wr32(priv, coef, info->pll);
-		nv_mask(priv, ctrl, 0x00000015, 0x00000015);
-		nv_mask(priv, ctrl, 0x00000010, 0x00000000);
-		if (!nv_wait(priv, ctrl, 0x00020000, 0x00020000)) {
-			nv_mask(priv, ctrl, 0x00000010, 0x00000010);
-			nv_mask(priv, src0, 0x00000101, 0x00000000);
+		nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
+		nvkm_wr32(device, coef, info->pll);
+		nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
+		nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, ctrl) & 0x00020000)
+				break;
+		) < 0) {
+			nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+			nvkm_mask(device, src0, 0x00000101, 0x00000000);
 			return;
 		}
-		nv_mask(priv, ctrl, 0x00000010, 0x00000010);
-		nv_mask(priv, ctrl, 0x00000008, 0x00000000);
-		disable_clk_src(priv, src1);
+		nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+		nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
+		disable_clk_src(clk, src1);
 	} else {
-		nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
-		nv_mask(priv, ctrl, 0x00000018, 0x00000018);
+		nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
+		nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
 		udelay(20);
-		nv_mask(priv, ctrl, 0x00000001, 0x00000000);
-		disable_clk_src(priv, src0);
+		nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
+		disable_clk_src(clk, src0);
 	}
 }
 
 static void
-prog_clk(struct gt215_clk_priv *priv, int clk, int idx)
+prog_clk(struct gt215_clk *clk, int idx, int dom)
 {
-	struct gt215_clk_info *info = &priv->eng[idx];
-	nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
+	struct gt215_clk_info *info = &clk->eng[dom];
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
 }
 
 static void
-prog_host(struct gt215_clk_priv *priv)
+prog_host(struct gt215_clk *clk)
 {
-	struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
-	u32 hsrc = (nv_rd32(priv, 0xc040));
+	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 hsrc = (nvkm_rd32(device, 0xc040));
 
 	switch (info->host_out) {
 	case NVA3_HOST_277:
 		if ((hsrc & 0x30000000) == 0) {
-			nv_wr32(priv, 0xc040, hsrc | 0x20000000);
-			disable_clk_src(priv, 0x4194);
+			nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
+			disable_clk_src(clk, 0x4194);
 		}
 		break;
 	case NVA3_HOST_CLK:
-		prog_clk(priv, 0x1d, nv_clk_src_host);
+		prog_clk(clk, 0x1d, nv_clk_src_host);
 		if ((hsrc & 0x30000000) >= 0x20000000) {
-			nv_wr32(priv, 0xc040, hsrc & ~0x30000000);
+			nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
 		}
 		break;
 	default:
@@ -408,44 +433,45 @@ prog_host(struct gt215_clk_priv *priv)
 	}
 
 	/* This seems to be a clock gating factor on idle, always set to 0x3e */
-	nv_wr32(priv, 0xc044, 0x3e);
+	nvkm_wr32(device, 0xc044, 0x3e);
 }
 
 static void
-prog_core(struct gt215_clk_priv *priv, int idx)
+prog_core(struct gt215_clk *clk, int dom)
 {
-	struct gt215_clk_info *info = &priv->eng[idx];
-	u32 fb_delay = nv_rd32(priv, 0x10002c);
+	struct gt215_clk_info *info = &clk->eng[dom];
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 fb_delay = nvkm_rd32(device, 0x10002c);
 
 	if (fb_delay < info->fb_delay)
-		nv_wr32(priv, 0x10002c, info->fb_delay);
+		nvkm_wr32(device, 0x10002c, info->fb_delay);
 
-	prog_pll(priv, 0x00, 0x004200, idx);
+	prog_pll(clk, 0x00, 0x004200, dom);
 
 	if (fb_delay > info->fb_delay)
-		nv_wr32(priv, 0x10002c, info->fb_delay);
+		nvkm_wr32(device, 0x10002c, info->fb_delay);
 }
 
 static int
-gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gt215_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gt215_clk_priv *priv = (void *)clk;
-	struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
+	struct gt215_clk *clk = gt215_clk(base);
+	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
 	int ret;
 
-	if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
-	    (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
-	    (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
-	    (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
-	    (ret = calc_host(priv, cstate)))
+	if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
+	    (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
+	    (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
+	    (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
+	    (ret = calc_host(clk, cstate)))
 		return ret;
 
 	/* XXX: Should be reading the highest bit in the VBIOS clock to decide
 	 * whether to use a PLL or not... but using a PLL defeats the purpose */
 	if (core->pll) {
-		ret = gt215_clk_info(clk, 0x10,
+		ret = gt215_clk_info(&clk->base, 0x10,
 				     cstate->domain[nv_clk_src_core_intm],
-				     &priv->eng[nv_clk_src_core_intm]);
+				     &clk->eng[nv_clk_src_core_intm]);
 		if (ret < 0)
 			return ret;
 	}
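
The block above exists because the core normally runs from a PLL: before that PLL is rewritten, gt215_clk_prog (below) first moves the core to the divider-only core_intm setting computed here, so the chip never executes from a PLL whose coefficients are changing. A toy illustration of the ordering only:

#include <stdio.h>

/* Ordering sketch: park the core on a divider-based intermediate
 * frequency before reprogramming its PLL, then switch to the final one. */
static void
prog_core(const char *dom)
{
	printf("program core from %s\n", dom);
}

int
main(void)
{
	int uses_pll = 1;

	if (uses_pll)
		prog_core("core_intm (divider, PLL bypassed)");
	prog_core("core (final)");
	return 0;
}
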
@@ -454,81 +480,67 @@ gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
 }
 
 static int
-gt215_clk_prog(struct nvkm_clk *clk)
+gt215_clk_prog(struct nvkm_clk *base)
 {
-	struct gt215_clk_priv *priv = (void *)clk;
-	struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
+	struct gt215_clk *clk = gt215_clk(base);
+	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
 	int ret = 0;
 	unsigned long flags;
 	unsigned long *f = &flags;
 
-	ret = gt215_clk_pre(clk, f);
+	ret = gt215_clk_pre(&clk->base, f);
 	if (ret)
 		goto out;
 
 	if (core->pll)
-		prog_core(priv, nv_clk_src_core_intm);
+		prog_core(clk, nv_clk_src_core_intm);
 
-	prog_core(priv,  nv_clk_src_core);
-	prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
-	prog_clk(priv, 0x20, nv_clk_src_disp);
-	prog_clk(priv, 0x21, nv_clk_src_vdec);
-	prog_host(priv);
+	prog_core(clk,  nv_clk_src_core);
+	prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
+	prog_clk(clk, 0x20, nv_clk_src_disp);
+	prog_clk(clk, 0x21, nv_clk_src_vdec);
+	prog_host(clk);
 
 out:
 	if (ret == -EBUSY)
 		f = NULL;
 
-	gt215_clk_post(clk, f);
+	gt215_clk_post(&clk->base, f);
 	return ret;
 }
 
 static void
-gt215_clk_tidy(struct nvkm_clk *clk)
+gt215_clk_tidy(struct nvkm_clk *base)
 {
 }
 
-static struct nvkm_domain
-gt215_domain[] = {
-	{ nv_clk_src_crystal  , 0xff },
-	{ nv_clk_src_core     , 0x00, 0, "core", 1000 },
-	{ nv_clk_src_shader   , 0x01, 0, "shader", 1000 },
-	{ nv_clk_src_mem      , 0x02, 0, "memory", 1000 },
-	{ nv_clk_src_vdec     , 0x03 },
-	{ nv_clk_src_disp     , 0x04 },
-	{ nv_clk_src_host     , 0x05 },
-	{ nv_clk_src_core_intm, 0x06 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+gt215_clk = {
+	.read = gt215_clk_read,
+	.calc = gt215_clk_calc,
+	.prog = gt215_clk_prog,
+	.tidy = gt215_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal  , 0xff },
+		{ nv_clk_src_core     , 0x00, 0, "core", 1000 },
+		{ nv_clk_src_shader   , 0x01, 0, "shader", 1000 },
+		{ nv_clk_src_mem      , 0x02, 0, "memory", 1000 },
+		{ nv_clk_src_vdec     , 0x03 },
+		{ nv_clk_src_disp     , 0x04 },
+		{ nv_clk_src_host     , 0x05 },
+		{ nv_clk_src_core_intm, 0x06 },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-gt215_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gt215_clk_priv *priv;
-	int ret;
+	struct gt215_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, gt215_domain,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = gt215_clk_read;
-	priv->base.calc = gt215_clk_calc;
-	priv->base.prog = gt215_clk_prog;
-	priv->base.tidy = gt215_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
 }
-
-struct nvkm_oclass
-gt215_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
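
This file shows the shape of the whole series: the nvkm_oclass/ofuncs boilerplate is replaced by a const function table plus a small chip-specific constructor that allocates the subclass, stores the base pointer, and delegates to nvkm_clk_ctor. A generic, self-contained sketch of that style (all names here invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Const-vtable + embedded-base construction, as in the diff above.
 * Types are illustrative, not the kernel's. */
struct clk;

struct clk_func {
	int (*read)(struct clk *);
};

struct clk {
	const struct clk_func *func;
};

struct my_clk {
	struct clk base;  /* embedded base; downcasts rely on this */
	int freq;
};

static int
my_clk_read(struct clk *base)
{
	struct my_clk *clk = (struct my_clk *)base; /* container_of-style */
	return clk->freq;
}

static const struct clk_func
my_clk_func = {
	.read = my_clk_read,
};

static int
my_clk_new(struct clk **pclk)
{
	struct my_clk *clk;

	if (!(clk = calloc(1, sizeof(*clk))))
		return -1;
	clk->base.func = &my_clk_func;
	clk->freq = 405000;
	*pclk = &clk->base;
	return 0;
}

int
main(void)
{
	struct clk *clk;

	if (my_clk_new(&clk) == 0) {
		printf("%d kHz\n", clk->func->read(clk));
		free(clk);
	}
	return 0;
}
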
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
index b447d9cd4d37..8865b59fe575 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
@@ -1,6 +1,6 @@
 #ifndef __NVKM_CLK_NVA3_H__
 #define __NVKM_CLK_NVA3_H__
-#include <subdev/clk.h>
+#include "priv.h"
 
 struct gt215_clk_info {
 	u32 clk;
@@ -13,6 +13,6 @@ struct gt215_clk_info {
 };
 
 int  gt215_pll_info(struct nvkm_clk *, int, u32, u32, struct gt215_clk_info *);
-int  gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags);
-void gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags);
+int  gt215_clk_pre(struct nvkm_clk *, unsigned long *flags);
+void gt215_clk_post(struct nvkm_clk *, unsigned long *flags);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index c54417b146c7..1c21b8b53b78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -21,15 +21,15 @@
  *
  * Authors: Ben Skeggs
  */
+#define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
 #include "gt215.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/timer.h>
 
-struct mcp77_clk_priv {
+struct mcp77_clk {
 	struct nvkm_clk base;
 	enum nv_clk_src csrc, ssrc, vsrc;
 	u32 cctrl, sctrl;
@@ -39,27 +39,29 @@ struct mcp77_clk_priv {
 };
 
 static u32
-read_div(struct nvkm_clk *clk)
+read_div(struct mcp77_clk *clk)
 {
-	return nv_rd32(clk, 0x004600);
+	struct nvkm_device *device = clk->base.subdev.device;
+	return nvkm_rd32(device, 0x004600);
 }
 
 static u32
-read_pll(struct nvkm_clk *clk, u32 base)
+read_pll(struct mcp77_clk *clk, u32 base)
 {
-	u32 ctrl = nv_rd32(clk, base + 0);
-	u32 coef = nv_rd32(clk, base + 4);
-	u32 ref = clk->read(clk, nv_clk_src_href);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, base + 0);
+	u32 coef = nvkm_rd32(device, base + 4);
+	u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
 	u32 post_div = 0;
 	u32 clock = 0;
 	int N1, M1;
 
 	switch (base){
 	case 0x4020:
-		post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
 		break;
 	case 0x4028:
-		post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
 		break;
 	default:
 		break;
@@ -76,59 +78,61 @@ read_pll(struct nvkm_clk *clk, u32 base)
 }
 
 static int
-mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct mcp77_clk_priv *priv = (void *)clk;
-	u32 mast = nv_rd32(clk, 0x00c054);
+	struct mcp77_clk *clk = mcp77_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mast = nvkm_rd32(device, 0x00c054);
 	u32 P = 0;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /* PCIE reference clock */
 	case nv_clk_src_hclkm4:
-		return clk->read(clk, nv_clk_src_href) * 4;
+		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
 	case nv_clk_src_hclkm2d3:
-		return clk->read(clk, nv_clk_src_href) * 2 / 3;
+		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
 	case nv_clk_src_host:
 		switch (mast & 0x000c0000) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
 		case 0x00040000: break;
-		case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
-		case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
+		case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
+		case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
 		}
 		break;
 	case nv_clk_src_core:
-		P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
 
 		switch (mast & 0x00000003) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 		case 0x00000001: return 0;
-		case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
+		case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
 		case 0x00000003: return read_pll(clk, 0x004028) >> P;
 		}
 		break;
 	case nv_clk_src_cclk:
 		if ((mast & 0x03000000) != 0x03000000)
-			return clk->read(clk, nv_clk_src_core);
+			return nvkm_clk_read(&clk->base, nv_clk_src_core);
 
 		if ((mast & 0x00000200) == 0x00000000)
-			return clk->read(clk, nv_clk_src_core);
+			return nvkm_clk_read(&clk->base, nv_clk_src_core);
 
 		switch (mast & 0x00000c00) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_href);
-		case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
-		case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
+		case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
+		case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
 		default: return 0;
 		}
 	case nv_clk_src_shader:
-		P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
 		switch (mast & 0x00000030) {
 		case 0x00000000:
 			if (mast & 0x00000040)
-				return clk->read(clk, nv_clk_src_href) >> P;
-			return clk->read(clk, nv_clk_src_crystal) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
+			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 		case 0x00000010: break;
 		case 0x00000020: return read_pll(clk, 0x004028) >> P;
 		case 0x00000030: return read_pll(clk, 0x004020) >> P;
@@ -142,7 +146,7 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 
 		switch (mast & 0x00400000) {
 		case 0x00400000:
-			return clk->read(clk, nv_clk_src_core) >> P;
+			return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
 			break;
 		default:
 			return 500000 >> P;
@@ -153,29 +157,28 @@ mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 		break;
 	}
 
-	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
 	return 0;
 }
 
 static u32
-calc_pll(struct mcp77_clk_priv *priv, u32 reg,
+calc_pll(struct mcp77_clk *clk, u32 reg,
 	 u32 clock, int *N, int *M, int *P)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll pll;
-	struct nvkm_clk *clk = &priv->base;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, reg, &pll);
+	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
 	if (ret)
 		return 0;
 
 	pll.vco2.max_freq = 0;
-	pll.refclk = clk->read(clk, nv_clk_src_href);
+	pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
 	if (!pll.refclk)
 		return 0;
 
-	return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
+	return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
 }
 
 static inline u32
@@ -197,26 +200,27 @@ calc_P(u32 src, u32 target, int *div)
 }
 
 static int
-mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct mcp77_clk_priv *priv = (void *)clk;
+	struct mcp77_clk *clk = mcp77_clk(base);
 	const int shader = cstate->domain[nv_clk_src_shader];
 	const int core = cstate->domain[nv_clk_src_core];
 	const int vdec = cstate->domain[nv_clk_src_vdec];
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	u32 out = 0, clock = 0;
 	int N, M, P1, P2 = 0;
 	int divs = 0;
 
 	/* cclk: find suitable source, disable PLL if we can */
-	if (core < clk->read(clk, nv_clk_src_hclkm4))
-		out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
+	if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
+		out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);
 
 	/* Calculate clock * 2, so shader clock can use it too */
-	clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
+	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
 
 	if (abs(core - out) <= abs(core - (clock >> 1))) {
-		priv->csrc = nv_clk_src_hclkm4;
-		priv->cctrl = divs << 16;
+		clk->csrc = nv_clk_src_hclkm4;
+		clk->cctrl = divs << 16;
 	} else {
 		/* NVCTRL is actually used _after_ NVPOST, and after what we
 		 * call NVPLL. To make matters worse, NVPOST is an integer
@@ -226,31 +230,31 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
 			P1 = 2;
 		}
 
-		priv->csrc = nv_clk_src_core;
-		priv->ccoef = (N << 8) | M;
+		clk->csrc = nv_clk_src_core;
+		clk->ccoef = (N << 8) | M;
 
-		priv->cctrl = (P2 + 1) << 16;
-		priv->cpost = (1 << P1) << 16;
+		clk->cctrl = (P2 + 1) << 16;
+		clk->cpost = (1 << P1) << 16;
 	}
 
 	/* sclk: nvpll + divisor, href or spll */
 	out = 0;
-	if (shader == clk->read(clk, nv_clk_src_href)) {
-		priv->ssrc = nv_clk_src_href;
+	if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
+		clk->ssrc = nv_clk_src_href;
 	} else {
-		clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
-		if (priv->csrc == nv_clk_src_core)
+		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
+		if (clk->csrc == nv_clk_src_core)
 			out = calc_P((core << 1), shader, &divs);
 
 		if (abs(shader - out) <=
 		    abs(shader - clock) &&
 		   (divs + P2) <= 7) {
-			priv->ssrc = nv_clk_src_core;
-			priv->sctrl = (divs + P2) << 16;
+			clk->ssrc = nv_clk_src_core;
+			clk->sctrl = (divs + P2) << 16;
 		} else {
-			priv->ssrc = nv_clk_src_shader;
-			priv->scoef = (N << 8) | M;
-			priv->sctrl = P1 << 16;
+			clk->ssrc = nv_clk_src_shader;
+			clk->scoef = (N << 8) | M;
+			clk->sctrl = P1 << 16;
 		}
 	}
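
Both the cclk and sclk choices above use the same rule: compute what the divider path and the PLL path can each achieve, then keep the candidate with the smaller absolute error, with ties going to the divider so the PLL can stay powered down. The rule in isolation:

#include <stdio.h>
#include <stdlib.h>

/* Selection rule used by mcp77_clk_calc: nearest achievable frequency
 * wins; ties favour the divider (first) option. */
static int
divider_wins(int target, int div_khz, int pll_khz)
{
	return abs(target - div_khz) <= abs(target - pll_khz);
}

int
main(void)
{
	printf(divider_wins(600000, 601000, 599500) ?
	       "divider\n" : "pll\n");
	return 0;
}
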
 
@@ -258,172 +262,162 @@ mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
 	out = calc_P(core, vdec, &divs);
 	clock = calc_P(500000, vdec, &P1);
 	if (abs(vdec - out) <= abs(vdec - clock)) {
-		priv->vsrc = nv_clk_src_cclk;
-		priv->vdiv = divs << 16;
+		clk->vsrc = nv_clk_src_cclk;
+		clk->vdiv = divs << 16;
 	} else {
-		priv->vsrc = nv_clk_src_vdec;
-		priv->vdiv = P1 << 16;
+		clk->vsrc = nv_clk_src_vdec;
+		clk->vdiv = P1 << 16;
 	}
 
 	/* Print strategy! */
-	nv_debug(priv, "nvpll: %08x %08x %08x\n",
-			priv->ccoef, priv->cpost, priv->cctrl);
-	nv_debug(priv, " spll: %08x %08x %08x\n",
-			priv->scoef, priv->spost, priv->sctrl);
-	nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
-	if (priv->csrc == nv_clk_src_hclkm4)
-		nv_debug(priv, "core: hrefm4\n");
+	nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
+		   clk->ccoef, clk->cpost, clk->cctrl);
+	nvkm_debug(subdev, " spll: %08x %08x %08x\n",
+		   clk->scoef, clk->spost, clk->sctrl);
+	nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
+	if (clk->csrc == nv_clk_src_hclkm4)
+		nvkm_debug(subdev, "core: hrefm4\n");
 	else
-		nv_debug(priv, "core: nvpll\n");
+		nvkm_debug(subdev, "core: nvpll\n");
 
-	if (priv->ssrc == nv_clk_src_hclkm4)
-		nv_debug(priv, "shader: hrefm4\n");
-	else if (priv->ssrc == nv_clk_src_core)
-		nv_debug(priv, "shader: nvpll\n");
+	if (clk->ssrc == nv_clk_src_hclkm4)
+		nvkm_debug(subdev, "shader: hrefm4\n");
+	else if (clk->ssrc == nv_clk_src_core)
+		nvkm_debug(subdev, "shader: nvpll\n");
 	else
-		nv_debug(priv, "shader: spll\n");
+		nvkm_debug(subdev, "shader: spll\n");
 
-	if (priv->vsrc == nv_clk_src_hclkm4)
-		nv_debug(priv, "vdec: 500MHz\n");
+	if (clk->vsrc == nv_clk_src_hclkm4)
+		nvkm_debug(subdev, "vdec: 500MHz\n");
 	else
-		nv_debug(priv, "vdec: core\n");
+		nvkm_debug(subdev, "vdec: core\n");
 
 	return 0;
 }
 
 static int
-mcp77_clk_prog(struct nvkm_clk *clk)
+mcp77_clk_prog(struct nvkm_clk *base)
 {
-	struct mcp77_clk_priv *priv = (void *)clk;
+	struct mcp77_clk *clk = mcp77_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 pllmask = 0, mast;
 	unsigned long flags;
 	unsigned long *f = &flags;
 	int ret = 0;
 
-	ret = gt215_clk_pre(clk, f);
+	ret = gt215_clk_pre(&clk->base, f);
 	if (ret)
 		goto out;
 
 	/* First switch to safe clocks: href */
-	mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
 	mast &= ~0x00400e73;
 	mast |= 0x03000000;
 
-	switch (priv->csrc) {
+	switch (clk->csrc) {
 	case nv_clk_src_hclkm4:
-		nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
+		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
 		mast |= 0x00000002;
 		break;
 	case nv_clk_src_core:
-		nv_wr32(clk, 0x402c, priv->ccoef);
-		nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
-		nv_wr32(clk, 0x4040, priv->cpost);
+		nvkm_wr32(device, 0x402c, clk->ccoef);
+		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
+		nvkm_wr32(device, 0x4040, clk->cpost);
 		pllmask |= (0x3 << 8);
 		mast |= 0x00000003;
 		break;
 	default:
-		nv_warn(priv,"Reclocking failed: unknown core clock\n");
+		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
 		goto resume;
 	}
 
-	switch (priv->ssrc) {
+	switch (clk->ssrc) {
 	case nv_clk_src_href:
-		nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
 		/* mast |= 0x00000000; */
 		break;
 	case nv_clk_src_core:
-		nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
+		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
 		mast |= 0x00000020;
 		break;
 	case nv_clk_src_shader:
-		nv_wr32(clk, 0x4024, priv->scoef);
-		nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
-		nv_wr32(clk, 0x4070, priv->spost);
+		nvkm_wr32(device, 0x4024, clk->scoef);
+		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
+		nvkm_wr32(device, 0x4070, clk->spost);
 		pllmask |= (0x3 << 12);
 		mast |= 0x00000030;
 		break;
 	default:
-		nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
+		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
 		goto resume;
 	}
 
-	if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
-		nv_warn(priv,"Reclocking failed: unstable PLLs\n");
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
+		if (tmp == pllmask)
+			break;
+	) < 0)
 		goto resume;
-	}
 
-	switch (priv->vsrc) {
+	switch (clk->vsrc) {
 	case nv_clk_src_cclk:
 		mast |= 0x00400000;
 	default:
-		nv_wr32(clk, 0x4600, priv->vdiv);
+		nvkm_wr32(device, 0x4600, clk->vdiv);
 	}
 
-	nv_wr32(clk, 0xc054, mast);
+	nvkm_wr32(device, 0xc054, mast);
 
 resume:
 	/* Disable some PLLs and dividers when unused */
-	if (priv->csrc != nv_clk_src_core) {
-		nv_wr32(clk, 0x4040, 0x00000000);
-		nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+	if (clk->csrc != nv_clk_src_core) {
+		nvkm_wr32(device, 0x4040, 0x00000000);
+		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
 	}
 
-	if (priv->ssrc != nv_clk_src_shader) {
-		nv_wr32(clk, 0x4070, 0x00000000);
-		nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+	if (clk->ssrc != nv_clk_src_shader) {
+		nvkm_wr32(device, 0x4070, 0x00000000);
+		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
 	}
 
 out:
 	if (ret == -EBUSY)
 		f = NULL;
 
-	gt215_clk_post(clk, f);
+	gt215_clk_post(&clk->base, f);
 	return ret;
 }
 
 static void
-mcp77_clk_tidy(struct nvkm_clk *clk)
+mcp77_clk_tidy(struct nvkm_clk *base)
 {
 }
 
-static struct nvkm_domain
-mcp77_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+mcp77_clk = {
+	.read = mcp77_clk_read,
+	.calc = mcp77_clk_calc,
+	.prog = mcp77_clk_prog,
+	.tidy = mcp77_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct mcp77_clk_priv *priv;
-	int ret;
+	struct mcp77_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = mcp77_clk_read;
-	priv->base.calc = mcp77_clk_calc;
-	priv->base.prog = mcp77_clk_prog;
-	priv->base.tidy = mcp77_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
 }
-
-struct nvkm_oclass *
-mcp77_clk_oclass = &(struct nvkm_oclass) {
-	.handle = NV_SUBDEV(CLK, 0xaa),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = mcp77_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
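
The mcp77_clk() helper defined at the top of this file is the container_of() downcast that replaces the old "(void *)clk" casts throughout the series. A userspace rendition of the same construct:

#include <stddef.h>
#include <stdio.h>

/* container_of(): recover the enclosing structure from a pointer to one
 * of its members, as the mcp77_clk()/gt215_clk() macros do. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };

struct derived {
	struct base base;
	int extra;
};

#define to_derived(p) container_of((p), struct derived, base)

int
main(void)
{
	struct derived d = { .base = { .id = 1 }, .extra = 42 };
	struct base *b = &d.base;

	printf("extra=%d\n", to_derived(b)->extra);
	return 0;
}
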
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
index 63dbbb575228..b280f85e8827 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
@@ -21,23 +21,19 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#include "priv.h"
 #include "pll.h"
 
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/devinit/nv04.h>
 
-struct nv04_clk_priv {
-	struct nvkm_clk base;
-};
-
 int
 nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
 		  int clk, struct nvkm_pll_vals *pv)
 {
 	int N1, M1, N2, M2, P;
-	int ret = nv04_pll_calc(nv_subdev(clock), info, clk, &N1, &M1, &N2, &M2, &P);
+	int ret = nv04_pll_calc(&clock->subdev, info, clk, &N1, &M1, &N2, &M2, &P);
 	if (ret) {
 		pv->refclk = info->refclk;
 		pv->N1 = N1;
@@ -52,8 +48,9 @@ nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
 int
 nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(clk);
-	int cv = nvkm_bios(clk)->version.chip;
+	struct nvkm_device *device = clk->subdev.device;
+	struct nvkm_devinit *devinit = device->devinit;
+	int cv = device->bios->version.chip;
 
 	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
 	    cv >= 0x40) {
@@ -67,37 +64,20 @@ nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
 	return 0;
 }
 
-static struct nvkm_domain
-nv04_domain[] = {
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+nv04_clk = {
+	.domains = {
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-nv04_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+nv04_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct nv04_clk_priv *priv;
-	int ret;
-
-	ret = nvkm_clk_create(parent, engine, oclass, nv04_domain,
-			      NULL, 0, false, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.pll_calc = nv04_clk_pll_calc;
-	priv->base.pll_prog = nv04_clk_pll_prog;
-	return 0;
+	int ret = nvkm_clk_new_(&nv04_clk, device, index, false, pclk);
+	if (ret == 0) {
+		(*pclk)->pll_calc = nv04_clk_pll_calc;
+		(*pclk)->pll_prog = nv04_clk_pll_prog;
+	}
+	return ret;
 }
-
-struct nvkm_oclass
-nv04_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
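
nv04 has no per-chip state, so its constructor goes through the generic nvkm_clk_new_() helper and then attaches the optional pll_calc/pll_prog hooks on success. A minimal sketch of that construct-then-hook pattern (names invented):

#include <stdio.h>
#include <stdlib.h>

/* Construct through a generic helper, then attach optional hooks on
 * success, as nv04_clk_new() does above. */
struct clk {
	int (*pll_calc)(struct clk *, int khz);
};

static int
clk_new_generic(struct clk **pclk)
{
	*pclk = calloc(1, sizeof(**pclk));
	return *pclk ? 0 : -1;
}

static int
pll_calc_stub(struct clk *clk, int khz)
{
	return khz; /* placeholder for the real calculation */
}

int
main(void)
{
	struct clk *clk;
	int ret = clk_new_generic(&clk);

	if (ret == 0) {
		clk->pll_calc = pll_calc_stub; /* hook set after ctor */
		printf("%d\n", clk->pll_calc(clk, 270000));
		free(clk);
	}
	return ret;
}
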
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index ed838130c89d..2ab9b9b84018 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
@@ -21,14 +21,14 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#define nv40_clk(p) container_of((p), struct nv40_clk, base)
+#include "priv.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 
-struct nv40_clk_priv {
+struct nv40_clk {
 	struct nvkm_clk base;
 	u32 ctrl;
 	u32 npll_ctrl;
@@ -36,64 +36,56 @@ struct nv40_clk_priv {
 	u32 spll;
 };
 
-static struct nvkm_domain
-nv40_domain[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
-	{ nv_clk_src_max }
-};
-
 static u32
-read_pll_1(struct nv40_clk_priv *priv, u32 reg)
+read_pll_1(struct nv40_clk *clk, u32 reg)
 {
-	u32 ctrl = nv_rd32(priv, reg + 0x00);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, reg + 0x00);
 	int P = (ctrl & 0x00070000) >> 16;
 	int N = (ctrl & 0x0000ff00) >> 8;
 	int M = (ctrl & 0x000000ff) >> 0;
-	u32 ref = 27000, clk = 0;
+	u32 ref = 27000, khz = 0;
 
 	if (ctrl & 0x80000000)
-		clk = ref * N / M;
+		khz = ref * N / M;
 
-	return clk >> P;
+	return khz >> P;
 }
 
 static u32
-read_pll_2(struct nv40_clk_priv *priv, u32 reg)
+read_pll_2(struct nv40_clk *clk, u32 reg)
 {
-	u32 ctrl = nv_rd32(priv, reg + 0x00);
-	u32 coef = nv_rd32(priv, reg + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, reg + 0x00);
+	u32 coef = nvkm_rd32(device, reg + 0x04);
 	int N2 = (coef & 0xff000000) >> 24;
 	int M2 = (coef & 0x00ff0000) >> 16;
 	int N1 = (coef & 0x0000ff00) >> 8;
 	int M1 = (coef & 0x000000ff) >> 0;
 	int P = (ctrl & 0x00070000) >> 16;
-	u32 ref = 27000, clk = 0;
+	u32 ref = 27000, khz = 0;
 
 	if ((ctrl & 0x80000000) && M1) {
-		clk = ref * N1 / M1;
+		khz = ref * N1 / M1;
 		if ((ctrl & 0x40000100) == 0x40000000) {
 			if (M2)
-				clk = clk * N2 / M2;
+				khz = khz * N2 / M2;
 			else
-				clk = 0;
+				khz = 0;
 		}
 	}
 
-	return clk >> P;
+	return khz >> P;
 }
 
 static u32
-read_clk(struct nv40_clk_priv *priv, u32 src)
+read_clk(struct nv40_clk *clk, u32 src)
 {
 	switch (src) {
 	case 3:
-		return read_pll_2(priv, 0x004000);
+		return read_pll_2(clk, 0x004000);
 	case 2:
-		return read_pll_1(priv, 0x004008);
+		return read_pll_1(clk, 0x004008);
 	default:
 		break;
 	}
@@ -102,46 +94,48 @@ read_clk(struct nv40_clk_priv *priv, u32 src)
 }
 
 static int
-nv40_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+nv40_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nv40_clk_priv *priv = (void *)clk;
-	u32 mast = nv_rd32(priv, 0x00c040);
+	struct nv40_clk *clk = nv40_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /*XXX: PCIE/AGP differ*/
 	case nv_clk_src_core:
-		return read_clk(priv, (mast & 0x00000003) >> 0);
+		return read_clk(clk, (mast & 0x00000003) >> 0);
 	case nv_clk_src_shader:
-		return read_clk(priv, (mast & 0x00000030) >> 4);
+		return read_clk(clk, (mast & 0x00000030) >> 4);
 	case nv_clk_src_mem:
-		return read_pll_2(priv, 0x4020);
+		return read_pll_2(clk, 0x4020);
 	default:
 		break;
 	}
 
-	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
 	return -EINVAL;
 }
 
 static int
-nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
+nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz,
 		  int *N1, int *M1, int *N2, int *M2, int *log2P)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll pll;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, reg, &pll);
+	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
 	if (ret)
 		return ret;
 
-	if (clk < pll.vco1.max_freq)
+	if (khz < pll.vco1.max_freq)
 		pll.vco2.max_freq = 0;
 
-	ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
+	ret = nv04_pll_calc(subdev, &pll, khz, N1, M1, N2, M2, log2P);
 	if (ret == 0)
 		return -ERANGE;
 
@@ -149,93 +143,90 @@ nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
 }
 
 static int
-nv40_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+nv40_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct nv40_clk_priv *priv = (void *)clk;
+	struct nv40_clk *clk = nv40_clk(base);
 	int gclk = cstate->domain[nv_clk_src_core];
 	int sclk = cstate->domain[nv_clk_src_shader];
 	int N1, M1, N2, M2, log2P;
 	int ret;
 
 	/* core/geometric clock */
-	ret = nv40_clk_calc_pll(priv, 0x004000, gclk,
+	ret = nv40_clk_calc_pll(clk, 0x004000, gclk,
 				&N1, &M1, &N2, &M2, &log2P);
 	if (ret < 0)
 		return ret;
 
 	if (N2 == M2) {
-		priv->npll_ctrl = 0x80000100 | (log2P << 16);
-		priv->npll_coef = (N1 << 8) | M1;
+		clk->npll_ctrl = 0x80000100 | (log2P << 16);
+		clk->npll_coef = (N1 << 8) | M1;
 	} else {
-		priv->npll_ctrl = 0xc0000000 | (log2P << 16);
-		priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+		clk->npll_ctrl = 0xc0000000 | (log2P << 16);
+		clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
 	}
 
 	/* use the second pll for shader/rop clock, if it differs from core */
 	if (sclk && sclk != gclk) {
-		ret = nv40_clk_calc_pll(priv, 0x004008, sclk,
+		ret = nv40_clk_calc_pll(clk, 0x004008, sclk,
 					&N1, &M1, NULL, NULL, &log2P);
 		if (ret < 0)
 			return ret;
 
-		priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
-		priv->ctrl = 0x00000223;
+		clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
+		clk->ctrl = 0x00000223;
 	} else {
-		priv->spll = 0x00000000;
-		priv->ctrl = 0x00000333;
+		clk->spll = 0x00000000;
+		clk->ctrl = 0x00000333;
 	}
 
 	return 0;
 }
 
 static int
-nv40_clk_prog(struct nvkm_clk *clk)
+nv40_clk_prog(struct nvkm_clk *base)
 {
-	struct nv40_clk_priv *priv = (void *)clk;
-	nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
-	nv_wr32(priv, 0x004004, priv->npll_coef);
-	nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
-	nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
+	struct nv40_clk *clk = nv40_clk(base);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
+	nvkm_wr32(device, 0x004004, clk->npll_coef);
+	nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
+	nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
 	mdelay(5);
-	nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
+	nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
 	return 0;
 }
 
 static void
-nv40_clk_tidy(struct nvkm_clk *clk)
+nv40_clk_tidy(struct nvkm_clk *obj)
 {
 }
 
-static int
-nv40_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+static const struct nvkm_clk_func
+nv40_clk = {
+	.read = nv40_clk_read,
+	.calc = nv40_clk_calc,
+	.prog = nv40_clk_prog,
+	.tidy = nv40_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+		{ nv_clk_src_max }
+	}
+};
+
+int
+nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct nv40_clk_priv *priv;
-	int ret;
+	struct nv40_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, nv40_domain,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	clk->base.pll_calc = nv04_clk_pll_calc;
+	clk->base.pll_prog = nv04_clk_pll_prog;
+	*pclk = &clk->base;
 
-	priv->base.pll_calc = nv04_clk_pll_calc;
-	priv->base.pll_prog = nv04_clk_pll_prog;
-	priv->base.read = nv40_clk_read;
-	priv->base.calc = nv40_clk_calc;
-	priv->base.prog = nv40_clk_prog;
-	priv->base.tidy = nv40_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&nv40_clk, device, index, true, &clk->base);
 }
-
-struct nvkm_oclass
-nv40_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
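
nv40_clk_calc above encodes its result in two words: a unity second stage (N2 == M2) selects single-stage mode (ctrl 0x80000100), otherwise both stages are cascaded (ctrl 0xc0000000) and packed together into one coefficient word. The encoding in isolation:

#include <stdint.h>
#include <stdio.h>

/* nv40 control/coefficient packing from nv40_clk_calc above. */
static void
nv40_pack(int N1, int M1, int N2, int M2, int log2P,
	  uint32_t *ctrl, uint32_t *coef)
{
	if (N2 == M2) {
		*ctrl = 0x80000100 | ((uint32_t)log2P << 16);
		*coef = ((uint32_t)N1 << 8) | (uint32_t)M1;
	} else {
		*ctrl = 0xc0000000 | ((uint32_t)log2P << 16);
		*coef = ((uint32_t)N2 << 24) | ((uint32_t)M2 << 16) |
			((uint32_t)N1 << 8) | (uint32_t)M1;
	}
}

int
main(void)
{
	uint32_t ctrl, coef;

	nv40_pack(100, 2, 3, 3, 1, &ctrl, &coef);
	printf("ctrl=%08x coef=%08x\n", ctrl, coef);
	return 0;
}
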
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 9b4ffd6347ce..5841f297973c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -25,38 +25,39 @@
 #include "pll.h"
 #include "seq.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 
 static u32
-read_div(struct nv50_clk_priv *priv)
+read_div(struct nv50_clk *clk)
 {
-	switch (nv_device(priv)->chipset) {
+	struct nvkm_device *device = clk->base.subdev.device;
+	switch (device->chipset) {
 	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
 	case 0x84:
 	case 0x86:
 	case 0x98:
 	case 0xa0:
-		return nv_rd32(priv, 0x004700);
+		return nvkm_rd32(device, 0x004700);
 	case 0x92:
 	case 0x94:
 	case 0x96:
-		return nv_rd32(priv, 0x004800);
+		return nvkm_rd32(device, 0x004800);
 	default:
 		return 0x00000000;
 	}
 }
 
 static u32
-read_pll_src(struct nv50_clk_priv *priv, u32 base)
+read_pll_src(struct nv50_clk *clk, u32 base)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
-	u32 rsel = nv_rd32(priv, 0x00e18c);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 coef, ref = nvkm_clk_read(&clk->base, nv_clk_src_crystal);
+	u32 rsel = nvkm_rd32(device, 0x00e18c);
 	int P, N, M, id;
 
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 	case 0xa0:
 		switch (base) {
@@ -65,11 +66,11 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
 		case 0x4008: id = !!(rsel & 0x00000008); break;
 		case 0x4030: id = 0; break;
 		default:
-			nv_error(priv, "ref: bad pll 0x%06x\n", base);
+			nvkm_error(subdev, "ref: bad pll %06x\n", base);
 			return 0;
 		}
 
-		coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
+		coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
 		ref *=  (coef & 0x01000000) ? 2 : 4;
 		P    =  (coef & 0x00070000) >> 16;
 		N    = ((coef & 0x0000ff00) >> 8) + 1;
@@ -78,7 +79,7 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
 	case 0x84:
 	case 0x86:
 	case 0x92:
-		coef = nv_rd32(priv, 0x00e81c);
+		coef = nvkm_rd32(device, 0x00e81c);
 		P    = (coef & 0x00070000) >> 16;
 		N    = (coef & 0x0000ff00) >> 8;
 		M    = (coef & 0x000000ff) >> 0;
@@ -86,26 +87,26 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
 	case 0x94:
 	case 0x96:
 	case 0x98:
-		rsel = nv_rd32(priv, 0x00c050);
+		rsel = nvkm_rd32(device, 0x00c050);
 		switch (base) {
 		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
 		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
 		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
 		case 0x4030: rsel = 3; break;
 		default:
-			nv_error(priv, "ref: bad pll 0x%06x\n", base);
+			nvkm_error(subdev, "ref: bad pll %06x\n", base);
 			return 0;
 		}
 
 		switch (rsel) {
 		case 0: id = 1; break;
-		case 1: return clk->read(clk, nv_clk_src_crystal);
-		case 2: return clk->read(clk, nv_clk_src_href);
+		case 1: return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
+		case 2: return nvkm_clk_read(&clk->base, nv_clk_src_href);
 		case 3: id = 0; break;
 		}
 
-		coef =  nv_rd32(priv, 0x00e81c + (id * 0x28));
-		P    = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
+		coef =  nvkm_rd32(device, 0x00e81c + (id * 0x28));
+		P    = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
 		P   += (coef & 0x00070000) >> 16;
 		N    = (coef & 0x0000ff00) >> 8;
 		M    = (coef & 0x000000ff) >> 0;
@@ -121,10 +122,11 @@ read_pll_src(struct nv50_clk_priv *priv, u32 base)
 }
 
 static u32
-read_pll_ref(struct nv50_clk_priv *priv, u32 base)
+read_pll_ref(struct nv50_clk *clk, u32 base)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 src, mast = nv_rd32(priv, 0x00c040);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 src, mast = nvkm_rd32(device, 0x00c040);
 
 	switch (base) {
 	case 0x004028:
@@ -140,33 +142,33 @@ read_pll_ref(struct nv50_clk_priv *priv, u32 base)
 		src = !!(mast & 0x02000000);
 		break;
 	case 0x00e810:
-		return clk->read(clk, nv_clk_src_crystal);
+		return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
 	default:
-		nv_error(priv, "bad pll 0x%06x\n", base);
+		nvkm_error(subdev, "bad pll %06x\n", base);
 		return 0;
 	}
 
 	if (src)
-		return clk->read(clk, nv_clk_src_href);
+		return nvkm_clk_read(&clk->base, nv_clk_src_href);
 
-	return read_pll_src(priv, base);
+	return read_pll_src(clk, base);
 }
 
 static u32
-read_pll(struct nv50_clk_priv *priv, u32 base)
+read_pll(struct nv50_clk *clk, u32 base)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 mast = nv_rd32(priv, 0x00c040);
-	u32 ctrl = nv_rd32(priv, base + 0);
-	u32 coef = nv_rd32(priv, base + 4);
-	u32 ref = read_pll_ref(priv, base);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
+	u32 ctrl = nvkm_rd32(device, base + 0);
+	u32 coef = nvkm_rd32(device, base + 4);
+	u32 ref = read_pll_ref(clk, base);
 	u32 freq = 0;
 	int N1, N2, M1, M2;
 
 	if (base == 0x004028 && (mast & 0x00100000)) {
 		/* wtf, appears to only disable post-divider on gt200 */
-		if (nv_device(priv)->chipset != 0xa0)
-			return clk->read(clk, nv_clk_src_dom6);
+		if (device->chipset != 0xa0)
+			return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
 	}
 
 	N2 = (coef & 0xff000000) >> 24;
@@ -186,71 +188,73 @@ read_pll(struct nv50_clk_priv *priv, u32 base)
 	return freq;
 }
 
-static int
-nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+int
+nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	u32 mast = nv_rd32(priv, 0x00c040);
+	struct nv50_clk *clk = nv50_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
 	u32 P = 0;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /* PCIE reference clock */
 	case nv_clk_src_hclk:
-		return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
+		return div_u64((u64)nvkm_clk_read(&clk->base, nv_clk_src_href) * 27778, 10000);
 	case nv_clk_src_hclkm3:
-		return clk->read(clk, nv_clk_src_hclk) * 3;
+		return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
 	case nv_clk_src_hclkm3d2:
-		return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
+		return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3 / 2;
 	case nv_clk_src_host:
 		switch (mast & 0x30000000) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_href);
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
 		case 0x10000000: break;
 		case 0x20000000: /* !0x50 */
-		case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
+		case 0x30000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
 		}
 		break;
 	case nv_clk_src_core:
 		if (!(mast & 0x00100000))
-			P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
+			P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
 		switch (mast & 0x00000003) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
-		case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
-		case 0x00000002: return read_pll(priv, 0x004020) >> P;
-		case 0x00000003: return read_pll(priv, 0x004028) >> P;
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
+		case 0x00000001: return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
+		case 0x00000002: return read_pll(clk, 0x004020) >> P;
+		case 0x00000003: return read_pll(clk, 0x004028) >> P;
 		}
 		break;
 	case nv_clk_src_shader:
-		P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
 		switch (mast & 0x00000030) {
 		case 0x00000000:
 			if (mast & 0x00000080)
-				return clk->read(clk, nv_clk_src_host) >> P;
-			return clk->read(clk, nv_clk_src_crystal) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_host) >> P;
+			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 		case 0x00000010: break;
-		case 0x00000020: return read_pll(priv, 0x004028) >> P;
-		case 0x00000030: return read_pll(priv, 0x004020) >> P;
+		case 0x00000020: return read_pll(clk, 0x004028) >> P;
+		case 0x00000030: return read_pll(clk, 0x004020) >> P;
 		}
 		break;
 	case nv_clk_src_mem:
-		P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
-		if (nv_rd32(priv, 0x004008) & 0x00000200) {
+		P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
+		if (nvkm_rd32(device, 0x004008) & 0x00000200) {
 			switch (mast & 0x0000c000) {
 			case 0x00000000:
-				return clk->read(clk, nv_clk_src_crystal) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 			case 0x00008000:
 			case 0x0000c000:
-				return clk->read(clk, nv_clk_src_href) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
 			}
 		} else {
-			return read_pll(priv, 0x004008) >> P;
+			return read_pll(clk, 0x004008) >> P;
 		}
 		break;
 	case nv_clk_src_vdec:
-		P = (read_div(priv) & 0x00000700) >> 8;
-		switch (nv_device(priv)->chipset) {
+		P = (read_div(clk) & 0x00000700) >> 8;
+		switch (device->chipset) {
 		case 0x84:
 		case 0x86:
 		case 0x92:
@@ -259,51 +263,51 @@ nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 		case 0xa0:
 			switch (mast & 0x00000c00) {
 			case 0x00000000:
-				if (nv_device(priv)->chipset == 0xa0) /* wtf?? */
-					return clk->read(clk, nv_clk_src_core) >> P;
-				return clk->read(clk, nv_clk_src_crystal) >> P;
+				if (device->chipset == 0xa0) /* wtf?? */
+					return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 			case 0x00000400:
 				return 0;
 			case 0x00000800:
 				if (mast & 0x01000000)
-					return read_pll(priv, 0x004028) >> P;
-				return read_pll(priv, 0x004030) >> P;
+					return read_pll(clk, 0x004028) >> P;
+				return read_pll(clk, 0x004030) >> P;
 			case 0x00000c00:
-				return clk->read(clk, nv_clk_src_core) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
 			}
 			break;
 		case 0x98:
 			switch (mast & 0x00000c00) {
 			case 0x00000000:
-				return clk->read(clk, nv_clk_src_core) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
 			case 0x00000400:
 				return 0;
 			case 0x00000800:
-				return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2) >> P;
 			case 0x00000c00:
-				return clk->read(clk, nv_clk_src_mem) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_mem) >> P;
 			}
 			break;
 		}
 		break;
 	case nv_clk_src_dom6:
-		switch (nv_device(priv)->chipset) {
+		switch (device->chipset) {
 		case 0x50:
 		case 0xa0:
-			return read_pll(priv, 0x00e810) >> 2;
+			return read_pll(clk, 0x00e810) >> 2;
 		case 0x84:
 		case 0x86:
 		case 0x92:
 		case 0x94:
 		case 0x96:
 		case 0x98:
-			P = (read_div(priv) & 0x00000007) >> 0;
+			P = (read_div(clk) & 0x00000007) >> 0;
 			switch (mast & 0x0c000000) {
-			case 0x00000000: return clk->read(clk, nv_clk_src_href);
+			case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
 			case 0x04000000: break;
-			case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
+			case 0x08000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
 			case 0x0c000000:
-				return clk->read(clk, nv_clk_src_hclkm3) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3) >> P;
 			}
 			break;
 		default:
@@ -313,27 +317,27 @@ nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 		break;
 	}
 
-	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
 	return -EINVAL;
 }
 
 static u32
-calc_pll(struct nv50_clk_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
+calc_pll(struct nv50_clk *clk, u32 reg, u32 idx, int *N, int *M, int *P)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll pll;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, reg, &pll);
+	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
 	if (ret)
 		return 0;
 
 	pll.vco2.max_freq = 0;
-	pll.refclk = read_pll_ref(priv, reg);
+	pll.refclk = read_pll_ref(clk, reg);
 	if (!pll.refclk)
 		return 0;
 
-	return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
+	return nv04_pll_calc(subdev, &pll, idx, N, M, NULL, NULL, P);
 }
 
 static inline u32
@@ -360,11 +364,13 @@ clk_same(u32 a, u32 b)
 	return ((a / 1000) == (b / 1000));
 }
 
-static int
-nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+int
+nv50_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	struct nv50_clk_hwsq *hwsq = &priv->hwsq;
+	struct nv50_clk *clk = nv50_clk(base);
+	struct nv50_clk_hwsq *hwsq = &clk->hwsq;
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	const int shader = cstate->domain[nv_clk_src_shader];
 	const int core = cstate->domain[nv_clk_src_core];
 	const int vdec = cstate->domain[nv_clk_src_vdec];
@@ -375,7 +381,7 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
 	int freq, out;
 
 	/* prepare a hwsq script from which we'll perform the reclock */
-	out = clk_init(hwsq, nv_subdev(clk));
+	out = clk_init(hwsq, subdev);
 	if (out)
 		return out;
 
@@ -393,15 +399,15 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
 		freq = calc_div(core, vdec, &P1);
 
 		/* see how close we can get using xpll/hclk as a source */
-		if (nv_device(priv)->chipset != 0x98)
-			out = read_pll(priv, 0x004030);
+		if (device->chipset != 0x98)
+			out = read_pll(clk, 0x004030);
 		else
-			out = clk->read(clk, nv_clk_src_hclkm3d2);
+			out = nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2);
 		out = calc_div(out, vdec, &P2);
 
 		/* select whichever gets us closest */
 		if (abs(vdec - freq) <= abs(vdec - out)) {
-			if (nv_device(priv)->chipset != 0x98)
+			if (device->chipset != 0x98)
 				mastv |= 0x00000c00;
 			divsv |= P1 << 8;
 		} else {
@@ -417,14 +423,14 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
 	 * of the host clock frequency
 	 */
 	if (dom6) {
-		if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
+		if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_href))) {
 			mastv |= 0x00000000;
 		} else
-		if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
+		if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_hclk))) {
 			mastv |= 0x08000000;
 		} else {
-			freq = clk->read(clk, nv_clk_src_hclk) * 3;
-			freq = calc_div(freq, dom6, &P1);
+			freq = nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
+			calc_div(freq, dom6, &P1);
 
 			mastv |= 0x0c000000;
 			divsv |= P1;
@@ -444,13 +450,13 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
 	/* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
 	 * sclk to hclk) before reprogramming
 	 */
-	if (nv_device(priv)->chipset < 0x92)
+	if (device->chipset < 0x92)
 		clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
 	else
 		clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
 
 	/* core: for the moment at least, always use nvpll */
-	freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
+	freq = calc_pll(clk, 0x4028, core, &N, &M, &P1);
 	if (freq == 0)
 		return -ERANGE;
 
@@ -468,7 +474,7 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
 		clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
 		clk_mask(hwsq, mast, 0x00100033, 0x00000023);
 	} else {
-		freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+		freq = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
 		if (freq == 0)
 			return -ERANGE;
 
@@ -485,77 +491,71 @@ nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
 	return 0;
 }
 
-static int
-nv50_clk_prog(struct nvkm_clk *clk)
+int
+nv50_clk_prog(struct nvkm_clk *base)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	return clk_exec(&priv->hwsq, true);
+	struct nv50_clk *clk = nv50_clk(base);
+	return clk_exec(&clk->hwsq, true);
 }
 
-static void
-nv50_clk_tidy(struct nvkm_clk *clk)
+void
+nv50_clk_tidy(struct nvkm_clk *base)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	clk_exec(&priv->hwsq, false);
+	struct nv50_clk *clk = nv50_clk(base);
+	clk_exec(&clk->hwsq, false);
 }
 
 int
-nv50_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
+	      int index, bool allow_reclock, struct nvkm_clk **pclk)
 {
-	struct nv50_clk_oclass *pclass = (void *)oclass;
-	struct nv50_clk_priv *priv;
+	struct nv50_clk *clk;
 	int ret;
 
-	ret = nvkm_clk_create(parent, engine, oclass, pclass->domains,
-			      NULL, 0, false, &priv);
-	*pobject = nv_object(priv);
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base);
+	*pclk = &clk->base;
 	if (ret)
 		return ret;
 
-	priv->hwsq.r_fifo = hwsq_reg(0x002504);
-	priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
-	priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
-	priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
-	priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
-	switch (nv_device(priv)->chipset) {
+	clk->hwsq.r_fifo = hwsq_reg(0x002504);
+	clk->hwsq.r_spll[0] = hwsq_reg(0x004020);
+	clk->hwsq.r_spll[1] = hwsq_reg(0x004024);
+	clk->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
+	clk->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
+	switch (device->chipset) {
 	case 0x92:
 	case 0x94:
 	case 0x96:
-		priv->hwsq.r_divs = hwsq_reg(0x004800);
+		clk->hwsq.r_divs = hwsq_reg(0x004800);
 		break;
 	default:
-		priv->hwsq.r_divs = hwsq_reg(0x004700);
+		clk->hwsq.r_divs = hwsq_reg(0x004700);
 		break;
 	}
-	priv->hwsq.r_mast = hwsq_reg(0x00c040);
-
-	priv->base.read = nv50_clk_read;
-	priv->base.calc = nv50_clk_calc;
-	priv->base.prog = nv50_clk_prog;
-	priv->base.tidy = nv50_clk_tidy;
+	clk->hwsq.r_mast = hwsq_reg(0x00c040);
 	return 0;
 }
 
-static struct nvkm_domain
-nv50_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+nv50_clk = {
+	.read = nv50_clk_read,
+	.calc = nv50_clk_calc,
+	.prog = nv50_clk_prog,
+	.tidy = nv50_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+		{ nv_clk_src_max }
+	}
 };
 
-struct nvkm_oclass *
-nv50_clk_oclass = &(struct nv50_clk_oclass) {
-	.base.handle = NV_SUBDEV(CLK, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-	.domains = nv50_domains,
-}.base;
+int
+nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+	return nv50_clk_new_(&nv50_clk, device, index, false, pclk);
+}
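
The conversion above replaces the old `(void *)clk` blind casts with a container_of()-based nv50_clk() downcast macro. A minimal userspace sketch of that pattern, assuming a hand-rolled container_of() in place of the kernel's (struct names here are illustrative, not nouveau's):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the kernel's container_of(): recover the enclosing
 * structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base  { int index; };
struct child { struct base base; int extra; };

#define to_child(p) container_of((p), struct child, base)

int main(void)
{
	struct child c = { .base = { .index = 7 }, .extra = 42 };
	struct base *b = &c.base;	/* what the core layer passes around */
	struct child *d = to_child(b);	/* the downcast the macro performs */
	printf("index=%d extra=%d\n", d->base.index, d->extra);
	return 0;
}

Unlike the plain cast, the macro works for any member offset and documents the base/derived relationship explicitly, instead of silently relying on the base being the first member.
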
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index 0ead76a32f10..d3c7fb6efa16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -1,7 +1,9 @@
-#ifndef __NVKM_CLK_NV50_H__
-#define __NVKM_CLK_NV50_H__
+#ifndef __NV50_CLK_H__
+#define __NV50_CLK_H__
+#define nv50_clk(p) container_of((p), struct nv50_clk, base)
+#include "priv.h"
+
 #include <subdev/bus/hwsq.h>
-#include <subdev/clk.h>
 
 struct nv50_clk_hwsq {
 	struct hwsq base;
@@ -12,17 +14,15 @@ struct nv50_clk_hwsq {
 	struct hwsq_reg r_mast;
 };
 
-struct nv50_clk_priv {
+struct nv50_clk {
 	struct nvkm_clk base;
 	struct nv50_clk_hwsq hwsq;
 };
 
-int  nv50_clk_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *, u32,
-		     struct nvkm_object **);
-
-struct nv50_clk_oclass {
-	struct nvkm_oclass base;
-	struct nvkm_domain *domains;
-};
+int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+		  bool, struct nvkm_clk **);
+int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src);
+int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
+int nv50_clk_prog(struct nvkm_clk *);
+void nv50_clk_tidy(struct nvkm_clk *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
index 783a3e78d632..c6fccd600db4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
@@ -79,7 +79,7 @@ gt215_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info,
 	}
 
 	if (unlikely(best_err == ~0)) {
-		nv_error(subdev, "unable to find matching pll values\n");
+		nvkm_error(subdev, "unable to find matching pll values\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
index f2292895a1a8..5ad67879e703 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
@@ -37,7 +37,7 @@ getMNP_single(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
 	 * "clk" parameter in kHz
 	 * returns calculated clock
 	 */
-	struct nvkm_bios *bios = nvkm_bios(subdev);
+	struct nvkm_bios *bios = subdev->device->bios;
 	int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
 	int minM = info->vco1.min_m, maxM = info->vco1.max_m;
 	int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -136,7 +136,7 @@ getMNP_double(struct nvkm_subdev *subdev, struct nvbios_pll *info, int clk,
 	 * "clk" parameter in kHz
 	 * returns calculated clock
 	 */
-	int chip_version = nvkm_bios(subdev)->version.chip;
+	int chip_version = subdev->device->bios->version.chip;
 	int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
 	int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
 	int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
@@ -240,6 +240,6 @@ nv04_pll_calc(struct nvkm_subdev *subdev, struct nvbios_pll *info, u32 freq,
 	}
 
 	if (!ret)
-		nv_error(subdev, "unable to compute acceptable pll values\n");
+		nvkm_error(subdev, "unable to compute acceptable pll values\n");
 	return ret;
 }
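
nv04_pll_calc() and its getMNP helpers perform an exhaustive divider search against the VBIOS limits. A toy brute-force in the same spirit, minimizing deviation from the target clock; the VCO band and input-frequency constraints from the limits table are deliberately omitted, so this is a sketch, not a drop-in:

#include <stdio.h>
#include <stdlib.h>

/* Pick the N/M/P divider set whose output lands closest to the target.
 * Bounds are illustrative; the real routine takes them from nvbios_pll. */
static int
pll_search(int refclk, int target, int *bN, int *bM, int *bP)
{
	int best = -1, delta = 1 << 30;

	for (int P = 0; P <= 4; P++)
	for (int M = 1; M <= 13; M++)
	for (int N = 1; N <= 255; N++) {
		int out = (refclk * N / M) >> P;
		if (abs(target - out) < delta) {
			delta = abs(target - out);
			*bN = N; *bM = M; *bP = P;
			best = out;
		}
	}
	return best;	/* closest achievable clock, in refclk's units */
}

int main(void)
{
	int N, M, P;
	int out = pll_search(27000, 400000, &N, &M, &P);	/* kHz */
	printf("%d kHz with N=%d M=%d P=%d\n", out, N, M, P);
	return 0;
}
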
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
new file mode 100644
index 000000000000..51eafc00c8b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_CLK_PRIV_H__
+#define __NVKM_CLK_PRIV_H__
+#define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev)
+#include <subdev/clk.h>
+
+struct nvkm_clk_func {
+	int (*init)(struct nvkm_clk *);
+	void (*fini)(struct nvkm_clk *);
+	int (*read)(struct nvkm_clk *, enum nv_clk_src);
+	int (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
+	int (*prog)(struct nvkm_clk *);
+	void (*tidy)(struct nvkm_clk *);
+	struct nvkm_pstate *pstates;
+	int nr_pstates;
+	struct nvkm_domain domains[];
+};
+
+int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, int,
+		  bool allow_reclock, struct nvkm_clk *);
+int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+		  bool allow_reclock, struct nvkm_clk **);
+
+int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
+		      struct nvkm_pll_vals *);
+int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
+#endif
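
The new priv.h introduces nvkm_clk_func: a const, per-chipset table of hooks that replaces function pointers previously patched into the live object. A self-contained illustration of that shape (names abbreviated; the NULL check mirrors how the base layer guards optional members):

#include <stdio.h>

struct clk;

struct clk_func {
	int  (*read)(struct clk *, int src);
	void (*tidy)(struct clk *);	/* optional hook */
};

struct clk {
	const struct clk_func *func;
	int last;
};

static int demo_read(struct clk *clk, int src)
{
	return clk->last = src * 1000;	/* pretend kHz lookup */
}

static const struct clk_func demo = {
	.read = demo_read,
	/* .tidy intentionally absent */
};

int main(void)
{
	struct clk clk = { .func = &demo };
	printf("%d\n", clk.func->read(&clk, 27));
	if (clk.func->tidy)		/* optional hooks are NULL-checked */
		clk.func->tidy(&clk);
	return 0;
}

Because the table is const and shared, a chipset can no longer scribble over another's methods, and the hooks live in rodata.
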
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index b0d7c5f40db1..5f25402f6b09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -23,74 +23,108 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/vga.h>
 
-int
-_nvkm_devinit_fini(struct nvkm_object *object, bool suspend)
+u32
+nvkm_devinit_mmio(struct nvkm_devinit *init, u32 addr)
 {
-	struct nvkm_devinit *devinit = (void *)object;
+	if (init->func->mmio)
+		addr = init->func->mmio(init, addr);
+	return addr;
+}
 
-	/* force full reinit on resume */
-	if (suspend)
-		devinit->post = true;
+int
+nvkm_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 khz)
+{
+	return init->func->pll_set(init, type, khz);
+}
 
-	/* unlock the extended vga crtc regs */
-	nv_lockvgac(devinit, false);
+void
+nvkm_devinit_meminit(struct nvkm_devinit *init)
+{
+	if (init->func->meminit)
+		init->func->meminit(init);
+}
 
-	return nvkm_subdev_fini(&devinit->base, suspend);
+u64
+nvkm_devinit_disable(struct nvkm_devinit *init)
+{
+	if (init && init->func->disable)
+		return init->func->disable(init);
+	return 0;
 }
 
 int
-_nvkm_devinit_init(struct nvkm_object *object)
+nvkm_devinit_post(struct nvkm_devinit *init, u64 *disable)
 {
-	struct nvkm_devinit_impl *impl = (void *)object->oclass;
-	struct nvkm_devinit *devinit = (void *)object;
-	int ret;
+	int ret = 0;
+	if (init && init->func->post)
+		ret = init->func->post(init, init->post);
+	*disable = nvkm_devinit_disable(init);
+	return ret;
+}
 
-	ret = nvkm_subdev_init(&devinit->base);
-	if (ret)
-		return ret;
+static int
+nvkm_devinit_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+	/* force full reinit on resume */
+	if (suspend)
+		init->post = true;
+	return 0;
+}
+
+static int
+nvkm_devinit_preinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
 
-	ret = impl->post(&devinit->base, devinit->post);
-	if (ret)
-		return ret;
+	if (init->func->preinit)
+		init->func->preinit(init);
 
-	if (impl->disable)
-		nv_device(devinit)->disable_mask |= impl->disable(devinit);
+	/* unlock the extended vga crtc regs */
+	nvkm_lockvgac(subdev->device, false);
 	return 0;
 }
 
-void
-_nvkm_devinit_dtor(struct nvkm_object *object)
+static int
+nvkm_devinit_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+	if (init->func->init)
+		init->func->init(init);
+	return 0;
+}
+
+static void *
+nvkm_devinit_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_devinit *devinit = (void *)object;
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+	void *data = init;
 
-	/* lock crtc regs */
-	nv_lockvgac(devinit, true);
+	if (init->func->dtor)
+		data = init->func->dtor(init);
 
-	nvkm_subdev_destroy(&devinit->base);
+	/* lock crtc regs */
+	nvkm_lockvgac(subdev->device, true);
+	return data;
 }
 
-int
-nvkm_devinit_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int size, void **pobject)
+static const struct nvkm_subdev_func
+nvkm_devinit = {
+	.dtor = nvkm_devinit_dtor,
+	.preinit = nvkm_devinit_preinit,
+	.init = nvkm_devinit_init,
+	.fini = nvkm_devinit_fini,
+};
+
+void
+nvkm_devinit_ctor(const struct nvkm_devinit_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_devinit *init)
 {
-	struct nvkm_devinit_impl *impl = (void *)oclass;
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_devinit *devinit;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
-				  "init", size, pobject);
-	devinit = *pobject;
-	if (ret)
-		return ret;
-
-	devinit->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
-	devinit->meminit = impl->meminit;
-	devinit->pll_set = impl->pll_set;
-	devinit->mmio    = impl->mmio;
-	return 0;
+	nvkm_subdev_ctor(&nvkm_devinit, device, index, 0, &init->subdev);
+	init->func = func;
+	init->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
 }
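
The rewritten base.c funnels every devinit operation through a nvkm_devinit_*() wrapper that tolerates absent hooks and supplies a default. A compressed userspace model of that dispatch, with made-up struct names; note how post() also refreshes the disable mask, as in the code above:

#include <stdbool.h>
#include <stdio.h>

struct devinit;

struct devinit_func {
	unsigned long long (*disable)(struct devinit *);	/* optional */
	int (*post)(struct devinit *, bool);			/* optional */
};

struct devinit {
	const struct devinit_func *func;
	bool post;
};

static unsigned long long
devinit_disable(struct devinit *init)
{
	if (init && init->func->disable)
		return init->func->disable(init);
	return 0;	/* nothing disabled by default */
}

static int
devinit_post(struct devinit *init, unsigned long long *disable)
{
	int ret = 0;
	if (init && init->func->post)
		ret = init->func->post(init, init->post);
	*disable = devinit_disable(init);	/* always refresh the mask */
	return ret;
}

static const struct devinit_func none = { 0 };	/* no hooks at all */

int main(void)
{
	struct devinit init = { .func = &none, .post = true };
	unsigned long long mask;
	int ret = devinit_post(&init, &mask);
	printf("ret=%d mask=%llx\n", ret, mask);
	return 0;
}
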
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
index 36684c3f9e9c..6c5bbff12eb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
@@ -23,7 +23,6 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include <core/device.h>
 #include <subdev/fb/regsnv04.h>
 
 #define NV04_PFB_DEBUG_0					0x00100080
@@ -48,8 +47,8 @@
 static inline struct io_mapping *
 fbmem_init(struct nvkm_device *dev)
 {
-	return io_mapping_create_wc(nv_device_resource_start(dev, 1),
-				    nv_device_resource_len(dev, 1));
+	return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
+				    dev->func->resource_size(dev, 1));
 }
 
 static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
index ca776ce75f4f..e895289bf3c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
@@ -27,40 +27,42 @@
 #include <subdev/bios/init.h>
 
 static u64
-g84_devinit_disable(struct nvkm_devinit *devinit)
+g84_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MPEG);
-		disable |= (1ULL << NVDEV_ENGINE_VP);
-		disable |= (1ULL << NVDEV_ENGINE_BSP);
-		disable |= (1ULL << NVDEV_ENGINE_CIPHER);
+		disable |= (1ULL << NVKM_ENGINE_MPEG);
+		disable |= (1ULL << NVKM_ENGINE_VP);
+		disable |= (1ULL << NVKM_ENGINE_BSP);
+		disable |= (1ULL << NVKM_ENGINE_CIPHER);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_BSP);
+		disable |= (1ULL << NVKM_ENGINE_BSP);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVDEV_ENGINE_CIPHER);
+		disable |= (1ULL << NVKM_ENGINE_CIPHER);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-g84_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x84),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+g84_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = nv50_devinit_pll_set,
 	.disable = g84_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+g84_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&g84_devinit, device, index, pinit);
+}
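
Each chipset's disable() callback decodes strap registers into a bitmask of engines to hide from the device model. The translation is mechanical; a standalone replay with invented register bits and engine numbers:

#include <stdio.h>

enum { ENGINE_DISP, ENGINE_MPEG, ENGINE_CIPHER };

static unsigned long long
decode_disable(unsigned r00154c)
{
	unsigned long long disable = 0;

	if (!(r00154c & 0x00000004))	/* display strap absent */
		disable |= 1ULL << ENGINE_DISP;
	if (!(r00154c & 0x00000040))	/* crypto strap absent */
		disable |= 1ULL << ENGINE_CIPHER;
	return disable;
}

int main(void)
{
	/* pretend the register read back with only DISP present */
	printf("%llx\n", decode_disable(0x00000004));
	return 0;
}
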
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
index d29bacee65ee..a9d45844df5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
@@ -27,39 +27,41 @@
 #include <subdev/bios/init.h>
 
 static u64
-g98_devinit_disable(struct nvkm_devinit *devinit)
+g98_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVDEV_ENGINE_SEC);
+		disable |= (1ULL << NVKM_ENGINE_SEC);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-g98_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x98),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+g98_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = nv50_devinit_pll_set,
 	.disable = g98_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+g98_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&g98_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index c61102f70805..22b0140e28c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -29,19 +29,19 @@
 #include <subdev/clk/pll.h>
 
 int
-gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+gf100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct nvbios_pll info;
 	int N, fN, M, P;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, type, &info);
+	ret = nvbios_pll_parse(device->bios, type, &info);
 	if (ret)
 		return ret;
 
-	ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
+	ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
 	if (ret < 0)
 		return ret;
 
@@ -50,12 +50,12 @@ gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
 	case PLL_VPLL1:
 	case PLL_VPLL2:
 	case PLL_VPLL3:
-		nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
-		nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
-		nv_wr32(priv, info.reg + 0x10, fN << 16);
+		nvkm_mask(device, info.reg + 0x0c, 0x00000000, 0x00000100);
+		nvkm_wr32(device, info.reg + 0x04, (P << 16) | (N << 8) | M);
+		nvkm_wr32(device, info.reg + 0x10, fN << 16);
 		break;
 	default:
-		nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
+		nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
 		ret = -EINVAL;
 		break;
 	}
@@ -64,64 +64,44 @@ gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
 }
 
 static u64
-gf100_devinit_disable(struct nvkm_devinit *devinit)
+gf100_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r022500 = nv_rd32(priv, 0x022500);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r022500 = nvkm_rd32(device, 0x022500);
 	u64 disable = 0ULL;
 
 	if (r022500 & 0x00000001)
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 
 	if (r022500 & 0x00000002) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (r022500 & 0x00000004)
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (r022500 & 0x00000008)
-		disable |= (1ULL << NVDEV_ENGINE_MSENC);
+		disable |= (1ULL << NVKM_ENGINE_MSENC);
 	if (r022500 & 0x00000100)
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 	if (r022500 & 0x00000200)
-		disable |= (1ULL << NVDEV_ENGINE_CE1);
+		disable |= (1ULL << NVKM_ENGINE_CE1);
 
 	return disable;
 }
 
+static const struct nvkm_devinit_func
+gf100_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
+	.pll_set = gf100_devinit_pll_set,
+	.disable = gf100_devinit_disable,
+};
+
 int
-gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
+gf100_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
 {
-	struct nvkm_devinit_impl *impl = (void *)oclass;
-	struct nv50_devinit_priv *priv;
-	u64 disable;
-	int ret;
-
-	ret = nvkm_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	disable = impl->disable(&priv->base);
-	if (disable & (1ULL << NVDEV_ENGINE_DISP))
-		priv->base.post = true;
-
-	return 0;
+	return nv50_devinit_new_(&gf100_devinit, device, index, pinit);
 }
-
-struct nvkm_oclass *
-gf100_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
-	.pll_set = gf100_devinit_pll_set,
-	.disable = gf100_devinit_disable,
-	.post = nvbios_init,
-}.base;
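
The VPLL write in gf100_devinit_pll_set() packs the solved coefficients into one register word: P above bit 16, N in bits 8-15, M in bits 0-7. A quick round-trip check of that packing (the coefficient values are arbitrary):

#include <stdio.h>
#include <stdint.h>

static uint32_t
pack_pll(int N, int M, int P)
{
	return ((uint32_t)P << 16) | ((uint32_t)N << 8) | (uint32_t)M;
}

int main(void)
{
	uint32_t coef = pack_pll(0x71, 0x02, 0x01);
	printf("coef=%08x N=%u M=%u P=%u\n", (unsigned)coef,
	       (unsigned)((coef >> 8) & 0xff),
	       (unsigned)(coef & 0xff),
	       (unsigned)(coef >> 16));
	return 0;
}
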
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 87ca0ece37b4..2be98bd78214 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -27,33 +27,35 @@
 #include <subdev/bios/init.h>
 
 u64
-gm107_devinit_disable(struct nvkm_devinit *devinit)
+gm107_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r021c00 = nv_rd32(priv, 0x021c00);
-	u32 r021c04 = nv_rd32(priv, 0x021c04);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r021c00 = nvkm_rd32(device, 0x021c00);
+	u32 r021c04 = nvkm_rd32(device, 0x021c04);
 	u64 disable = 0ULL;
 
 	if (r021c00 & 0x00000001)
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 	if (r021c00 & 0x00000004)
-		disable |= (1ULL << NVDEV_ENGINE_CE2);
+		disable |= (1ULL << NVKM_ENGINE_CE2);
 	if (r021c04 & 0x00000001)
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-gm107_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gm107_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
 	.disable = gm107_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+gm107_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gm107_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
index 1076fcf0d716..2b9c3f11b7a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
@@ -28,69 +28,74 @@
 #include <subdev/bios/pmu.h>
 
 static void
-pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec)
+pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = init->base.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	int i;
 
-	nv_wr32(priv, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
+	nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
 	for (i = 0; i < len; i += 4) {
 		if ((i & 0xff) == 0)
-			nv_wr32(priv, 0x10a188, (pmu + i) >> 8);
-		nv_wr32(priv, 0x10a184, nv_ro32(bios, img + i));
+			nvkm_wr32(device, 0x10a188, (pmu + i) >> 8);
+		nvkm_wr32(device, 0x10a184, nvbios_rd32(bios, img + i));
 	}
 
 	while (i & 0xff) {
-		nv_wr32(priv, 0x10a184, 0x00000000);
+		nvkm_wr32(device, 0x10a184, 0x00000000);
 		i += 4;
 	}
 }
 
 static void
-pmu_data(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len)
+pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = init->base.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	int i;
 
-	nv_wr32(priv, 0x10a1c0, 0x01000000 | pmu);
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu);
 	for (i = 0; i < len; i += 4)
-		nv_wr32(priv, 0x10a1c4, nv_ro32(bios, img + i));
+		nvkm_wr32(device, 0x10a1c4, nvbios_rd32(bios, img + i));
 }
 
 static u32
-pmu_args(struct nv50_devinit_priv *priv, u32 argp, u32 argi)
+pmu_args(struct nv50_devinit *init, u32 argp, u32 argi)
 {
-	nv_wr32(priv, 0x10a1c0, argp);
-	nv_wr32(priv, 0x10a1c0, nv_rd32(priv, 0x10a1c4) + argi);
-	return nv_rd32(priv, 0x10a1c4);
+	struct nvkm_device *device = init->base.subdev.device;
+	nvkm_wr32(device, 0x10a1c0, argp);
+	nvkm_wr32(device, 0x10a1c0, nvkm_rd32(device, 0x10a1c4) + argi);
+	return nvkm_rd32(device, 0x10a1c4);
 }
 
 static void
-pmu_exec(struct nv50_devinit_priv *priv, u32 init_addr)
+pmu_exec(struct nv50_devinit *init, u32 init_addr)
 {
-	nv_wr32(priv, 0x10a104, init_addr);
-	nv_wr32(priv, 0x10a10c, 0x00000000);
-	nv_wr32(priv, 0x10a100, 0x00000002);
+	struct nvkm_device *device = init->base.subdev.device;
+	nvkm_wr32(device, 0x10a104, init_addr);
+	nvkm_wr32(device, 0x10a10c, 0x00000000);
+	nvkm_wr32(device, 0x10a100, 0x00000002);
 }
 
 static int
-pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
+pmu_load(struct nv50_devinit *init, u8 type, bool post,
 	 u32 *init_addr_pmu, u32 *args_addr_pmu)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pmuR pmu;
 
 	if (!nvbios_pmuRm(bios, type, &pmu)) {
-		nv_error(priv, "VBIOS PMU fuc %02x not found\n", type);
+		nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
 		return -EINVAL;
 	}
 
 	if (!post)
 		return 0;
 
-	pmu_code(priv, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
-	pmu_code(priv, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
-	pmu_data(priv, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
+	pmu_code(init, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
+	pmu_code(init, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
+	pmu_data(init, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
 
 	if (init_addr_pmu) {
 		*init_addr_pmu = pmu.init_addr_pmu;
@@ -98,75 +103,79 @@ pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
 		return 0;
 	}
 
-	return pmu_exec(priv, pmu.init_addr_pmu), 0;
+	return pmu_exec(init, pmu.init_addr_pmu), 0;
 }
 
 static int
-gm204_devinit_post(struct nvkm_subdev *subdev, bool post)
+gm204_devinit_post(struct nvkm_devinit *base, bool post)
 {
-	struct nv50_devinit_priv *priv = (void *)nvkm_devinit(subdev);
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct bit_entry bit_I;
-	u32 init, args;
+	u32 exec, args;
 	int ret;
 
 	if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 ||
 					    bit_I.length < 0x1c) {
-		nv_error(priv, "VBIOS PMU init data not found\n");
+		nvkm_error(subdev, "VBIOS PMU init data not found\n");
 		return -EINVAL;
 	}
 
 	/* reset PMU and load init table parser ucode */
 	if (post) {
-		nv_mask(priv, 0x000200, 0x00002000, 0x00000000);
-		nv_mask(priv, 0x000200, 0x00002000, 0x00002000);
-		nv_rd32(priv, 0x000200);
-		while (nv_rd32(priv, 0x10a10c) & 0x00000006) {
+		nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+		nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+		nvkm_rd32(device, 0x000200);
+		while (nvkm_rd32(device, 0x10a10c) & 0x00000006) {
 		}
 	}
 
-	ret = pmu_load(priv, 0x04, post, &init, &args);
+	ret = pmu_load(init, 0x04, post, &exec, &args);
 	if (ret)
 		return ret;
 
 	/* upload first chunk of init data */
 	if (post) {
-		u32 pmu = pmu_args(priv, args + 0x08, 0x08);
-		u32 img = nv_ro16(bios, bit_I.offset + 0x14);
-		u32 len = nv_ro16(bios, bit_I.offset + 0x16);
-		pmu_data(priv, pmu, img, len);
+		u32 pmu = pmu_args(init, args + 0x08, 0x08);
+		u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
+		u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
+		pmu_data(init, pmu, img, len);
 	}
 
 	/* upload second chunk of init data */
 	if (post) {
-		u32 pmu = pmu_args(priv, args + 0x08, 0x10);
-		u32 img = nv_ro16(bios, bit_I.offset + 0x18);
-		u32 len = nv_ro16(bios, bit_I.offset + 0x1a);
-		pmu_data(priv, pmu, img, len);
+		u32 pmu = pmu_args(init, args + 0x08, 0x10);
+		u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
+		u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
+		pmu_data(init, pmu, img, len);
 	}
 
 	/* execute init tables */
 	if (post) {
-		nv_wr32(priv, 0x10a040, 0x00005000);
-		pmu_exec(priv, init);
-		while (!(nv_rd32(priv, 0x10a040) & 0x00002000)) {
+		nvkm_wr32(device, 0x10a040, 0x00005000);
+		pmu_exec(init, exec);
+		while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) {
 		}
 	}
 
 	/* load and execute some other ucode image (bios therm?) */
-	return pmu_load(priv, 0x01, post, NULL, NULL);
+	return pmu_load(init, 0x01, post, NULL, NULL);
 }
 
-struct nvkm_oclass *
-gm204_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gm204_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = gm204_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
 	.disable = gm107_devinit_disable,
-	.post = gm204_devinit_post,
-}.base;
+};
+
+int
+gm204_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gm204_devinit, device, index, pinit);
+}
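
pmu_code() above streams the ucode in 32-bit words and then zero-pads to the next 256-byte boundary, since the upload window auto-increments in 0x100-byte pages. The padding arithmetic in isolation (the length is chosen arbitrarily):

#include <stdio.h>

int main(void)
{
	unsigned len = 0x2f4;	/* illustrative ucode length, in bytes */
	unsigned i;

	for (i = 0; i < len; i += 4)
		;		/* body words would be written here */
	while (i & 0xff)	/* pad remainder of the 256-byte block */
		i += 4;

	printf("uploaded %#x bytes, padded to %#x\n", len, i);
	return 0;
}
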
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
index 6a3e8d4efed7..9a8522fa9c65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
@@ -29,32 +29,32 @@
 #include <subdev/clk/pll.h>
 
 int
-gt215_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+gt215_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct nvbios_pll info;
 	int N, fN, M, P;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, type, &info);
+	ret = nvbios_pll_parse(device->bios, type, &info);
 	if (ret)
 		return ret;
 
-	ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
+	ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
 	if (ret < 0)
 		return ret;
 
 	switch (info.type) {
 	case PLL_VPLL0:
 	case PLL_VPLL1:
-		nv_wr32(priv, info.reg + 0, 0x50000610);
-		nv_mask(priv, info.reg + 4, 0x003fffff,
-					    (P << 16) | (M << 8) | N);
-		nv_wr32(priv, info.reg + 8, fN);
+		nvkm_wr32(device, info.reg + 0, 0x50000610);
+		nvkm_mask(device, info.reg + 4, 0x003fffff,
+						(P << 16) | (M << 8) | N);
+		nvkm_wr32(device, info.reg + 8, fN);
 		break;
 	default:
-		nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
+		nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
 		ret = -EINVAL;
 		break;
 	}
@@ -63,24 +63,24 @@ gt215_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
 }
 
 static u64
-gt215_devinit_disable(struct nvkm_devinit *devinit)
+gt215_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (!(r00154c & 0x00000200))
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 
 	return disable;
 }
@@ -99,9 +99,10 @@ gt215_devinit_mmio_part[] = {
 };
 
 static u32
-gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
+gt215_devinit_mmio(struct nvkm_devinit *base, u32 addr)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_device *device = init->base.subdev.device;
 	u32 *mmio = gt215_devinit_mmio_part;
 
 	/* the init tables on some boards have INIT_RAM_RESTRICT_ZM_REG_GROUP
@@ -113,7 +114,7 @@ gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
 	 *
 	 * the binary driver avoids touching these registers at all, however,
 	 * the video bios doesn't care and does what the scripts say.  it's
-	 * presumed that the io-port access to priv registers isn't effected
+ * presumed that the io-port access to init registers isn't affected
 	 * by the screw-up bug mentioned above.
 	 *
 	 * really, a new opcode should've been invented to handle these
@@ -122,9 +123,9 @@ gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
 	while (mmio[0]) {
 		if (addr >= mmio[0] && addr <= mmio[1]) {
 			u32 part = (addr / mmio[2]) & 7;
-			if (!priv->r001540)
-				priv->r001540 = nv_rd32(priv, 0x001540);
-			if (part >= hweight8((priv->r001540 >> 16) & 0xff))
+			if (!init->r001540)
+				init->r001540 = nvkm_rd32(device, 0x001540);
+			if (part >= hweight8((init->r001540 >> 16) & 0xff))
 				return ~0;
 			return addr;
 		}
@@ -134,17 +135,19 @@ gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
 	return addr;
 }
 
-struct nvkm_oclass *
-gt215_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0xa3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gt215_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
+	.mmio = gt215_devinit_mmio,
 	.pll_set = gt215_devinit_pll_set,
 	.disable = gt215_devinit_disable,
-	.mmio    = gt215_devinit_mmio,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+gt215_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gt215_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index 55cf48bbca1c..ce4f718e98a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -27,40 +27,42 @@
 #include <subdev/bios/init.h>
 
 static u64
-mcp89_devinit_disable(struct nvkm_devinit *devinit)
+mcp89_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVDEV_ENGINE_VIC);
+		disable |= (1ULL << NVKM_ENGINE_VIC);
 	if (!(r00154c & 0x00000200))
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-mcp89_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0xaf),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+mcp89_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = gt215_devinit_pll_set,
 	.disable = mcp89_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+mcp89_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&mcp89_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 03a0da834244..c8d455346fcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -33,25 +33,26 @@
 #include <subdev/vga.h>
 
 static void
-nv04_devinit_meminit(struct nvkm_devinit *devinit)
+nv04_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nv04_devinit_priv *priv = (void *)devinit;
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 patt = 0xdeadbeef;
 	struct io_mapping *fb;
 	int i;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
 	/* Sequencer and refresh off */
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
-	nv_mask(priv, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
+	nvkm_mask(device, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
 
-	nv_mask(priv, NV04_PFB_BOOT_0, ~0,
+	nvkm_mask(device, NV04_PFB_BOOT_0, ~0,
 		      NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
 		      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
 		      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
@@ -62,49 +63,49 @@ nv04_devinit_meminit(struct nvkm_devinit *devinit)
 	fbmem_poke(fb, 0x400000, patt + 1);
 
 	if (fbmem_peek(fb, 0) == patt + 1) {
-		nv_mask(priv, NV04_PFB_BOOT_0,
+		nvkm_mask(device, NV04_PFB_BOOT_0,
 			      NV04_PFB_BOOT_0_RAM_TYPE,
 			      NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
-		nv_mask(priv, NV04_PFB_DEBUG_0,
+		nvkm_mask(device, NV04_PFB_DEBUG_0,
 			      NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
 
 		for (i = 0; i < 4; i++)
 			fbmem_poke(fb, 4 * i, patt);
 
 		if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
-			nv_mask(priv, NV04_PFB_BOOT_0,
+			nvkm_mask(device, NV04_PFB_BOOT_0,
 				      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
 				      NV04_PFB_BOOT_0_RAM_AMOUNT,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 	} else
 	if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
-		nv_mask(priv, NV04_PFB_BOOT_0,
+		nvkm_mask(device, NV04_PFB_BOOT_0,
 			      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
 			      NV04_PFB_BOOT_0_RAM_AMOUNT,
 			      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
 	} else
 	if (fbmem_peek(fb, 0) != patt) {
 		if (fbmem_readback(fb, 0x800000, patt))
-			nv_mask(priv, NV04_PFB_BOOT_0,
+			nvkm_mask(device, NV04_PFB_BOOT_0,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 		else
-			nv_mask(priv, NV04_PFB_BOOT_0,
+			nvkm_mask(device, NV04_PFB_BOOT_0,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
 
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
 			      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
 	} else
 	if (!fbmem_readback(fb, 0x800000, patt)) {
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 
 	}
 
 	/* Refresh on, sequencer on */
-	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+	nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
 	fbmem_fini(fb);
 }
 
@@ -139,11 +140,12 @@ powerctrl_1_shift(int chip_version, int reg)
 }
 
 void
-setPLL_single(struct nvkm_devinit *devinit, u32 reg,
+setPLL_single(struct nvkm_devinit *init, u32 reg,
 	      struct nvkm_pll_vals *pv)
 {
-	int chip_version = nvkm_bios(devinit)->version.chip;
-	uint32_t oldpll = nv_rd32(devinit, reg);
+	struct nvkm_device *device = init->subdev.device;
+	int chip_version = device->bios->version.chip;
+	uint32_t oldpll = nvkm_rd32(device, reg);
 	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
 	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
 	uint32_t saved_powerctrl_1 = 0;
@@ -153,30 +155,30 @@ setPLL_single(struct nvkm_devinit *devinit, u32 reg,
 		return;	/* already set */
 
 	if (shift_powerctrl_1 >= 0) {
-		saved_powerctrl_1 = nv_rd32(devinit, 0x001584);
-		nv_wr32(devinit, 0x001584,
+		saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
+		nvkm_wr32(device, 0x001584,
 			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
 			1 << shift_powerctrl_1);
 	}
 
 	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
 		/* upclock -- write new post divider first */
-		nv_wr32(devinit, reg, pv->log2P << 16 | (oldpll & 0xffff));
+		nvkm_wr32(device, reg, pv->log2P << 16 | (oldpll & 0xffff));
 	else
 		/* downclock -- write new NM first */
-		nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1);
+		nvkm_wr32(device, reg, (oldpll & 0xffff0000) | pv->NM1);
 
 	if ((chip_version < 0x17 || chip_version == 0x1a) &&
 	    chip_version != 0x11)
 		/* wait a bit on older chips */
 		msleep(64);
-	nv_rd32(devinit, reg);
+	nvkm_rd32(device, reg);
 
 	/* then write the other half as well */
-	nv_wr32(devinit, reg, pll);
+	nvkm_wr32(device, reg, pll);
 
 	if (shift_powerctrl_1 >= 0)
-		nv_wr32(devinit, 0x001584, saved_powerctrl_1);
+		nvkm_wr32(device, 0x001584, saved_powerctrl_1);
 }
 
 static uint32_t
@@ -193,14 +195,15 @@ new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
 }
 
 void
-setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
+setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
 		       struct nvkm_pll_vals *pv)
 {
-	int chip_version = nvkm_bios(devinit)->version.chip;
+	struct nvkm_device *device = init->subdev.device;
+	int chip_version = device->bios->version.chip;
 	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
 	uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
-	uint32_t oldpll1 = nv_rd32(devinit, reg1);
-	uint32_t oldpll2 = !nv3035 ? nv_rd32(devinit, reg2) : 0;
+	uint32_t oldpll1 = nvkm_rd32(device, reg1);
+	uint32_t oldpll2 = !nv3035 ? nvkm_rd32(device, reg2) : 0;
 	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
 	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
 	uint32_t oldramdac580 = 0, ramdac580 = 0;
@@ -215,7 +218,7 @@ setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
 		pll2 = 0;
 	}
 	if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
-		oldramdac580 = nv_rd32(devinit, 0x680580);
+		oldramdac580 = nvkm_rd32(device, 0x680580);
 		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
 		if (oldramdac580 != ramdac580)
 			oldpll1 = ~0;	/* force mismatch */
@@ -231,8 +234,8 @@ setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
 		return;	/* already set */
 
 	if (shift_powerctrl_1 >= 0) {
-		saved_powerctrl_1 = nv_rd32(devinit, 0x001584);
-		nv_wr32(devinit, 0x001584,
+		saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
+		nvkm_wr32(device, 0x001584,
 			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
 			1 << shift_powerctrl_1);
 	}
@@ -251,26 +254,26 @@ setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
 			shift_c040 += 2;
 		}
 
-		savedc040 = nv_rd32(devinit, 0xc040);
+		savedc040 = nvkm_rd32(device, 0xc040);
 		if (shift_c040 != 14)
-			nv_wr32(devinit, 0xc040, savedc040 & ~(3 << shift_c040));
+			nvkm_wr32(device, 0xc040, savedc040 & ~(3 << shift_c040));
 	}
 
 	if (oldramdac580 != ramdac580)
-		nv_wr32(devinit, 0x680580, ramdac580);
+		nvkm_wr32(device, 0x680580, ramdac580);
 
 	if (!nv3035)
-		nv_wr32(devinit, reg2, pll2);
-	nv_wr32(devinit, reg1, pll1);
+		nvkm_wr32(device, reg2, pll2);
+	nvkm_wr32(device, reg1, pll1);
 
 	if (shift_powerctrl_1 >= 0)
-		nv_wr32(devinit, 0x001584, saved_powerctrl_1);
+		nvkm_wr32(device, 0x001584, saved_powerctrl_1);
 	if (chip_version >= 0x40)
-		nv_wr32(devinit, 0xc040, savedc040);
+		nvkm_wr32(device, 0xc040, savedc040);
 }
 
 void
-setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
+setPLL_double_lowregs(struct nvkm_devinit *init, u32 NMNMreg,
 		      struct nvkm_pll_vals *pv)
 {
 	/* When setting PLLs, there is a merry game of disabling and enabling
@@ -280,10 +283,10 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
 	 * combined herein. Without luck it deviates from each card's formula
 	 * so as to not work on any :)
 	 */
-
+	struct nvkm_device *device = init->subdev.device;
 	uint32_t Preg = NMNMreg - 4;
 	bool mpll = Preg == 0x4020;
-	uint32_t oldPval = nv_rd32(devinit, Preg);
+	uint32_t oldPval = nvkm_rd32(device, Preg);
 	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
 	uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
 			0xc << 28 | pv->log2P << 16;
@@ -292,7 +295,7 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
 	uint32_t maskc040 = ~(3 << 14), savedc040;
 	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
 
-	if (nv_rd32(devinit, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
+	if (nvkm_rd32(device, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
 		return;
 
 	if (Preg == 0x4000)
@@ -304,7 +307,7 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
 		struct nvbios_pll info;
 		uint8_t Pval2;
 
-		if (nvbios_pll_parse(nvkm_bios(devinit), Preg, &info))
+		if (nvbios_pll_parse(device->bios, Preg, &info))
 			return;
 
 		Pval2 = pv->log2P + info.bias_p;
@@ -312,47 +315,48 @@ setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
 			Pval2 = info.max_p;
 		Pval |= 1 << 28 | Pval2 << 20;
 
-		saved4600 = nv_rd32(devinit, 0x4600);
-		nv_wr32(devinit, 0x4600, saved4600 | 8 << 28);
+		saved4600 = nvkm_rd32(device, 0x4600);
+		nvkm_wr32(device, 0x4600, saved4600 | 8 << 28);
 	}
 	if (single_stage)
 		Pval |= mpll ? 1 << 12 : 1 << 8;
 
-	nv_wr32(devinit, Preg, oldPval | 1 << 28);
-	nv_wr32(devinit, Preg, Pval & ~(4 << 28));
+	nvkm_wr32(device, Preg, oldPval | 1 << 28);
+	nvkm_wr32(device, Preg, Pval & ~(4 << 28));
 	if (mpll) {
 		Pval |= 8 << 20;
-		nv_wr32(devinit, 0x4020, Pval & ~(0xc << 28));
-		nv_wr32(devinit, 0x4038, Pval & ~(0xc << 28));
+		nvkm_wr32(device, 0x4020, Pval & ~(0xc << 28));
+		nvkm_wr32(device, 0x4038, Pval & ~(0xc << 28));
 	}
 
-	savedc040 = nv_rd32(devinit, 0xc040);
-	nv_wr32(devinit, 0xc040, savedc040 & maskc040);
+	savedc040 = nvkm_rd32(device, 0xc040);
+	nvkm_wr32(device, 0xc040, savedc040 & maskc040);
 
-	nv_wr32(devinit, NMNMreg, NMNM);
+	nvkm_wr32(device, NMNMreg, NMNM);
 	if (NMNMreg == 0x4024)
-		nv_wr32(devinit, 0x403c, NMNM);
+		nvkm_wr32(device, 0x403c, NMNM);
 
-	nv_wr32(devinit, Preg, Pval);
+	nvkm_wr32(device, Preg, Pval);
 	if (mpll) {
 		Pval &= ~(8 << 20);
-		nv_wr32(devinit, 0x4020, Pval);
-		nv_wr32(devinit, 0x4038, Pval);
-		nv_wr32(devinit, 0x4600, saved4600);
+		nvkm_wr32(device, 0x4020, Pval);
+		nvkm_wr32(device, 0x4038, Pval);
+		nvkm_wr32(device, 0x4600, saved4600);
 	}
 
-	nv_wr32(devinit, 0xc040, savedc040);
+	nvkm_wr32(device, 0xc040, savedc040);
 
 	if (mpll) {
-		nv_wr32(devinit, 0x4020, Pval & ~(1 << 28));
-		nv_wr32(devinit, 0x4038, Pval & ~(1 << 28));
+		nvkm_wr32(device, 0x4020, Pval & ~(1 << 28));
+		nvkm_wr32(device, 0x4038, Pval & ~(1 << 28));
 	}
 }
 
 int
 nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(devinit);
+	struct nvkm_subdev *subdev = &devinit->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvkm_pll_vals pv;
 	struct nvbios_pll info;
 	int cv = bios->version.chip;
@@ -363,8 +367,7 @@ nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
 	if (ret)
 		return ret;
 
-	ret = nv04_pll_calc(nv_subdev(devinit), &info, freq,
-			    &N1, &M1, &N2, &M2, &P);
+	ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
 	if (!ret)
 		return -EINVAL;
 
@@ -388,83 +391,76 @@ nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
 }
 
 int
-nv04_devinit_fini(struct nvkm_object *object, bool suspend)
+nv04_devinit_post(struct nvkm_devinit *init, bool execute)
 {
-	struct nv04_devinit_priv *priv = (void *)object;
-	int ret;
+	return nvbios_init(&init->subdev, execute);
+}
 
-	/* make i2c busses accessible */
-	nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
+void
+nv04_devinit_preinit(struct nvkm_devinit *base)
+{
+	struct nv04_devinit *init = nv04_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
-	ret = nvkm_devinit_fini(&priv->base, suspend);
-	if (ret)
-		return ret;
+	/* make i2c busses accessible */
+	nvkm_mask(device, 0x000200, 0x00000001, 0x00000001);
 
 	/* unslave crtcs */
-	if (priv->owner < 0)
-		priv->owner = nv_rdvgaowner(priv);
-	nv_wrvgaowner(priv, 0);
-	return 0;
-}
-
-int
-nv04_devinit_init(struct nvkm_object *object)
-{
-	struct nv04_devinit_priv *priv = (void *)object;
-
-	if (!priv->base.post) {
-		u32 htotal = nv_rdvgac(priv, 0, 0x06);
-		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x01) << 8;
-		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x20) << 4;
-		htotal |= (nv_rdvgac(priv, 0, 0x25) & 0x01) << 10;
-		htotal |= (nv_rdvgac(priv, 0, 0x41) & 0x01) << 11;
+	if (init->owner < 0)
+		init->owner = nvkm_rdvgaowner(device);
+	nvkm_wrvgaowner(device, 0);
+
+	if (!init->base.post) {
+		u32 htotal = nvkm_rdvgac(device, 0, 0x06);
+		htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x01) << 8;
+		htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x20) << 4;
+		htotal |= (nvkm_rdvgac(device, 0, 0x25) & 0x01) << 10;
+		htotal |= (nvkm_rdvgac(device, 0, 0x41) & 0x01) << 11;
 		if (!htotal) {
-			nv_info(priv, "adaptor not initialised\n");
-			priv->base.post = true;
+			nvkm_debug(subdev, "adaptor not initialised\n");
+			init->base.post = true;
 		}
 	}
-
-	return nvkm_devinit_init(&priv->base);
 }
 
-void
-nv04_devinit_dtor(struct nvkm_object *object)
+void *
+nv04_devinit_dtor(struct nvkm_devinit *base)
 {
-	struct nv04_devinit_priv *priv = (void *)object;
-
+	struct nv04_devinit *init = nv04_devinit(base);
 	/* restore vga owner saved at first init */
-	nv_wrvgaowner(priv, priv->owner);
-
-	nvkm_devinit_destroy(&priv->base);
+	nvkm_wrvgaowner(init->base.subdev.device, init->owner);
+	return init;
 }
 
 int
-nv04_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv04_devinit_new_(const struct nvkm_devinit_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_devinit **pinit)
 {
-	struct nv04_devinit_priv *priv;
-	int ret;
+	struct nv04_devinit *init;
 
-	ret = nvkm_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
+		return -ENOMEM;
+	*pinit = &init->base;
 
-	priv->owner = -1;
+	nvkm_devinit_ctor(func, device, index, &init->base);
+	init->owner = -1;
 	return 0;
 }
 
-struct nvkm_oclass *
-nv04_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv04_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv04_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv04_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv04_devinit, device, index, pinit);
+}
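
The new constructors all follow the kzalloc-then-ctor shape shown in nv04_devinit_new_(): allocate the chipset struct, publish the embedded base through the out-pointer, then fill in derived state. A userspace model, with calloc() standing in for kzalloc(GFP_KERNEL):

#include <stdio.h>
#include <stdlib.h>

struct devinit { int post; };

struct nv04_devinit {
	struct devinit base;	/* first, so freeing via the base is valid here */
	int owner;
};

static int
nv04_devinit_new(struct devinit **pinit)
{
	struct nv04_devinit *init;

	if (!(init = calloc(1, sizeof(*init))))
		return -1;	/* -ENOMEM in the kernel version */
	*pinit = &init->base;	/* caller only ever sees the base */

	init->owner = -1;	/* "vga owner not saved yet" sentinel */
	return 0;
}

int main(void)
{
	struct devinit *init = NULL;
	if (nv04_devinit_new(&init) == 0) {
		printf("post=%d\n", init->post);
		free(init);
	}
	return 0;
}

Publishing *pinit before the rest of the setup mirrors the patch: even if a later step fails, the caller holds a valid base pointer to tear down.
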
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 7c63abf11e22..4a87c8c2bce8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -1,19 +1,19 @@
-#ifndef __NVKM_DEVINIT_NV04_H__
-#define __NVKM_DEVINIT_NV04_H__
+#ifndef __NV04_DEVINIT_H__
+#define __NV04_DEVINIT_H__
+#define nv04_devinit(p) container_of((p), struct nv04_devinit, base)
 #include "priv.h"
 struct nvkm_pll_vals;
 
-struct nv04_devinit_priv {
+struct nv04_devinit {
 	struct nvkm_devinit base;
 	int owner;
 };
 
-int  nv04_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-void nv04_devinit_dtor(struct nvkm_object *);
-int  nv04_devinit_init(struct nvkm_object *);
-int  nv04_devinit_fini(struct nvkm_object *, bool);
+int nv04_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+		      int, struct nvkm_devinit **);
+void *nv04_devinit_dtor(struct nvkm_devinit *);
+void nv04_devinit_preinit(struct nvkm_devinit *);
+void nv04_devinit_fini(struct nvkm_devinit *);
 int  nv04_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 void setPLL_single(struct nvkm_devinit *, u32, struct nvkm_pll_vals *);
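
The nv04/nv05 meminit paths (above, and in nv05.c below) size the framebuffer by poking a pattern at candidate offsets and shrinking the configured amount whenever the readback fails. A toy replay against a heap buffer standing in for the BAR; here an out-of-range probe models a failed readback, where real hardware fails by aliasing or returning garbage:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
readback(unsigned char *fb, size_t fb_len, size_t off, unsigned pat)
{
	if (off + sizeof(pat) > fb_len)
		return 0;	/* probe beyond the real RAM: fails */
	memcpy(fb + off, &pat, sizeof(pat));
	return !memcmp(fb + off, &pat, sizeof(pat));
}

int main(void)
{
	size_t real = 8 << 20;		/* pretend the board has 8MiB */
	unsigned char *fb = calloc(1, real);
	unsigned patt = 0xdeadbeef;
	size_t amount = 32 << 20;	/* start from the largest config */

	if (!fb)
		return 1;
	while (amount > (4 << 20) && !readback(fb, real, amount / 2, ++patt))
		amount /= 2;		/* halve until the probe sticks */

	printf("detected %zuMiB\n", amount >> 20);
	free(fb);
	return 0;
}
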
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
index def8649216c2..9891eadca1ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
@@ -32,7 +32,7 @@
 #include <subdev/vga.h>
 
 static void
-nv05_devinit_meminit(struct nvkm_devinit *devinit)
+nv05_devinit_meminit(struct nvkm_devinit *init)
 {
 	static const u8 default_config_tab[][2] = {
 		{ 0x24, 0x00 },
@@ -44,8 +44,9 @@ nv05_devinit_meminit(struct nvkm_devinit *devinit)
 		{ 0x06, 0x00 },
 		{ 0x00, 0x00 }
 	};
-	struct nv04_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct io_mapping *fb;
 	u32 patt = 0xdeadbeef;
 	u16 data;
@@ -53,88 +54,90 @@ nv05_devinit_meminit(struct nvkm_devinit *devinit)
 	int i, v;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
-	strap = (nv_rd32(priv, 0x101000) & 0x0000003c) >> 2;
+	strap = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
 	if ((data = bmp_mem_init_table(bios))) {
-		ramcfg[0] = nv_ro08(bios, data + 2 * strap + 0);
-		ramcfg[1] = nv_ro08(bios, data + 2 * strap + 1);
+		ramcfg[0] = nvbios_rd08(bios, data + 2 * strap + 0);
+		ramcfg[1] = nvbios_rd08(bios, data + 2 * strap + 1);
 	} else {
 		ramcfg[0] = default_config_tab[strap][0];
 		ramcfg[1] = default_config_tab[strap][1];
 	}
 
 	/* Sequencer off */
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
 
-	if (nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
+	if (nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
 		goto out;
 
-	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+	nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
 
 	/* If present load the hardcoded scrambling table */
 	if (data) {
 		for (i = 0, data += 0x10; i < 8; i++, data += 4) {
-			u32 scramble = nv_ro32(bios, data);
-			nv_wr32(priv, NV04_PFB_SCRAMBLE(i), scramble);
+			u32 scramble = nvbios_rd32(bios, data);
+			nvkm_wr32(device, NV04_PFB_SCRAMBLE(i), scramble);
 		}
 	}
 
 	/* Set memory type/width/length defaults depending on the straps */
-	nv_mask(priv, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
+	nvkm_mask(device, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
 
 	if (ramcfg[1] & 0x80)
-		nv_mask(priv, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
+		nvkm_mask(device, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
 
-	nv_mask(priv, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
-	nv_mask(priv, NV04_PFB_CFG1, 0, 1);
+	nvkm_mask(device, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
+	nvkm_mask(device, NV04_PFB_CFG1, 0, 1);
 
 	/* Probe memory bus width */
 	for (i = 0; i < 4; i++)
 		fbmem_poke(fb, 4 * i, patt);
 
 	if (fbmem_peek(fb, 0xc) != patt)
-		nv_mask(priv, NV04_PFB_BOOT_0,
+		nvkm_mask(device, NV04_PFB_BOOT_0,
 			  NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
 
 	/* Probe memory length */
-	v = nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
+	v = nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
 
 	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
 	    (!fbmem_readback(fb, 0x1000000, ++patt) ||
 	     !fbmem_readback(fb, 0, ++patt)))
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			  NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
 
 	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
 	    !fbmem_readback(fb, 0x800000, ++patt))
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 
 	if (!fbmem_readback(fb, 0x400000, ++patt))
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
 
 out:
 	/* Sequencer on */
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
 	fbmem_fini(fb);
 }
 
-struct nvkm_oclass *
-nv05_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x05),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv05_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv05_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv05_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv05_devinit, device, index, pinit);
+}
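
nv05_devinit_meminit() sizes VRAM by writing a changing pattern near the top of the assumed range and shrinking the estimate whenever the readback fails. A self-contained sketch of that probe-by-readback idea, using a simulated aperture in place of the real fbmem helpers:

#include <stdint.h>

/* Stand-in for the mapped framebuffer aperture. */
static uint32_t fb_sim[1024];

static void fbmem_poke(uint32_t off, uint32_t v) { fb_sim[off / 4] = v; }
static uint32_t fbmem_peek(uint32_t off)         { return fb_sim[off / 4]; }

static int
fbmem_readback(uint32_t off, uint32_t patt)
{
	fbmem_poke(off, patt);
	return fbmem_peek(off) == patt;
}

/* Halve the assumed size until a pattern written at the top survives. */
static uint32_t
probe_vram(uint32_t assumed)
{
	uint32_t patt = 0xdeadbeef;
	while (assumed > 4 && !fbmem_readback(assumed - 4, ++patt))
		assumed >>= 1;
	return assumed;
}

int main(void)
{
	return probe_vram(sizeof(fb_sim)) == sizeof(fb_sim) ? 0 : 1;
}
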
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
index 7aabc1bf0640..570822f83acf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
@@ -30,33 +30,33 @@
 #include <subdev/bios/init.h>
 
 static void
-nv10_devinit_meminit(struct nvkm_devinit *devinit)
+nv10_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nv04_devinit_priv *priv = (void *)devinit;
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	static const int mem_width[] = { 0x10, 0x00, 0x20 };
 	int mem_width_count;
 	uint32_t patt = 0xdeadbeef;
 	struct io_mapping *fb;
 	int i, j, k;
 
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17)
+	if (device->card_type >= NV_11 && device->chipset >= 0x17)
 		mem_width_count = 3;
 	else
 		mem_width_count = 2;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
-	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+	nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
 
 	/* Probe memory bus width */
 	for (i = 0; i < mem_width_count; i++) {
-		nv_mask(priv, NV04_PFB_CFG0, 0x30, mem_width[i]);
+		nvkm_mask(device, NV04_PFB_CFG0, 0x30, mem_width[i]);
 
 		for (j = 0; j < 4; j++) {
 			for (k = 0; k < 4; k++)
@@ -75,7 +75,7 @@ mem_width_found:
 
 	/* Probe amount of installed memory */
 	for (i = 0; i < 4; i++) {
-		int off = nv_rd32(priv, 0x10020c) - 0x100000;
+		int off = nvkm_rd32(device, 0x10020c) - 0x100000;
 
 		fbmem_poke(fb, off, patt);
 		fbmem_poke(fb, 0, 0);
@@ -90,22 +90,24 @@ mem_width_found:
 	}
 
 	/* IC missing - disable the upper half memory space. */
-	nv_mask(priv, NV04_PFB_CFG0, 0x1000, 0);
+	nvkm_mask(device, NV04_PFB_CFG0, 0x1000, 0);
 
 amount_found:
 	fbmem_fini(fb);
 }
 
-struct nvkm_oclass *
-nv10_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x10),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv10_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv10_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv10_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv10_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
index 9f36fff5a1c3..fefafec7e2a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
@@ -26,15 +26,17 @@
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
 
-struct nvkm_oclass *
-nv1a_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x1a),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv1a_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv1a_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv1a_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
index 02fcfd921c42..4ef04e0d8826 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
@@ -30,48 +30,50 @@
 #include <subdev/bios/init.h>
 
 static void
-nv20_devinit_meminit(struct nvkm_devinit *devinit)
+nv20_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nv04_devinit_priv *priv = (void *)devinit;
-	struct nvkm_device *device = nv_device(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
 	uint32_t amount, off;
 	struct io_mapping *fb;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
-	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+	nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
 
 	/* Allow full addressing */
-	nv_mask(priv, NV04_PFB_CFG0, 0, mask);
+	nvkm_mask(device, NV04_PFB_CFG0, 0, mask);
 
-	amount = nv_rd32(priv, 0x10020c);
+	amount = nvkm_rd32(device, 0x10020c);
 	for (off = amount; off > 0x2000000; off -= 0x2000000)
 		fbmem_poke(fb, off - 4, off);
 
-	amount = nv_rd32(priv, 0x10020c);
+	amount = nvkm_rd32(device, 0x10020c);
 	if (amount != fbmem_peek(fb, amount - 4))
 		/* IC missing - disable the upper half memory space. */
-		nv_mask(priv, NV04_PFB_CFG0, mask, 0);
+		nvkm_mask(device, NV04_PFB_CFG0, mask, 0);
 
 	fbmem_fini(fb);
 }
 
-struct nvkm_oclass *
-nv20_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x20),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv20_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv20_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv20_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv20_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index 26b7cb13e167..337c2c692dc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -29,47 +29,48 @@
 #include <subdev/bios/init.h>
 #include <subdev/bios/pll.h>
 #include <subdev/clk/pll.h>
-#include <subdev/ibus.h>
 #include <subdev/vga.h>
 
 int
-nv50_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_pll info;
 	int N1, M1, N2, M2, P;
 	int ret;
 
 	ret = nvbios_pll_parse(bios, type, &info);
 	if (ret) {
-		nv_error(devinit, "failed to retrieve pll data, %d\n", ret);
+		nvkm_error(subdev, "failed to retrieve pll data, %d\n", ret);
 		return ret;
 	}
 
-	ret = nv04_pll_calc(nv_subdev(devinit), &info, freq, &N1, &M1, &N2, &M2, &P);
+	ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
 	if (!ret) {
-		nv_error(devinit, "failed pll calculation\n");
+		nvkm_error(subdev, "failed pll calculation\n");
 		return ret;
 	}
 
 	switch (info.type) {
 	case PLL_VPLL0:
 	case PLL_VPLL1:
-		nv_wr32(priv, info.reg + 0, 0x10000611);
-		nv_mask(priv, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
-		nv_mask(priv, info.reg + 8, 0x7fff00ff, (P  << 28) |
-							(M2 << 16) | N2);
+		nvkm_wr32(device, info.reg + 0, 0x10000611);
+		nvkm_mask(device, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
+		nvkm_mask(device, info.reg + 8, 0x7fff00ff, (P  << 28) |
+							    (M2 << 16) | N2);
 		break;
 	case PLL_MEMORY:
-		nv_mask(priv, info.reg + 0, 0x01ff0000, (P << 22) |
-						        (info.bias_p << 19) |
-							(P << 16));
-		nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+		nvkm_mask(device, info.reg + 0, 0x01ff0000,
+					        (P << 22) |
+						(info.bias_p << 19) |
+						(P << 16));
+		nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
 		break;
 	default:
-		nv_mask(priv, info.reg + 0, 0x00070000, (P << 16));
-		nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+		nvkm_mask(device, info.reg + 0, 0x00070000, (P << 16));
+		nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
 		break;
 	}
 
@@ -77,57 +78,68 @@ nv50_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
 }
 
 static u64
-nv50_devinit_disable(struct nvkm_devinit *devinit)
+nv50_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000))
-		disable |= (1ULL << NVDEV_ENGINE_MPEG);
+		disable |= (1ULL << NVKM_ENGINE_MPEG);
 
 	return disable;
 }
 
-int
-nv50_devinit_init(struct nvkm_object *object)
+void
+nv50_devinit_preinit(struct nvkm_devinit *base)
 {
-	struct nvkm_bios *bios = nvkm_bios(object);
-	struct nvkm_ibus *ibus = nvkm_ibus(object);
-	struct nv50_devinit_priv *priv = (void *)object;
-	struct nvbios_outp info;
-	struct dcb_output outp;
-	u8  ver = 0xff, hdr, cnt, len;
-	int ret, i = 0;
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
-	if (!priv->base.post) {
-		if (!nv_rdvgac(priv, 0, 0x00) &&
-		    !nv_rdvgac(priv, 0, 0x1a)) {
-			nv_info(priv, "adaptor not initialised\n");
-			priv->base.post = true;
-		}
+	/* our heuristics can't detect whether the board has had its
+	 * devinit scripts executed or not; if the display engine is
+	 * missing, assume it's a secondary gpu which requires post
+	 */
+	if (!init->base.post) {
+		u64 disable = nvkm_devinit_disable(&init->base);
+		if (disable & (1ULL << NVKM_ENGINE_DISP))
+			init->base.post = true;
 	}
 
-	/* some boards appear to require certain priv register timeouts
-	 * to be bumped before runing devinit scripts.  not a clue why
-	 * the vbios engineers didn't make the scripts just work...
+	/* magic to detect whether or not x86 vbios code has executed
+	 * the devinit scripts to initialise the board
 	 */
-	if (priv->base.post && ibus)
-		nv_ofuncs(ibus)->init(nv_object(ibus));
+	if (!init->base.post) {
+		if (!nvkm_rdvgac(device, 0, 0x00) &&
+		    !nvkm_rdvgac(device, 0, 0x1a)) {
+			nvkm_debug(subdev, "adaptor not initialised\n");
+			init->base.post = true;
+		}
+	}
+}
 
-	ret = nvkm_devinit_init(&priv->base);
-	if (ret)
-		return ret;
+void
+nv50_devinit_init(struct nvkm_devinit *base)
+{
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvbios_outp info;
+	struct dcb_output outp;
+	u8  ver = 0xff, hdr, cnt, len;
+	int i = 0;
 
 	/* if we ran the init tables, we have to execute the first script
 	 * pointer of each dcb entry's display encoder table in order
 	 * to properly initialise each encoder.
 	 */
-	while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
+	while (init->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
 		if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
 				      &ver, &hdr, &cnt, &len, &info)) {
-			struct nvbios_init init = {
-				.subdev = nv_subdev(priv),
+			struct nvbios_init exec = {
+				.subdev = subdev,
 				.bios = bios,
 				.offset = info.script[0],
 				.outp = &outp,
@@ -135,40 +147,39 @@ nv50_devinit_init(struct nvkm_object *object)
 				.execute = 1,
 			};
 
-			nvbios_exec(&init);
+			nvbios_exec(&exec);
 		}
 		i++;
 	}
-
-	return 0;
 }
 
 int
-nv50_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv50_devinit_new_(const struct nvkm_devinit_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_devinit **pinit)
 {
-	struct nv50_devinit_priv *priv;
-	int ret;
+	struct nv50_devinit *init;
 
-	ret = nvkm_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
+		return -ENOMEM;
+	*pinit = &init->base;
 
+	nvkm_devinit_ctor(func, device, index, &init->base);
 	return 0;
 }
 
-struct nvkm_oclass *
-nv50_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv50_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = nv50_devinit_pll_set,
 	.disable = nv50_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv50_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&nv50_devinit, device, index, pinit);
+}
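
nv50_devinit_pll_set() programs the N1/M1, N2/M2 and P coefficients computed by nv04_pll_calc() into the PLL registers. A sketch of the two-stage PLL arithmetic those coefficients imply; the 27 MHz reference and the sample coefficients below are illustrative assumptions, not values taken from this patch:

#include <stdio.h>

static unsigned long
pll_freq(unsigned long ref, int N1, int M1, int N2, int M2, int P)
{
	unsigned long vco = ref * N1 / M1;	/* first multiply/divide stage */
	vco = vco * N2 / M2;			/* optional second stage */
	return vco >> P;			/* power-of-two post divider */
}

int main(void)
{
	/* e.g. 27 MHz * 100/3, single stage, P=2 -> 225 MHz (made-up numbers) */
	printf("%lu Hz\n", pll_freq(27000000UL, 100, 3, 1, 1, 2));
	return 0;
}
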
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 9243521c80ac..5de70a8486b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -1,16 +1,17 @@
-#ifndef __NVKM_DEVINIT_NV50_H__
-#define __NVKM_DEVINIT_NV50_H__
+#ifndef __NV50_DEVINIT_H__
+#define __NV50_DEVINIT_H__
+#define nv50_devinit(p) container_of((p), struct nv50_devinit, base)
 #include "priv.h"
 
-struct nv50_devinit_priv {
+struct nv50_devinit {
 	struct nvkm_devinit base;
 	u32 r001540;
 };
 
-int  nv50_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-int  nv50_devinit_init(struct nvkm_object *);
+int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+		      int, struct nvkm_devinit **);
+void nv50_devinit_preinit(struct nvkm_devinit *);
+void nv50_devinit_init(struct nvkm_devinit *);
 int  nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 int  gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index bb51a95d8012..e1f6ae58f1d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -1,34 +1,21 @@
 #ifndef __NVKM_DEVINIT_PRIV_H__
 #define __NVKM_DEVINIT_PRIV_H__
+#define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
 #include <subdev/devinit.h>
 
-struct nvkm_devinit_impl {
-	struct nvkm_oclass base;
+struct nvkm_devinit_func {
+	void *(*dtor)(struct nvkm_devinit *);
+	void (*preinit)(struct nvkm_devinit *);
+	void (*init)(struct nvkm_devinit *);
+	int  (*post)(struct nvkm_devinit *, bool post);
+	u32  (*mmio)(struct nvkm_devinit *, u32);
 	void (*meminit)(struct nvkm_devinit *);
 	int  (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
 	u64  (*disable)(struct nvkm_devinit *);
-	u32  (*mmio)(struct nvkm_devinit *, u32);
-	int  (*post)(struct nvkm_subdev *, bool);
 };
 
-#define nvkm_devinit_create(p,e,o,d)                                        \
-	nvkm_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_devinit_destroy(p) ({                                          \
-	struct nvkm_devinit *d = (p);                                       \
-	_nvkm_devinit_dtor(nv_object(d));                                   \
-})
-#define nvkm_devinit_init(p) ({                                             \
-	struct nvkm_devinit *d = (p);                                       \
-	_nvkm_devinit_init(nv_object(d));                                   \
-})
-#define nvkm_devinit_fini(p,s) ({                                           \
-	struct nvkm_devinit *d = (p);                                       \
-	_nvkm_devinit_fini(nv_object(d), (s));                              \
-})
+void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
+		       int index, struct nvkm_devinit *);
 
-int nvkm_devinit_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, int, void **);
-void _nvkm_devinit_dtor(struct nvkm_object *);
-int _nvkm_devinit_init(struct nvkm_object *);
-int _nvkm_devinit_fini(struct nvkm_object *, bool suspend);
+int nv04_devinit_post(struct nvkm_devinit *, bool);
 #endif
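
The new nvkm_devinit_func table leaves several hooks optional, so callers test the pointer before dispatching (the fb code further down does the same for func->intr). A compact sketch of that guarded dispatch, under assumed names:

struct devinit;

struct devinit_func {
	void (*preinit)(struct devinit *);	/* optional */
	int  (*post)(struct devinit *, int post);	/* optional */
};

struct devinit {
	const struct devinit_func *func;
};

static int
devinit_post(struct devinit *init, int post)
{
	if (init->func->preinit)
		init->func->preinit(init);
	return init->func->post ? init->func->post(init, post) : 0;
}
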
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index d6be4c6c5408..08105701af7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -23,6 +23,8 @@ nvkm-y += nvkm/subdev/fb/gf100.o
 nvkm-y += nvkm/subdev/fb/gk104.o
 nvkm-y += nvkm/subdev/fb/gk20a.o
 nvkm-y += nvkm/subdev/fb/gm107.o
+
+nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
 nvkm-y += nvkm/subdev/fb/ramnv10.o
 nvkm-y += nvkm/subdev/fb/ramnv1a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 61fde43dab71..a719b9becb73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -22,144 +22,151 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include "ram.h"
 
 #include <subdev/bios.h>
 #include <subdev/bios/M0203.h>
+#include <engine/gr.h>
+#include <engine/mpeg.h>
+
+bool
+nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
+{
+	return fb->func->memtype_valid(fb, memtype);
+}
+
+void
+nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
+{
+	fb->func->tile.fini(fb, region, tile);
+}
+
+void
+nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size,
+		  u32 pitch, u32 flags, struct nvkm_fb_tile *tile)
+{
+	fb->func->tile.init(fb, region, addr, size, pitch, flags, tile);
+}
+
+void
+nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	if (fb->func->tile.prog) {
+		fb->func->tile.prog(fb, region, tile);
+		if (device->gr)
+			nvkm_engine_tile(&device->gr->engine, region);
+		if (device->mpeg)
+			nvkm_engine_tile(device->mpeg, region);
+	}
+}
 
 int
 nvkm_fb_bios_memtype(struct nvkm_bios *bios)
 {
-	const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
+	const u8 ramcfg = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
 	struct nvbios_M0203E M0203E;
 	u8 ver, hdr;
 
 	if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
 		switch (M0203E.type) {
-		case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2;
-		case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3;
-		case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3;
-		case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5;
+		case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
+		case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
+		case M0203E_TYPE_GDDR3: return NVKM_RAM_TYPE_GDDR3;
+		case M0203E_TYPE_GDDR5: return NVKM_RAM_TYPE_GDDR5;
 		default:
-			nv_warn(bios, "M0203E type %02x\n", M0203E.type);
-			return NV_MEM_TYPE_UNKNOWN;
+			nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
+			return NVKM_RAM_TYPE_UNKNOWN;
 		}
 	}
 
-	nv_warn(bios, "M0203E not matched!\n");
-	return NV_MEM_TYPE_UNKNOWN;
+	nvkm_warn(subdev, "M0203E not matched!\n");
+	return NVKM_RAM_TYPE_UNKNOWN;
 }
 
-int
-_nvkm_fb_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_fb_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fb *pfb = (void *)object;
-	int ret;
+	struct nvkm_fb *fb = nvkm_fb(subdev);
+	if (fb->func->intr)
+		fb->func->intr(fb);
+}
 
-	if (pfb->ram) {
-		ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend);
-		if (ret && suspend)
+static int
+nvkm_fb_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_fb *fb = nvkm_fb(subdev);
+	if (fb->func->ram_new) {
+		int ret = fb->func->ram_new(fb, &fb->ram);
+		if (ret) {
+			nvkm_error(subdev, "vram setup failed, %d\n", ret);
 			return ret;
+		}
 	}
-
-	return nvkm_subdev_fini(&pfb->base, suspend);
+	return 0;
 }
 
-int
-_nvkm_fb_init(struct nvkm_object *object)
+static int
+nvkm_fb_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fb *pfb = (void *)object;
+	struct nvkm_fb *fb = nvkm_fb(subdev);
 	int ret, i;
 
-	ret = nvkm_subdev_init(&pfb->base);
-	if (ret)
-		return ret;
-
-	if (pfb->ram) {
-		ret = nv_ofuncs(pfb->ram)->init(nv_object(pfb->ram));
+	if (fb->ram) {
+		ret = nvkm_ram_init(fb->ram);
 		if (ret)
 			return ret;
 	}
 
-	for (i = 0; i < pfb->tile.regions; i++)
-		pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+	for (i = 0; i < fb->tile.regions; i++)
+		fb->func->tile.prog(fb, i, &fb->tile.region[i]);
 
+	if (fb->func->init)
+		fb->func->init(fb);
 	return 0;
 }
 
-void
-_nvkm_fb_dtor(struct nvkm_object *object)
+static void *
+nvkm_fb_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fb *pfb = (void *)object;
+	struct nvkm_fb *fb = nvkm_fb(subdev);
 	int i;
 
-	for (i = 0; i < pfb->tile.regions; i++)
-		pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-	nvkm_mm_fini(&pfb->tags);
+	for (i = 0; i < fb->tile.regions; i++)
+		fb->func->tile.fini(fb, i, &fb->tile.region[i]);
 
-	if (pfb->ram) {
-		nvkm_mm_fini(&pfb->vram);
-		nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
-	}
+	nvkm_ram_del(&fb->ram);
 
-	nvkm_subdev_destroy(&pfb->base);
+	if (fb->func->dtor)
+		return fb->func->dtor(fb);
+	return fb;
 }
 
-int
-nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	struct nvkm_fb_impl *impl = (void *)oclass;
-	static const char *name[] = {
-		[NV_MEM_TYPE_UNKNOWN] = "unknown",
-		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
-		[NV_MEM_TYPE_SGRAM  ] = "SGRAM",
-		[NV_MEM_TYPE_SDRAM  ] = "SDRAM",
-		[NV_MEM_TYPE_DDR1   ] = "DDR1",
-		[NV_MEM_TYPE_DDR2   ] = "DDR2",
-		[NV_MEM_TYPE_DDR3   ] = "DDR3",
-		[NV_MEM_TYPE_GDDR2  ] = "GDDR2",
-		[NV_MEM_TYPE_GDDR3  ] = "GDDR3",
-		[NV_MEM_TYPE_GDDR4  ] = "GDDR4",
-		[NV_MEM_TYPE_GDDR5  ] = "GDDR5",
-	};
-	struct nvkm_object *ram;
-	struct nvkm_fb *pfb;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PFB", "fb",
-				  length, pobject);
-	pfb = *pobject;
-	if (ret)
-		return ret;
-
-	pfb->memtype_valid = impl->memtype;
-
-	if (!impl->ram)
-		return 0;
-
-	ret = nvkm_object_ctor(nv_object(pfb), NULL, impl->ram, NULL, 0, &ram);
-	if (ret) {
-		nv_fatal(pfb, "error detecting memory configuration!!\n");
-		return ret;
-	}
-
-	pfb->ram = (void *)ram;
+static const struct nvkm_subdev_func
+nvkm_fb = {
+	.dtor = nvkm_fb_dtor,
+	.oneinit = nvkm_fb_oneinit,
+	.init = nvkm_fb_init,
+	.intr = nvkm_fb_intr,
+};
 
-	if (!nvkm_mm_initialised(&pfb->vram)) {
-		ret = nvkm_mm_init(&pfb->vram, 0, pfb->ram->size >> 12, 1);
-		if (ret)
-			return ret;
-	}
-
-	if (!nvkm_mm_initialised(&pfb->tags)) {
-		ret = nvkm_mm_init(&pfb->tags, 0, pfb->ram->tags ?
-				   ++pfb->ram->tags : 0, 1);
-		if (ret)
-			return ret;
-	}
+void
+nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_fb *fb)
+{
+	nvkm_subdev_ctor(&nvkm_fb, device, index, 0, &fb->subdev);
+	fb->func = func;
+	fb->tile.regions = fb->func->tile.regions;
+}
 
-	nv_info(pfb, "RAM type: %s\n", name[pfb->ram->type]);
-	nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram->size >> 20));
-	nv_info(pfb, "   ZCOMP: %d tags\n", pfb->ram->tags);
+int
+nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_fb **pfb)
+{
+	if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_fb_ctor(func, device, index, *pfb);
 	return 0;
 }
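
fb/base.c now splits one-time resource construction (oneinit, which builds fb->ram) from per-resume hardware programming (init, which reprograms the tile regions). A minimal userspace sketch of that lifecycle split, with malloc standing in for RAM-object creation:

#include <stdlib.h>

struct fb {
	void *ram;		/* constructed exactly once */
	int tiles_programmed;
};

static int
fb_oneinit(struct fb *fb)
{
	if (!fb->ram && !(fb->ram = malloc(64)))
		return -1;	/* -ENOMEM in-kernel */
	return 0;
}

static void
fb_init(struct fb *fb)
{
	fb->tiles_programmed = 1;	/* redone on every init/resume */
}
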
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 6c968d1e98b3..9c28392d07e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-g84_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x84),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &nv50_ram_oclass,
+static const struct nv50_fb_func
+g84_fb = {
+	.ram_new = nv50_ram_new,
 	.trap = 0x001d07ff,
-}.base.base;
+};
+
+int
+g84_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&g84_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
index 15b462ae33cb..79b523aa52aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  * 	    Roy Spliet <rspliet@eclipso.eu>
  */
-#include "priv.h"
+#include "ram.h"
 
 struct ramxlat {
 	int id;
@@ -42,9 +42,9 @@ ramxlat(const struct ramxlat *xlat, int id)
 
 static const struct ramxlat
 ramgddr3_cl_lo[] = {
-	{ 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 },
+	{ 5, 5 }, { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 }, { 12, 8 },
 	/* the below are mentioned in some, but not all, gddr3 docs */
-	{ 12, 4 }, { 13, 5 }, { 14, 6 },
+	{ 13, 9 }, { 14, 6 },
 	/* XXX: Per Samsung docs, are these used? They overlap with Qimonda */
 	/* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 },
 	 * { 15, 11 }, */
@@ -61,24 +61,25 @@ ramgddr3_cl_hi[] = {
 static const struct ramxlat
 ramgddr3_wr_lo[] = {
 	{ 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
-	{ 11, 0 },
+	{ 11, 0 }, { 13, 1 },
 	/* the below are mentioned in some, but not all, gddr3 docs */
-	{ 4, 1 }, { 6, 3 }, { 12, 1 }, { 13 , 2 },
+	{ 4, 1 }, { 6, 3 }, { 12, 1 },
 	{ -1 }
 };
 
 int
 nvkm_gddr3_calc(struct nvkm_ram *ram)
 {
-	int CL, WR, CWL, DLL = 0, ODT = 0, hi;
+	int CL, WR, CWL, DLL = 0, ODT = 0, RON, hi;
 
 	switch (ram->next->bios.timing_ver) {
 	case 0x10:
 		CWL = ram->next->bios.timing_10_CWL;
 		CL  = ram->next->bios.timing_10_CL;
 		WR  = ram->next->bios.timing_10_WR;
-		DLL = !ram->next->bios.ramcfg_10_DLLoff;
+		DLL = !ram->next->bios.ramcfg_DLLoff;
 		ODT = ram->next->bios.timing_10_ODT;
+		RON = ram->next->bios.ramcfg_RON;
 		break;
 	case 0x20:
 		CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
@@ -89,6 +90,7 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
 		ODT =  (ram->mr[1] & 0x004) >> 2 |
 		       (ram->mr[1] & 0x040) >> 5 |
 		       (ram->mr[1] & 0x200) >> 7;
+		RON = !((ram->mr[1] & 0x300) >> 8);
 		break;
 	default:
 		return -ENOSYS;
@@ -107,7 +109,7 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
 
 	ram->mr[1] &= ~0x3fc;
 	ram->mr[1] |= (ODT & 0x03) << 2;
-	ram->mr[1] |= (ODT & 0x03) << 8;
+	ram->mr[1] |= (RON & 0x03) << 8;
 	ram->mr[1] |= (WR  & 0x03) << 4;
 	ram->mr[1] |= (WR  & 0x04) << 5;
 	ram->mr[1] |= !DLL << 6;
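
nvkm_gddr3_calc() translates each timing value through ramxlat() and splices the encoding into the mode registers with mask/shift. A hedged sketch of that lookup-and-pack step; the table entries and the field position in pack_cl() are illustrative, not the real GDDR3 MR0 layout:

struct xlat { int id, enc; };

static int
xlat(const struct xlat *t, int id)
{
	for (; t->id >= 0; t++) {
		if (t->id == id)
			return t->enc;
	}
	return -1;	/* -EINVAL in-kernel */
}

static const struct xlat
cl_lo[] = { { 7, 7 }, { 8, 0 }, { 9, 1 }, { -1, -1 } };

static unsigned int
pack_cl(unsigned int mr0, int CL)
{
	int enc = xlat(cl_lo, CL);
	mr0 &= ~0x070u;			/* clear assumed CL field */
	mr0 |= (enc & 0x7) << 4;	/* splice in the encoding */
	return mr0;
}
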
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
index f6f9eee1dcd0..24f83b09e6a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "priv.h"
+#include "ram.h"
 
 /* binary driver only executes this path if the condition (a) is true
  * for any configuration (combination of rammap+ramcfg+timing) that
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index d51aa0237baf..008bb9849f3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -22,101 +22,90 @@
  * Authors: Ben Skeggs
  */
 #include "gf100.h"
-
-#include <core/device.h>
+#include "ram.h"
 
 extern const u8 gf100_pte_storage_type_map[256];
 
 bool
-gf100_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
+gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
 {
 	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
 	return likely((gf100_pte_storage_type_map[memtype] != 0xff));
 }
 
-static void
-gf100_fb_intr(struct nvkm_subdev *subdev)
+void
+gf100_fb_intr(struct nvkm_fb *base)
 {
-	struct gf100_fb_priv *priv = (void *)subdev;
-	u32 intr = nv_rd32(priv, 0x000100);
-	if (intr & 0x08000000) {
-		nv_debug(priv, "PFFB intr\n");
-		intr &= ~0x08000000;
-	}
-	if (intr & 0x00002000) {
-		nv_debug(priv, "PBFB intr\n");
-		intr &= ~0x00002000;
-	}
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_subdev *subdev = &fb->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x000100);
+	if (intr & 0x08000000)
+		nvkm_debug(subdev, "PFFB intr\n");
+	if (intr & 0x00002000)
+		nvkm_debug(subdev, "PBFB intr\n");
 }
 
-int
-gf100_fb_init(struct nvkm_object *object)
+void
+gf100_fb_init(struct nvkm_fb *base)
 {
-	struct gf100_fb_priv *priv = (void *)object;
-	int ret;
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
+	if (fb->r100c10_page)
+		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
 
-	if (priv->r100c10_page)
-		nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
-
-	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
-	return 0;
+	nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
 }
 
-void
-gf100_fb_dtor(struct nvkm_object *object)
+void *
+gf100_fb_dtor(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct gf100_fb_priv *priv = (void *)object;
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 
-	if (priv->r100c10_page) {
-		dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
+	if (fb->r100c10_page) {
+		dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
-		__free_page(priv->r100c10_page);
+		__free_page(fb->r100c10_page);
 	}
 
-	nvkm_fb_destroy(&priv->base);
+	return fb;
 }
 
 int
-gf100_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_fb **pfb)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct gf100_fb_priv *priv;
-	int ret;
-
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (priv->r100c10_page) {
-		priv->r100c10 = dma_map_page(nv_device_base(device),
-					     priv->r100c10_page, 0, PAGE_SIZE,
-					     DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(nv_device_base(device), priv->r100c10))
+	struct gf100_fb *fb;
+
+	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_fb_ctor(func, device, index, &fb->base);
+	*pfb = &fb->base;
+
+	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c10_page) {
+		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c10))
 			return -EFAULT;
 	}
 
-	nv_subdev(priv)->intr = gf100_fb_intr;
 	return 0;
 }
 
-struct nvkm_oclass *
-gf100_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fb_ctor,
-		.dtor = gf100_fb_dtor,
-		.init = gf100_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-	.ram = &gf100_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gf100_fb = {
+	.dtor = gf100_fb_dtor,
+	.init = gf100_fb_init,
+	.intr = gf100_fb_intr,
+	.ram_new = gf100_ram_new,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gf100_fb, device, index, pfb);
+}
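
gf100_fb_new_() keeps a DMA-mapped scratch page whose bus address is later handed to the chip (register 0x100c10) during init. A kernel-style sketch of the allocate-and-map step using the same core APIs (alloc_page, dma_map_page, dma_mapping_error); it mirrors the pattern rather than the exact nouveau code:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Returns 0 on failure; on success the caller owns *ppage and must
 * dma_unmap_page() and __free_page() it on teardown, as the fb dtor does.
 */
static dma_addr_t
map_scratch(struct device *dev, struct page **ppage)
{
	dma_addr_t addr;

	*ppage = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!*ppage)
		return 0;

	addr = dma_map_page(dev, *ppage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr)) {
		__free_page(*ppage);
		*ppage = NULL;
		return 0;
	}
	return addr;
}
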
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 0af4da259471..2160e5a39c9a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -1,28 +1,17 @@
 #ifndef __NVKM_RAM_NVC0_H__
 #define __NVKM_RAM_NVC0_H__
+#define gf100_fb(p) container_of((p), struct gf100_fb, base)
 #include "priv.h"
-#include "nv50.h"
 
-struct gf100_fb_priv {
+struct gf100_fb {
 	struct nvkm_fb base;
 	struct page *r100c10_page;
 	dma_addr_t r100c10;
 };
 
-int  gf100_fb_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-void gf100_fb_dtor(struct nvkm_object *);
-int  gf100_fb_init(struct nvkm_object *);
-bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
-
-#define gf100_ram_create(p,e,o,m,d)                                             \
-	gf100_ram_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
-int  gf100_ram_create_(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, u32, int, void **);
-int  gf100_ram_get(struct nvkm_fb *, u64, u32, u32, u32,
-		  struct nvkm_mem **);
-void gf100_ram_put(struct nvkm_fb *, struct nvkm_mem **);
-
-int  gk104_ram_init(struct nvkm_object*);
+int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
+		  int index, struct nvkm_fb **);
+void *gf100_fb_dtor(struct nvkm_fb *);
+void gf100_fb_init(struct nvkm_fb *);
+void gf100_fb_intr(struct nvkm_fb *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 1c08317665bb..0edb3c316f5c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -22,16 +22,19 @@
  * Authors: Ben Skeggs
  */
 #include "gf100.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-gk104_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fb_ctor,
-		.dtor = gf100_fb_dtor,
-		.init = gf100_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-	.ram = &gk104_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gk104_fb = {
+	.dtor = gf100_fb_dtor,
+	.init = gf100_fb_init,
+	.intr = gf100_fb_intr,
+	.ram_new = gk104_ram_new,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gk104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gk104_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index a5d7857d3898..81447eb4c948 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -19,50 +19,23 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include "gf100.h"
+#include "priv.h"
 
-struct gk20a_fb_priv {
-	struct nvkm_fb base;
-};
-
-static int
-gk20a_fb_init(struct nvkm_object *object)
+static void
+gk20a_fb_init(struct nvkm_fb *fb)
 {
-	struct gk20a_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
-	return 0;
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
 }
 
-static int
-gk20a_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct gk20a_fb_priv *priv;
-	int ret;
-
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_fb_func
+gk20a_fb = {
+	.init = gk20a_fb_init,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
 
-	return 0;
+int
+gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&gk20a_fb, device, index, pfb);
 }
-
-struct nvkm_oclass *
-gk20a_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = gk20a_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 843f9356b360..2a91df8655dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -22,16 +22,19 @@
  * Authors: Ben Skeggs
  */
 #include "gf100.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-gm107_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fb_ctor,
-		.dtor = gf100_fb_dtor,
-		.init = gf100_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-	.ram = &gm107_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gm107_fb = {
+	.dtor = gf100_fb_dtor,
+	.init = gf100_fb_init,
+	.intr = gf100_fb_intr,
+	.ram_new = gm107_ram_new,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gm107_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gm107_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index dd9b8a0a3c8e..ebb30608d5ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-gt215_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0xa3),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &gt215_ram_oclass,
+static const struct nv50_fb_func
+gt215_fb = {
+	.ram_new = gt215_ram_new,
 	.trap = 0x000d0fff,
-}.base.base;
+};
+
+int
+gt215_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&gt215_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
index 7be4a47ef4ad..73b3b86a2826 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-mcp77_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0xaa),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &mcp77_ram_oclass,
+static const struct nv50_fb_func
+mcp77_fb = {
+	.ram_new = mcp77_ram_new,
 	.trap = 0x001d07ff,
-}.base.base;
+};
+
+int
+mcp77_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&mcp77_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
index 2d00656faef5..6d11e32ec7ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-mcp89_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0xaf),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &mcp77_ram_oclass,
+static const struct nv50_fb_func
+mcp89_fb = {
+	.ram_new = mcp77_ram_new,
 	.trap = 0x089d1fff,
-}.base.base;
+};
+
+int
+mcp89_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&mcp89_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index c063dec7d03a..8ff2e5db4571 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -21,67 +21,39 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 #include "regsnv04.h"
 
 bool
-nv04_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
+nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
 {
 	if (!(tile_flags & 0xff00))
 		return true;
-
 	return false;
 }
 
-static int
-nv04_fb_init(struct nvkm_object *object)
+static void
+nv04_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = fb->subdev.device;
 
 	/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
 	 * nvidia reading PFB_CFG_0, then writing back its original value.
 	 * (which was 0x701114 in this case)
 	 */
-	nv_wr32(priv, NV04_PFB_CFG0, 0x1114);
-	return 0;
+	nvkm_wr32(device, NV04_PFB_CFG0, 0x1114);
 }
 
+static const struct nvkm_fb_func
+nv04_fb = {
+	.init = nv04_fb_init,
+	.ram_new = nv04_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
 int
-nv04_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv04_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
 {
-	struct nv04_fb_impl *impl = (void *)oclass;
-	struct nv04_fb_priv *priv;
-	int ret;
-
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.tile.regions = impl->tile.regions;
-	priv->base.tile.init = impl->tile.init;
-	priv->base.tile.comp = impl->tile.comp;
-	priv->base.tile.fini = impl->tile.fini;
-	priv->base.tile.prog = impl->tile.prog;
-	return 0;
+	return nvkm_fb_new_(&nv04_fb, device, index, pfb);
 }
-
-struct nvkm_oclass *
-nv04_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x04),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv04_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv04_ram_oclass,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
deleted file mode 100644
index caa0d03aaacc..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __NVKM_FB_NV04_H__
-#define __NVKM_FB_NV04_H__
-#include "priv.h"
-
-struct nv04_fb_priv {
-	struct nvkm_fb base;
-};
-
-int  nv04_fb_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-
-struct nv04_fb_impl {
-	struct nvkm_fb_impl base;
-	struct {
-		int regions;
-		void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
-			     u32 pitch, u32 flags, struct nvkm_fb_tile *);
-		void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
-			     struct nvkm_fb_tile *);
-		void (*fini)(struct nvkm_fb *, int i,
-			     struct nvkm_fb_tile *);
-		void (*prog)(struct nvkm_fb *, int i,
-			     struct nvkm_fb_tile *);
-	} tile;
-};
-
-void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-int  nv30_fb_init(struct nvkm_object *);
-void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-
-void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
-		       struct nvkm_fb_tile *);
-
-int  nv41_fb_init(struct nvkm_object *);
-void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-int  nv44_fb_init(struct nvkm_object *);
-void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index f3530e4a6760..e8c44f5a3d84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -23,10 +23,11 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv10_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0x80000000 | addr;
@@ -35,7 +36,7 @@ nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 }
 
 void
-nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv10_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0;
 	tile->limit = 0;
@@ -44,27 +45,27 @@ nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
 }
 
 void
-nv10_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv10_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100240 + (i * 0x10));
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100240 + (i * 0x10));
 }
 
-struct nvkm_oclass *
-nv10_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x10),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv10_ram_oclass,
+static const struct nvkm_fb_func
+nv10_fb = {
 	.tile.regions = 8,
 	.tile.init = nv10_fb_tile_init,
 	.tile.fini = nv10_fb_tile_fini,
 	.tile.prog = nv10_fb_tile_prog,
-}.base.base;
+	.ram_new = nv10_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv10_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv10_fb, device, index, pfb);
+}
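
The trailing nvkm_rd32() in nv10_fb_tile_prog() is a posting read: reading any register back on the same bus forces the preceding writes out of the write buffer before the function returns. A generic sketch over an assumed MMIO mapping:

#include <stdint.h>

static inline void
wr32(volatile uint32_t *mmio, uint32_t r, uint32_t v)
{
	mmio[r / 4] = v;
}

static inline uint32_t
rd32(volatile uint32_t *mmio, uint32_t r)
{
	return mmio[r / 4];
}

static void
tile_prog(volatile uint32_t *mmio, int i, uint32_t addr,
	  uint32_t limit, uint32_t pitch)
{
	wr32(mmio, 0x100244 + i * 0x10, limit);
	wr32(mmio, 0x100248 + i * 0x10, pitch);
	wr32(mmio, 0x100240 + i * 0x10, addr);
	(void)rd32(mmio, 0x100240 + i * 0x10);	/* flush posted writes */
}
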
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 83bcb73caf0a..2ae0beb87567 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -23,21 +23,21 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv1a_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x1a),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv1a_ram_oclass,
+static const struct nvkm_fb_func
+nv1a_fb = {
 	.tile.regions = 8,
 	.tile.init = nv10_fb_tile_init,
 	.tile.fini = nv10_fb_tile_fini,
 	.tile.prog = nv10_fb_tile_prog,
-}.base.base;
+	.ram_new = nv1a_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv1a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv1a_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index e37084b8d05e..126865dfe777 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -23,28 +23,29 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv20_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv20_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0x00000001 | addr;
 	tile->limit = max(1u, addr + size) - 1;
 	tile->pitch = pitch;
 	if (flags & 4) {
-		pfb->tile.comp(pfb, i, size, flags, tile);
+		fb->func->tile.comp(fb, i, size, flags, tile);
 		tile->addr |= 2;
 	}
 }
 
 static void
-nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
 		else              tile->zcomp = 0x04000000; /* Z24S8 */
 		tile->zcomp |= tile->tag->offset;
@@ -56,39 +57,39 @@ nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
 }
 
 void
-nv20_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0;
 	tile->limit = 0;
 	tile->pitch = 0;
 	tile->zcomp = 0;
-	nvkm_mm_free(&pfb->tags, &tile->tag);
+	nvkm_mm_free(&fb->ram->tags, &tile->tag);
 }
 
 void
-nv20_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv20_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100240 + (i * 0x10));
-	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100240 + (i * 0x10));
+	nvkm_wr32(device, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
-struct nvkm_oclass *
-nv20_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x20),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv20_fb = {
 	.tile.regions = 8,
 	.tile.init = nv20_fb_tile_init,
 	.tile.comp = nv20_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv20_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv20_fb, device, index, pfb);
+}
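
nv20_fb_tile_comp() budgets one compression tile per 0x40 bytes and rounds the per-partition tag count up to a multiple of 0x40 before asking nvkm_mm_head() for a range. A worked example of that arithmetic with illustrative sizes:

#include <stdio.h>

#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned int size  = 0x400000;	/* 4 MiB tiled region (example) */
	unsigned int parts = 2;		/* memory partitions (example) */
	unsigned int tiles = (size + 0x3f) / 0x40;
	unsigned int tags  = ROUND_UP(tiles / parts, 0x40);

	printf("tiles=%u tags=%u\n", tiles, tags);	/* tiles=65536 tags=32768 */
	return 0;
}
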
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index bc9f54f38fba..c56746d2a502 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -23,15 +23,16 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv25_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
 		else              tile->zcomp = 0x00200000; /* Z24S8 */
 		tile->zcomp |= tile->tag->offset;
@@ -41,20 +42,19 @@ nv25_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
 	}
 }
 
-struct nvkm_oclass *
-nv25_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x25),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv25_fb = {
 	.tile.regions = 8,
 	.tile.init = nv20_fb_tile_init,
 	.tile.comp = nv25_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv25_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv25_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 09ebb9477e00..2a7c4831b821 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -23,20 +23,19 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
-
-#include <core/device.h>
+#include "priv.h"
+#include "ram.h"
 
 void
-nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv30_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	/* for performance, select alternate bank offset for zeta */
 	if (!(flags & 4)) {
 		tile->addr = (0 << 4);
 	} else {
-		if (pfb->tile.comp) /* z compression */
-			pfb->tile.comp(pfb, i, size, flags, tile);
+		if (fb->func->tile.comp) /* z compression */
+			fb->func->tile.comp(fb, i, size, flags, tile);
 		tile->addr = (1 << 4);
 	}
 
@@ -47,12 +46,12 @@ nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 }
 
 static void
-nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
 		else           tile->zcomp |= 0x02000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -64,23 +63,24 @@ nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
 }
 
 static int
-calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
+calc_bias(struct nvkm_fb *fb, int k, int i, int j)
 {
-	struct nvkm_device *device = nv_device(priv);
+	struct nvkm_device *device = fb->subdev.device;
 	int b = (device->chipset > 0x30 ?
-		 nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
+		 nvkm_rd32(device, 0x122c + 0x10 * k + 0x4 * j) >>
+			(4 * (i ^ 1)) :
 		 0) & 0xf;
 
 	return 2 * (b & 0x8 ? b - 0x10 : b);
 }
 
 static int
-calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
+calc_ref(struct nvkm_fb *fb, int l, int k, int i)
 {
 	int j, x = 0;
 
 	for (j = 0; j < 4; j++) {
-		int m = (l >> (8 * i) & 0xff) + calc_bias(priv, k, i, j);
+		int m = (l >> (8 * i) & 0xff) + calc_bias(fb, k, i, j);
 
 		x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
 	}
@@ -88,16 +88,11 @@ calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
 	return x;
 }
 
-int
-nv30_fb_init(struct nvkm_object *object)
+void
+nv30_fb_init(struct nvkm_fb *fb)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret, i, j;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = fb->subdev.device;
+	int i, j;
 
 	/* Init the memory timing regs at 0x10037c/0x1003ac */
 	if (device->chipset == 0x30 ||
@@ -105,36 +100,34 @@ nv30_fb_init(struct nvkm_object *object)
 	    device->chipset == 0x35) {
 		/* Related to ROP count */
 		int n = (device->chipset == 0x31 ? 2 : 4);
-		int l = nv_rd32(priv, 0x1003d0);
+		int l = nvkm_rd32(device, 0x1003d0);
 
 		for (i = 0; i < n; i++) {
 			for (j = 0; j < 3; j++)
-				nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j,
-					calc_ref(priv, l, 0, j));
+				nvkm_wr32(device, 0x10037c + 0xc * i + 0x4 * j,
+					  calc_ref(fb, l, 0, j));
 
 			for (j = 0; j < 2; j++)
-				nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j,
-					calc_ref(priv, l, 1, j));
+				nvkm_wr32(device, 0x1003ac + 0x8 * i + 0x4 * j,
+					  calc_ref(fb, l, 1, j));
 		}
 	}
-
-	return 0;
 }
 
-struct nvkm_oclass *
-nv30_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x30),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv30_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv30_fb = {
+	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv30_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv30_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv30_fb, device, index, pfb);
+}
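
calc_bias() above decodes a 4-bit hardware field as two's complement and doubles it, before calc_ref() folds the result into a clamped per-byte reference value. A self-contained decode of that field, using example inputs only:

#include <stdio.h>

static int decode_bias(int b)	/* b is the raw 4-bit field, 0..0xf */
{
	return 2 * (b & 0x8 ? b - 0x10 : b);	/* sign-extend, scale */
}

int main(void)
{
	for (int b = 0; b < 16; b++)
		printf("raw %2d -> bias %3d\n", b, decode_bias(b));
	return 0;	/* raw 0..7 map to 0..14, raw 8..15 to -16..-2 */
}
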
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index c01dc1839ea4..1604b3789ad1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -23,15 +23,16 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv35_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
 		else           tile->zcomp |= 0x08000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -42,20 +43,20 @@ nv35_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
 	}
 }
 
-struct nvkm_oclass *
-nv35_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x35),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv30_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv35_fb = {
+	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv35_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv35_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv35_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index cad75a1cef22..80cc0a6e3416 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -23,15 +23,16 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv36_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
 		else           tile->zcomp |= 0x20000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -42,20 +43,20 @@ nv36_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
 	}
 }
 
-struct nvkm_oclass *
-nv36_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x36),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv30_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv36_fb = {
+	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv36_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv36_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv36_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index dbe5c1910c2c..deec46a310f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -23,16 +23,17 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x80);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x100);
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x100);
 	if ( (flags & 2) &&
-	    !nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	    !nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		tile->zcomp  = 0x28000000; /* Z24S8_SPLIT_GRAD */
 		tile->zcomp |= ((tile->tag->offset           ) >> 8);
 		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -42,34 +43,26 @@ nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
 	}
 }
 
-static int
-nv40_fb_init(struct nvkm_object *object)
+static void
+nv40_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
-	return 0;
+	nvkm_mask(fb->subdev.device, 0x10033c, 0x00008000, 0x00000000);
 }
 
-struct nvkm_oclass *
-nv40_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x40),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv40_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv40_ram_oclass,
+static const struct nvkm_fb_func
+nv40_fb = {
+	.init = nv40_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv40_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv40_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv40_fb, device, index, pfb);
+}
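
From nv20 through nv40 the hunks repeat one conversion: the nvkm_oclass boilerplate collapses into a const per-chipset nvkm_fb_func table handed to a shared constructor. A minimal stand-in for that pattern, with simplified local types rather than the real nvkm definitions:

#include <stdlib.h>

struct fb;

struct fb_func {
	void (*init)(struct fb *);
	int tile_regions;
};

struct fb {
	const struct fb_func *func;
};

static int fb_new_(const struct fb_func *func, struct fb **pfb)
{
	if (!(*pfb = calloc(1, sizeof(**pfb))))
		return -1;	/* stand-in for -ENOMEM */
	(*pfb)->func = func;	/* behaviour selected by table, not class */
	return 0;
}

static const struct fb_func nv40_fb_example = {
	.tile_regions = 8,	/* cf. .tile.regions = 8 above */
};

int main(void)
{
	struct fb *fb;
	int ret = fb_new_(&nv40_fb_example, &fb);
	free(fb);
	return ret;
}
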
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
deleted file mode 100644
index 602182661820..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __NVKM_FB_NV40_H__
-#define __NVKM_FB_NV40_H__
-#include "priv.h"
-
-struct nv40_ram {
-	struct nvkm_ram base;
-	u32 ctrl;
-	u32 coef;
-};
-
-int  nv40_ram_calc(struct nvkm_fb *, u32);
-int  nv40_ram_prog(struct nvkm_fb *);
-void nv40_ram_tidy(struct nvkm_fb *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index d9e1a40a2955..79e57dd5a00f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -23,46 +23,40 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv41_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv41_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100600 + (i * 0x10));
-	nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100600 + (i * 0x10));
+	nvkm_wr32(device, 0x100700 + (i * 0x04), tile->zcomp);
 }
 
-int
-nv41_fb_init(struct nvkm_object *object)
+void
+nv41_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x100800, 0x00000001);
-	return 0;
+	nvkm_wr32(fb->subdev.device, 0x100800, 0x00000001);
 }
 
-struct nvkm_oclass *
-nv41_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x41),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv41_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv41_ram_oclass,
+static const struct nvkm_fb_func
+nv41_fb = {
+	.init = nv41_fb_init,
 	.tile.regions = 12,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+	.ram_new = nv41_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv41_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv41_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index 20b97c83c4af..06246cce5ec4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -23,10 +23,11 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv44_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0x00000001; /* mode = vram */
@@ -36,42 +37,36 @@ nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 }
 
 void
-nv44_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv44_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100600 + (i * 0x10));
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100600 + (i * 0x10));
 }
 
-int
-nv44_fb_init(struct nvkm_object *object)
+void
+nv44_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x100850, 0x80000000);
-	nv_wr32(priv, 0x100800, 0x00000001);
-	return 0;
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100850, 0x80000000);
+	nvkm_wr32(device, 0x100800, 0x00000001);
 }
 
-struct nvkm_oclass *
-nv44_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x44),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv44_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv44_ram_oclass,
+static const struct nvkm_fb_func
+nv44_fb = {
+	.init = nv44_fb_init,
 	.tile.regions = 12,
 	.tile.init = nv44_fb_tile_init,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+	.ram_new = nv44_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv44_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv44_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index 5bfac38cdf24..3598a1aa65be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -23,10 +23,11 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv46_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv46_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	/* for performance, select alternate bank offset for zeta */
@@ -39,19 +40,19 @@ nv46_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 	tile->pitch = pitch;
 }
 
-struct nvkm_oclass *
-nv46_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x46),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv44_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv44_ram_oclass,
+static const struct nvkm_fb_func
+nv46_fb = {
+	.init = nv44_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv46_fb_tile_init,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+	.ram_new = nv44_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv46_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv46_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index d3b3988d1d49..c505e4429314 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -23,22 +23,23 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv47_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x47),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv41_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv41_ram_oclass,
+static const struct nvkm_fb_func
+nv47_fb = {
+	.init = nv41_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+	.ram_new = nv41_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv47_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv47_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index 236e36c5054e..7b91b9f170e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -23,22 +23,23 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv49_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x49),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv41_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv49_ram_oclass,
+static const struct nvkm_fb_func
+nv49_fb = {
+	.init = nv41_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+	.ram_new = nv49_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv49_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv49_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 1352b6a73fb0..4e98210c1b1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -23,21 +23,22 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv4e_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x4e),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv44_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv4e_ram_oclass,
+static const struct nvkm_fb_func
+nv4e_fb = {
+	.init = nv44_fb_init,
 	.tile.regions = 12,
 	.tile.init = nv46_fb_tile_init,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+	.ram_new = nv44_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv4e_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv4e_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 0480ce52aa06..f5edfadb5b46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -22,11 +22,11 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
 #include <core/enum.h>
+#include <engine/fifo.h>
 
 int
 nv50_fb_memtype[0x80] = {
@@ -40,130 +40,139 @@ nv50_fb_memtype[0x80] = {
 	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
 };
 
-bool
-nv50_fb_memtype_valid(struct nvkm_fb *pfb, u32 memtype)
+static int
+nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
+{
+	struct nv50_fb *fb = nv50_fb(base);
+	return fb->func->ram_new(&fb->base, pram);
+}
+
+static bool
+nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
 {
 	return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
 }
 
 static const struct nvkm_enum vm_dispatch_subclients[] = {
-	{ 0x00000000, "GRCTX", NULL },
-	{ 0x00000001, "NOTIFY", NULL },
-	{ 0x00000002, "QUERY", NULL },
-	{ 0x00000003, "COND", NULL },
-	{ 0x00000004, "M2M_IN", NULL },
-	{ 0x00000005, "M2M_OUT", NULL },
-	{ 0x00000006, "M2M_NOTIFY", NULL },
+	{ 0x00000000, "GRCTX" },
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "QUERY" },
+	{ 0x00000003, "COND" },
+	{ 0x00000004, "M2M_IN" },
+	{ 0x00000005, "M2M_OUT" },
+	{ 0x00000006, "M2M_NOTIFY" },
 	{}
 };
 
 static const struct nvkm_enum vm_ccache_subclients[] = {
-	{ 0x00000000, "CB", NULL },
-	{ 0x00000001, "TIC", NULL },
-	{ 0x00000002, "TSC", NULL },
+	{ 0x00000000, "CB" },
+	{ 0x00000001, "TIC" },
+	{ 0x00000002, "TSC" },
 	{}
 };
 
 static const struct nvkm_enum vm_prop_subclients[] = {
-	{ 0x00000000, "RT0", NULL },
-	{ 0x00000001, "RT1", NULL },
-	{ 0x00000002, "RT2", NULL },
-	{ 0x00000003, "RT3", NULL },
-	{ 0x00000004, "RT4", NULL },
-	{ 0x00000005, "RT5", NULL },
-	{ 0x00000006, "RT6", NULL },
-	{ 0x00000007, "RT7", NULL },
-	{ 0x00000008, "ZETA", NULL },
-	{ 0x00000009, "LOCAL", NULL },
-	{ 0x0000000a, "GLOBAL", NULL },
-	{ 0x0000000b, "STACK", NULL },
-	{ 0x0000000c, "DST2D", NULL },
+	{ 0x00000000, "RT0" },
+	{ 0x00000001, "RT1" },
+	{ 0x00000002, "RT2" },
+	{ 0x00000003, "RT3" },
+	{ 0x00000004, "RT4" },
+	{ 0x00000005, "RT5" },
+	{ 0x00000006, "RT6" },
+	{ 0x00000007, "RT7" },
+	{ 0x00000008, "ZETA" },
+	{ 0x00000009, "LOCAL" },
+	{ 0x0000000a, "GLOBAL" },
+	{ 0x0000000b, "STACK" },
+	{ 0x0000000c, "DST2D" },
 	{}
 };
 
 static const struct nvkm_enum vm_pfifo_subclients[] = {
-	{ 0x00000000, "PUSHBUF", NULL },
-	{ 0x00000001, "SEMAPHORE", NULL },
+	{ 0x00000000, "PUSHBUF" },
+	{ 0x00000001, "SEMAPHORE" },
 	{}
 };
 
 static const struct nvkm_enum vm_bar_subclients[] = {
-	{ 0x00000000, "FB", NULL },
-	{ 0x00000001, "IN", NULL },
+	{ 0x00000000, "FB" },
+	{ 0x00000001, "IN" },
 	{}
 };
 
 static const struct nvkm_enum vm_client[] = {
-	{ 0x00000000, "STRMOUT", NULL },
+	{ 0x00000000, "STRMOUT" },
 	{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
-	{ 0x00000004, "PFIFO_WRITE", NULL },
+	{ 0x00000004, "PFIFO_WRITE" },
 	{ 0x00000005, "CCACHE", vm_ccache_subclients },
-	{ 0x00000006, "PMSPPP", NULL },
-	{ 0x00000007, "CLIPID", NULL },
-	{ 0x00000008, "PFIFO_READ", NULL },
-	{ 0x00000009, "VFETCH", NULL },
-	{ 0x0000000a, "TEXTURE", NULL },
+	{ 0x00000006, "PMSPPP" },
+	{ 0x00000007, "CLIPID" },
+	{ 0x00000008, "PFIFO_READ" },
+	{ 0x00000009, "VFETCH" },
+	{ 0x0000000a, "TEXTURE" },
 	{ 0x0000000b, "PROP", vm_prop_subclients },
-	{ 0x0000000c, "PVP", NULL },
-	{ 0x0000000d, "PBSP", NULL },
-	{ 0x0000000e, "PCRYPT", NULL },
-	{ 0x0000000f, "PCOUNTER", NULL },
-	{ 0x00000011, "PDAEMON", NULL },
+	{ 0x0000000c, "PVP" },
+	{ 0x0000000d, "PBSP" },
+	{ 0x0000000e, "PCRYPT" },
+	{ 0x0000000f, "PCOUNTER" },
+	{ 0x00000011, "PDAEMON" },
 	{}
 };
 
 static const struct nvkm_enum vm_engine[] = {
-	{ 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
-	{ 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
-	{ 0x00000004, "PEEPHOLE", NULL },
-	{ 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
+	{ 0x00000000, "PGRAPH" },
+	{ 0x00000001, "PVP" },
+	{ 0x00000004, "PEEPHOLE" },
+	{ 0x00000005, "PFIFO", vm_pfifo_subclients },
 	{ 0x00000006, "BAR", vm_bar_subclients },
-	{ 0x00000008, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
-	{ 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
-	{ 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
-	{ 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CIPHER },
-	{ 0x0000000b, "PCOUNTER", NULL },
-	{ 0x0000000c, "SEMAPHORE_BG", NULL },
-	{ 0x0000000d, "PCE0", NULL, NVDEV_ENGINE_CE0 },
-	{ 0x0000000e, "PDAEMON", NULL },
+	{ 0x00000008, "PMSPPP" },
+	{ 0x00000008, "PMPEG" },
+	{ 0x00000009, "PBSP" },
+	{ 0x0000000a, "PCRYPT" },
+	{ 0x0000000b, "PCOUNTER" },
+	{ 0x0000000c, "SEMAPHORE_BG" },
+	{ 0x0000000d, "PCE0" },
+	{ 0x0000000e, "PDAEMON" },
 	{}
 };
 
 static const struct nvkm_enum vm_fault[] = {
-	{ 0x00000000, "PT_NOT_PRESENT", NULL },
-	{ 0x00000001, "PT_TOO_SHORT", NULL },
-	{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
-	{ 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
-	{ 0x00000004, "PAGE_READ_ONLY", NULL },
-	{ 0x00000006, "NULL_DMAOBJ", NULL },
-	{ 0x00000007, "WRONG_MEMTYPE", NULL },
-	{ 0x0000000b, "VRAM_LIMIT", NULL },
-	{ 0x0000000f, "DMAOBJ_LIMIT", NULL },
+	{ 0x00000000, "PT_NOT_PRESENT" },
+	{ 0x00000001, "PT_TOO_SHORT" },
+	{ 0x00000002, "PAGE_NOT_PRESENT" },
+	{ 0x00000003, "PAGE_SYSTEM_ONLY" },
+	{ 0x00000004, "PAGE_READ_ONLY" },
+	{ 0x00000006, "NULL_DMAOBJ" },
+	{ 0x00000007, "WRONG_MEMTYPE" },
+	{ 0x0000000b, "VRAM_LIMIT" },
+	{ 0x0000000f, "DMAOBJ_LIMIT" },
 	{}
 };
 
 static void
-nv50_fb_intr(struct nvkm_subdev *subdev)
+nv50_fb_intr(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = nv_device(subdev);
-	struct nvkm_engine *engine;
-	struct nv50_fb_priv *priv = (void *)subdev;
-	const struct nvkm_enum *en, *cl;
-	struct nvkm_object *engctx = NULL;
-	u32 trap[6], idx, chan;
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_subdev *subdev = &fb->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo *fifo = device->fifo;
+	struct nvkm_fifo_chan *chan;
+	const struct nvkm_enum *en, *re, *cl, *sc;
+	u32 trap[6], idx, inst;
 	u8 st0, st1, st2, st3;
+	unsigned long flags;
 	int i;
 
-	idx = nv_rd32(priv, 0x100c90);
+	idx = nvkm_rd32(device, 0x100c90);
 	if (!(idx & 0x80000000))
 		return;
 	idx &= 0x00ffffff;
 
 	for (i = 0; i < 6; i++) {
-		nv_wr32(priv, 0x100c90, idx | i << 24);
-		trap[i] = nv_rd32(priv, 0x100c94);
+		nvkm_wr32(device, 0x100c90, idx | i << 24);
+		trap[i] = nvkm_rd32(device, 0x100c94);
 	}
-	nv_wr32(priv, 0x100c90, idx | 0x80000000);
+	nvkm_wr32(device, 0x100c90, idx | 0x80000000);
 
 	/* decode status bits into something more useful */
 	if (device->chipset  < 0xa3 ||
@@ -178,143 +187,103 @@ nv50_fb_intr(struct nvkm_subdev *subdev)
 		st2 = (trap[0] & 0x00ff0000) >> 16;
 		st3 = (trap[0] & 0xff000000) >> 24;
 	}
-	chan = (trap[2] << 16) | trap[1];
+	inst = ((trap[2] << 16) | trap[1]) << 12;
 
 	en = nvkm_enum_find(vm_engine, st0);
-
-	if (en && en->data2) {
-		const struct nvkm_enum *orig_en = en;
-		while (en->name && en->value == st0 && en->data2) {
-			engine = nvkm_engine(subdev, en->data2);
-			/*XXX: clean this up */
-			if (!engine && en->data2 == NVDEV_ENGINE_BSP)
-				engine = nvkm_engine(subdev, NVDEV_ENGINE_MSVLD);
-			if (!engine && en->data2 == NVDEV_ENGINE_CIPHER)
-				engine = nvkm_engine(subdev, NVDEV_ENGINE_SEC);
-			if (!engine && en->data2 == NVDEV_ENGINE_VP)
-				engine = nvkm_engine(subdev, NVDEV_ENGINE_MSPDEC);
-			if (engine) {
-				engctx = nvkm_engctx_get(engine, chan);
-				if (engctx)
-					break;
-			}
-			en++;
-		}
-		if (!engctx)
-			en = orig_en;
-	}
-
-	nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
-		 (trap[5] & 0x00000100) ? "read" : "write",
-		 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
-		 nvkm_client_name(engctx));
-
-	nvkm_engctx_put(engctx);
-
-	if (en)
-		pr_cont("%s/", en->name);
-	else
-		pr_cont("%02x/", st0);
-
+	re = nvkm_enum_find(vm_fault , st1);
 	cl = nvkm_enum_find(vm_client, st2);
-	if (cl)
-		pr_cont("%s/", cl->name);
-	else
-		pr_cont("%02x/", st2);
-
-	if      (cl && cl->data) cl = nvkm_enum_find(cl->data, st3);
-	else if (en && en->data) cl = nvkm_enum_find(en->data, st3);
-	else                     cl = NULL;
-	if (cl)
-		pr_cont("%s", cl->name);
-	else
-		pr_cont("%02x", st3);
-
-	pr_cont(" reason: ");
-	en = nvkm_enum_find(vm_fault, st1);
-	if (en)
-		pr_cont("%s\n", en->name);
-	else
-		pr_cont("0x%08x\n", st1);
+	if      (cl && cl->data) sc = nvkm_enum_find(cl->data, st3);
+	else if (en && en->data) sc = nvkm_enum_find(en->data, st3);
+	else                     sc = NULL;
+
+	chan = nvkm_fifo_chan_inst(fifo, inst, &flags);
+	nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel %d [%08x %s] "
+			   "engine %02x [%s] client %02x [%s] "
+			   "subclient %02x [%s] reason %08x [%s]\n",
+		   (trap[5] & 0x00000100) ? "read" : "write",
+		   trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
+		   chan ? chan->chid : -1, inst,
+		   chan ? chan->object.client->name : "unknown",
+		   st0, en ? en->name : "",
+		   st2, cl ? cl->name : "", st3, sc ? sc->name : "",
+		   st1, re ? re->name : "");
+	nvkm_fifo_chan_put(fifo, flags, &chan);
 }
 
-int
-nv50_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+static void
+nv50_fb_init(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv50_fb_priv *priv;
-	int ret;
-
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 
-	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (priv->r100c08_page) {
-		priv->r100c08 = dma_map_page(nv_device_base(device),
-					     priv->r100c08_page, 0, PAGE_SIZE,
-					     DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(nv_device_base(device), priv->r100c08))
-			return -EFAULT;
-	} else {
-		nv_warn(priv, "failed 0x100c08 page alloc\n");
-	}
+	/* Not a clue what this is exactly.  Without pointing it at a
+	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+	 * cause IOMMU "read from address 0" errors (rh#561267)
+	 */
+	nvkm_wr32(device, 0x100c08, fb->r100c08 >> 8);
 
-	nv_subdev(priv)->intr = nv50_fb_intr;
-	return 0;
+	/* This is needed to get meaningful information from 100c90
+	 * on traps. No idea what these values mean exactly. */
+	nvkm_wr32(device, 0x100c90, fb->func->trap);
 }
 
-void
-nv50_fb_dtor(struct nvkm_object *object)
+static void *
+nv50_fb_dtor(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nv50_fb_priv *priv = (void *)object;
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 
-	if (priv->r100c08_page) {
-		dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
+	if (fb->r100c08_page) {
+		dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
-		__free_page(priv->r100c08_page);
+		__free_page(fb->r100c08_page);
 	}
 
-	nvkm_fb_destroy(&priv->base);
+	return fb;
 }
 
+static const struct nvkm_fb_func
+nv50_fb_ = {
+	.dtor = nv50_fb_dtor,
+	.init = nv50_fb_init,
+	.intr = nv50_fb_intr,
+	.ram_new = nv50_fb_ram_new,
+	.memtype_valid = nv50_fb_memtype_valid,
+};
+
 int
-nv50_fb_init(struct nvkm_object *object)
+nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_fb **pfb)
 {
-	struct nv50_fb_impl *impl = (void *)object->oclass;
-	struct nv50_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	/* Not a clue what this is exactly.  Without pointing it at a
-	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
-	 * cause IOMMU "read from address 0" errors (rh#561267)
-	 */
-	nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+	struct nv50_fb *fb;
+
+	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_fb_ctor(&nv50_fb_, device, index, &fb->base);
+	fb->func = func;
+	*pfb = &fb->base;
+
+	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c08_page) {
+		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c08))
+			return -EFAULT;
+	} else {
+		nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
+	}
 
-	/* This is needed to get meaningful information from 100c90
-	 * on traps. No idea what these values mean exactly. */
-	nv_wr32(priv, 0x100c90, impl->trap);
 	return 0;
 }
 
-struct nvkm_oclass *
-nv50_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x50),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &nv50_ram_oclass,
+static const struct nv50_fb_func
+nv50_fb = {
+	.ram_new = nv50_ram_new,
 	.trap = 0x000707ff,
-}.base.base;
+};
+
+int
+nv50_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&nv50_fb, device, index, pfb);
+}
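
nv50_fb_intr() now resolves every status byte through nvkm_enum_find() and prints one consolidated error line. A sketch of that lookup against a zero-terminated value/name table, with simplified stand-in types:

#include <stdio.h>

struct enum_entry { unsigned value; const char *name; };

static const struct enum_entry vm_fault_example[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x07, "WRONG_MEMTYPE" },
	{}	/* name == NULL terminates the table */
};

static const char *enum_find(const struct enum_entry *en, unsigned value)
{
	for (; en->name; en++)
		if (en->value == value)
			return en->name;
	return NULL;
}

int main(void)
{
	unsigned st1 = 0x07;	/* example status byte from trap[0] */
	const char *re = enum_find(vm_fault_example, st1);

	printf("reason %08x [%s]\n", st1, re ? re : "");
	return 0;
}
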
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index f3cde3f1f511..faa88c8c66fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -1,31 +1,21 @@
 #ifndef __NVKM_FB_NV50_H__
 #define __NVKM_FB_NV50_H__
+#define nv50_fb(p) container_of((p), struct nv50_fb, base)
 #include "priv.h"
 
-struct nv50_fb_priv {
+struct nv50_fb {
+	const struct nv50_fb_func *func;
 	struct nvkm_fb base;
 	struct page *r100c08_page;
 	dma_addr_t r100c08;
 };
 
-int  nv50_fb_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-void nv50_fb_dtor(struct nvkm_object *);
-int  nv50_fb_init(struct nvkm_object *);
-
-struct nv50_fb_impl {
-	struct nvkm_fb_impl base;
+struct nv50_fb_func {
+	int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
 	u32 trap;
 };
 
-#define nv50_ram_create(p,e,o,d)                                               \
-	nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
-int  nv50_ram_create_(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, int, void **);
-int  nv50_ram_get(struct nvkm_fb *, u64 size, u32 align, u32 ncmin,
-		  u32 memtype, struct nvkm_mem **);
-void nv50_ram_put(struct nvkm_fb *, struct nvkm_mem **);
-void __nv50_ram_put(struct nvkm_fb *, struct nvkm_mem *);
+int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
+		 struct nvkm_fb **pfb);
 extern int nv50_fb_memtype[0x80];
 #endif
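
The new nv50_fb(p) macro is plain container_of(): given a pointer to the embedded nvkm_fb base, it recovers the enclosing nv50_fb. A generic demonstration of the mechanism with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int dummy; };
struct derived {
	int extra;
	struct base base;	/* base embedded, not necessarily first */
};
#define to_derived(p) container_of((p), struct derived, base)

int main(void)
{
	struct derived d = { .extra = 42 };
	struct base *b = &d.base;	/* what callers pass around */

	printf("extra=%d\n", to_derived(b)->extra);
	return 0;
}
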
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 485c4b64819a..62b9feb531dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -1,73 +1,62 @@
 #ifndef __NVKM_FB_PRIV_H__
 #define __NVKM_FB_PRIV_H__
+#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
 #include <subdev/fb.h>
 struct nvkm_bios;
 
-#define nvkm_ram_create(p,e,o,d)                                            \
-	nvkm_object_create_((p), (e), (o), 0, sizeof(**d), (void **)d)
-#define nvkm_ram_destroy(p)                                                 \
-	nvkm_object_destroy(&(p)->base)
-#define nvkm_ram_init(p)                                                    \
-	nvkm_object_init(&(p)->base)
-#define nvkm_ram_fini(p,s)                                                  \
-	nvkm_object_fini(&(p)->base, (s))
+struct nvkm_fb_func {
+	void *(*dtor)(struct nvkm_fb *);
+	void (*init)(struct nvkm_fb *);
+	void (*intr)(struct nvkm_fb *);
 
-#define nvkm_ram_create_(p,e,o,s,d)                                         \
-	nvkm_object_create_((p), (e), (o), 0, (s), (void **)d)
-#define _nvkm_ram_dtor nvkm_object_destroy
-#define _nvkm_ram_init nvkm_object_init
-#define _nvkm_ram_fini nvkm_object_fini
+	struct {
+		int regions;
+		void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
+			     u32 pitch, u32 flags, struct nvkm_fb_tile *);
+		void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
+			     struct nvkm_fb_tile *);
+		void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+		void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+	} tile;
 
-extern struct nvkm_oclass nv04_ram_oclass;
-extern struct nvkm_oclass nv10_ram_oclass;
-extern struct nvkm_oclass nv1a_ram_oclass;
-extern struct nvkm_oclass nv20_ram_oclass;
-extern struct nvkm_oclass nv40_ram_oclass;
-extern struct nvkm_oclass nv41_ram_oclass;
-extern struct nvkm_oclass nv44_ram_oclass;
-extern struct nvkm_oclass nv49_ram_oclass;
-extern struct nvkm_oclass nv4e_ram_oclass;
-extern struct nvkm_oclass nv50_ram_oclass;
-extern struct nvkm_oclass gt215_ram_oclass;
-extern struct nvkm_oclass mcp77_ram_oclass;
-extern struct nvkm_oclass gf100_ram_oclass;
-extern struct nvkm_oclass gk104_ram_oclass;
-extern struct nvkm_oclass gm107_ram_oclass;
+	int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
 
-int nvkm_sddr2_calc(struct nvkm_ram *ram);
-int nvkm_sddr3_calc(struct nvkm_ram *ram);
-int nvkm_gddr3_calc(struct nvkm_ram *ram);
-int nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts);
+	bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
+};
 
-#define nvkm_fb_create(p,e,c,d)                                             \
-	nvkm_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
-#define nvkm_fb_destroy(p) ({                                               \
-	struct nvkm_fb *pfb = (p);                                          \
-	_nvkm_fb_dtor(nv_object(pfb));                                      \
-})
-#define nvkm_fb_init(p) ({                                                  \
-	struct nvkm_fb *pfb = (p);                                          \
-	_nvkm_fb_init(nv_object(pfb));                                      \
-})
-#define nvkm_fb_fini(p,s) ({                                                \
-	struct nvkm_fb *pfb = (p);                                          \
-	_nvkm_fb_fini(nv_object(pfb), (s));                                 \
-})
+void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
+		  int index, struct nvkm_fb *);
+int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
+		 int index, struct nvkm_fb **);
+int nvkm_fb_bios_memtype(struct nvkm_bios *);
 
-int nvkm_fb_create_(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, int, void **);
-void _nvkm_fb_dtor(struct nvkm_object *);
-int  _nvkm_fb_init(struct nvkm_object *);
-int  _nvkm_fb_fini(struct nvkm_object *, bool);
+bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
 
-struct nvkm_fb_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *ram;
-	bool (*memtype)(struct nvkm_fb *, u32);
-};
+void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
 
-bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
-bool nv50_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv30_fb_init(struct nvkm_fb *);
+void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+
+void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
+		       struct nvkm_fb_tile *);
+
+void nv41_fb_init(struct nvkm_fb *);
+void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv44_fb_init(struct nvkm_fb *);
+void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 
-int  nvkm_fb_bios_memtype(struct nvkm_bios *);
+bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
 #endif
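
The tile.{regions,init,comp,fini,prog} hooks declared above separate computing the tile state from committing it to registers. A hedged sketch of how a caller might drive them; the loop shape and stub bodies are assumptions, simplified rather than copied from the fb core:

#include <stdio.h>

struct fb_tile { unsigned addr, limit, pitch, zcomp; };

/* stand-in hook table mirroring the tile.{regions,init,prog} members */
struct tile_funcs {
	int regions;
	void (*init)(int i, unsigned addr, unsigned size, unsigned pitch,
		     unsigned flags, struct fb_tile *);
	void (*prog)(int i, struct fb_tile *);
};

static void stub_init(int i, unsigned addr, unsigned size, unsigned pitch,
		      unsigned flags, struct fb_tile *tile)
{
	(void)i; (void)flags;
	tile->addr  = addr | 1;		/* placeholder fill */
	tile->limit = addr + size - 1;
	tile->pitch = pitch;
}

static void stub_prog(int i, struct fb_tile *tile)
{
	printf("region %d: addr=%08x limit=%08x pitch=%08x\n",
	       i, tile->addr, tile->limit, tile->pitch);
}

static const struct tile_funcs funcs = {
	.regions = 8, .init = stub_init, .prog = stub_prog,
};

int main(void)
{
	struct fb_tile tile = { 0 };
	int i = 0;			/* hypothetical region index */

	if (i < funcs.regions) {
		funcs.init(i, 0x100000, 0x10000, 0x100, 0, &tile);
		funcs.prog(i, &tile);	/* commit to (fake) registers */
	}
	return 0;
}
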
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
new file mode 100644
index 000000000000..c17d559dbfbe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ram.h"
+
+int
+nvkm_ram_init(struct nvkm_ram *ram)
+{
+	if (ram->func->init)
+		return ram->func->init(ram);
+	return 0;
+}
+
+void
+nvkm_ram_del(struct nvkm_ram **pram)
+{
+	struct nvkm_ram *ram = *pram;
+	if (ram && !WARN_ON(!ram->func)) {
+		if (ram->func->dtor)
+			*pram = ram->func->dtor(ram);
+		nvkm_mm_fini(&ram->tags);
+		nvkm_mm_fini(&ram->vram);
+		kfree(*pram);
+		*pram = NULL;
+	}
+}
+
+int
+nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	      enum nvkm_ram_type type, u64 size, u32 tags,
+	      struct nvkm_ram *ram)
+{
+	static const char *name[] = {
+		[NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
+		[NVKM_RAM_TYPE_STOLEN ] = "stolen system memory",
+		[NVKM_RAM_TYPE_SGRAM  ] = "SGRAM",
+		[NVKM_RAM_TYPE_SDRAM  ] = "SDRAM",
+		[NVKM_RAM_TYPE_DDR1   ] = "DDR1",
+		[NVKM_RAM_TYPE_DDR2   ] = "DDR2",
+		[NVKM_RAM_TYPE_DDR3   ] = "DDR3",
+		[NVKM_RAM_TYPE_GDDR2  ] = "GDDR2",
+		[NVKM_RAM_TYPE_GDDR3  ] = "GDDR3",
+		[NVKM_RAM_TYPE_GDDR4  ] = "GDDR4",
+		[NVKM_RAM_TYPE_GDDR5  ] = "GDDR5",
+	};
+	struct nvkm_subdev *subdev = &fb->subdev;
+	int ret;
+
+	nvkm_info(subdev, "%d MiB %s\n", (int)(size >> 20), name[type]);
+	ram->func = func;
+	ram->fb = fb;
+	ram->type = type;
+	ram->size = size;
+
+	if (!nvkm_mm_initialised(&ram->vram)) {
+		ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (!nvkm_mm_initialised(&ram->tags)) {
+		ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
+		if (ret)
+			return ret;
+
+		nvkm_debug(subdev, "%d compression tags\n", tags);
+	}
+
+	return 0;
+}
+
+int
+nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	      enum nvkm_ram_type type, u64 size, u32 tags,
+	      struct nvkm_ram **pram)
+{
+	if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_ram_ctor(func, fb, type, size, tags, *pram);
+}
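
nvkm_ram_ctor() above sizes the VRAM allocator in NVKM_RAM_MM_SHIFT-sized blocks and, when a tag count is given, bumps it by one ('tags ? ++tags : 0') before initialising the tags allocator. An illustration of that sizing with assumed example values; the 12-bit shift matches the >> 12 shifts this series replaces in ramgf100.c below:

#include <stdint.h>
#include <stdio.h>

#define RAM_MM_SHIFT 12	/* 4 KiB allocator granularity */

int main(void)
{
	uint64_t size = 256ULL << 20;	/* hypothetical 256 MiB board */
	uint32_t tags = 0x1f00;		/* hypothetical tag count */

	printf("vram mm: %llu blocks\n",
	       (unsigned long long)(size >> RAM_MM_SHIFT));
	printf("tags mm: %u entries\n", tags ? tags + 1 : 0);
	return 0;
}
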
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
new file mode 100644
index 000000000000..f816cbf2ced3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -0,0 +1,50 @@
+#ifndef __NVKM_FB_RAM_PRIV_H__
+#define __NVKM_FB_RAM_PRIV_H__
+#include "priv.h"
+
+int  nvkm_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   enum nvkm_ram_type, u64 size, u32 tags,
+		   struct nvkm_ram *);
+int  nvkm_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   enum nvkm_ram_type, u64 size, u32 tags,
+		   struct nvkm_ram **);
+void nvkm_ram_del(struct nvkm_ram **);
+int  nvkm_ram_init(struct nvkm_ram *);
+
+extern const struct nvkm_ram_func nv04_ram_func;
+
+int  nv50_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   struct nvkm_ram *);
+int  nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
+void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);
+
+int  gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+		    u32, struct nvkm_ram *);
+int  gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
+void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+
+int  gk104_ram_init(struct nvkm_ram *ram);
+
+/* RAM type-specific MR calculation routines */
+int nvkm_sddr2_calc(struct nvkm_ram *);
+int nvkm_sddr3_calc(struct nvkm_ram *);
+int nvkm_gddr3_calc(struct nvkm_ram *);
+int nvkm_gddr5_calc(struct nvkm_ram *, bool nuts);
+
+int nv04_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv10_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv1a_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv20_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv40_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv41_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv44_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv49_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv4e_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv50_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gt215_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
index f343682b1387..9ef9d6aa3721 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
@@ -1,10 +1,11 @@
 #ifndef __NVKM_FBRAM_FUC_H__
 #define __NVKM_FBRAM_FUC_H__
+#include <subdev/fb.h>
 #include <subdev/pmu.h>
 
 struct ramfuc {
 	struct nvkm_memx *memx;
-	struct nvkm_fb *pfb;
+	struct nvkm_fb *fb;
 	int sequence;
 };
 
@@ -54,17 +55,14 @@ ramfuc_reg(u32 addr)
 }
 
 static inline int
-ramfuc_init(struct ramfuc *ram, struct nvkm_fb *pfb)
+ramfuc_init(struct ramfuc *ram, struct nvkm_fb *fb)
 {
-	struct nvkm_pmu *pmu = nvkm_pmu(pfb);
-	int ret;
-
-	ret = nvkm_memx_init(pmu, &ram->memx);
+	int ret = nvkm_memx_init(fb->subdev.device->pmu, &ram->memx);
 	if (ret)
 		return ret;
 
 	ram->sequence++;
-	ram->pfb = pfb;
+	ram->fb = fb;
 	return 0;
 }
 
@@ -72,9 +70,9 @@ static inline int
 ramfuc_exec(struct ramfuc *ram, bool exec)
 {
 	int ret = 0;
-	if (ram->pfb) {
+	if (ram->fb) {
 		ret = nvkm_memx_fini(&ram->memx, exec);
-		ram->pfb = NULL;
+		ram->fb = NULL;
 	}
 	return ret;
 }
@@ -82,8 +80,9 @@ ramfuc_exec(struct ramfuc *ram, bool exec)
 static inline u32
 ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
 {
+	struct nvkm_device *device = ram->fb->subdev.device;
 	if (reg->sequence != ram->sequence)
-		reg->data = nv_rd32(ram->pfb, reg->addr);
+		reg->data = nvkm_rd32(device, reg->addr);
 	return reg->data;
 }
 
@@ -144,11 +143,9 @@ ramfuc_train(struct ramfuc *ram)
 }
 
 static inline int
-ramfuc_train_result(struct nvkm_fb *pfb, u32 *result, u32 rsize)
+ramfuc_train_result(struct nvkm_fb *fb, u32 *result, u32 rsize)
 {
-	struct nvkm_pmu *pmu = nvkm_pmu(pfb);
-
-	return nvkm_memx_train_result(pmu, result, rsize);
+	return nvkm_memx_train_result(fb->subdev.device->pmu, result, rsize);
 }
 
 static inline void
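
ramfuc_rd32() above only trusts a register's cached shadow when its sequence stamp matches the current script, and ramfuc_init() bumps ram->sequence, invalidating every shadow at once. A stand-in showing the read path; the write path, which stamps reg->sequence, sits outside this hunk:

#include <stdio.h>

struct reg { unsigned addr, data, sequence; };
struct fuc { unsigned sequence; };

static unsigned hw_read(unsigned addr)	/* fake MMIO for the sketch */
{
	return 0x01000000 | addr;
}

static unsigned fuc_rd32(struct fuc *ram, struct reg *reg)
{
	if (reg->sequence != ram->sequence)	/* stale shadow? re-read */
		reg->data = hw_read(reg->addr);
	return reg->data;
}

int main(void)
{
	struct fuc ram = { .sequence = 1 };
	struct reg r = { .addr = 0x10f910 };

	printf("%08x\n", fuc_rd32(&ram, &r));
	return 0;
}
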
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index de9f39569943..772425ca5a9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include "gf100.h"
+#define gf100_ram(p) container_of((p), struct gf100_ram, base)
+#include "ram.h"
 #include "ramfuc.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
@@ -108,9 +108,10 @@ static void
 gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
 {
 	struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
-	struct nvkm_fb *pfb = nvkm_fb(ram);
-	u32 part = nv_rd32(pfb, 0x022438), i;
-	u32 mask = nv_rd32(pfb, 0x022554);
+	struct nvkm_fb *fb = ram->base.fb;
+	struct nvkm_device *device = fb->subdev.device;
+	u32 part = nvkm_rd32(device, 0x022438), i;
+	u32 mask = nvkm_rd32(device, 0x022554);
 	u32 addr = 0x110974;
 
 	ram_wr32(fuc, 0x10f910, magic);
@@ -124,12 +125,14 @@ gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
 }
 
 static int
-gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gf100_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_clk *clk = nvkm_clk(pfb);
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gf100_ram *ram = (void *)pfb->ram;
+	struct gf100_ram *ram = gf100_ram(base);
 	struct gf100_ramfuc *fuc = &ram->fuc;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_clk *clk = device->clk;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_ramcfg cfg;
 	u8  ver, cnt, len, strap;
 	struct {
@@ -145,37 +148,37 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
 	rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
 				      &cnt, &ramcfg.size, &cfg);
 	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
-		nv_error(pfb, "invalid/missing rammap entry\n");
+		nvkm_error(subdev, "invalid/missing rammap entry\n");
 		return -EINVAL;
 	}
 
 	/* locate specific data set for the attached memory */
-	strap = nvbios_ramcfg_index(nv_subdev(pfb));
+	strap = nvbios_ramcfg_index(subdev);
 	if (strap >= cnt) {
-		nv_error(pfb, "invalid ramcfg strap\n");
+		nvkm_error(subdev, "invalid ramcfg strap\n");
 		return -EINVAL;
 	}
 
 	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
 	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
-		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		nvkm_error(subdev, "invalid/missing ramcfg entry\n");
 		return -EINVAL;
 	}
 
 	/* lookup memory timings, if bios says they're present */
-	strap = nv_ro08(bios, ramcfg.data + 0x01);
+	strap = nvbios_rd08(bios, ramcfg.data + 0x01);
 	if (strap != 0xff) {
 		timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
 					      &cnt, &len);
 		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
-			nv_error(pfb, "invalid/missing timing entry\n");
+			nvkm_error(subdev, "invalid/missing timing entry\n");
 			return -EINVAL;
 		}
 	} else {
 		timing.data = 0;
 	}
 
-	ret = ram_init(fuc, pfb);
+	ret = ram_init(fuc, ram->base.fb);
 	if (ret)
 		return ret;
 
@@ -184,9 +187,9 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
 
 	/* determine target mclk configuration */
 	if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
-		ref = clk->read(clk, nv_clk_src_sppll0);
+		ref = nvkm_clk_read(clk, nv_clk_src_sppll0);
 	else
-		ref = clk->read(clk, nv_clk_src_sppll1);
+		ref = nvkm_clk_read(clk, nv_clk_src_sppll1);
 	div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
 	out = (ref * 2) / (div + 2);
 	mode = freq != out;
@@ -210,10 +213,10 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
 
 	if (mode == 1 && from == 0) {
 		/* calculate refpll */
-		ret = gt215_pll_calc(nv_subdev(pfb), &ram->refpll,
-				     ram->mempll.refclk, &N1, NULL, &M1, &P);
+		ret = gt215_pll_calc(subdev, &ram->refpll, ram->mempll.refclk,
+				     &N1, NULL, &M1, &P);
 		if (ret <= 0) {
-			nv_error(pfb, "unable to calc refpll\n");
+			nvkm_error(subdev, "unable to calc refpll\n");
 			return ret ? ret : -ERANGE;
 		}
 
@@ -225,10 +228,10 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
 		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
 
 		/* calculate mempll */
-		ret = gt215_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
+		ret = gt215_pll_calc(subdev, &ram->mempll, freq,
 				     &N1, NULL, &M1, &P);
 		if (ret <= 0) {
-			nv_error(pfb, "unable to calc refpll\n");
+			nvkm_error(subdev, "unable to calc mempll\n");
 			return ret ? ret : -ERANGE;
 		}
 
@@ -402,49 +405,48 @@ gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
 }
 
 static int
-gf100_ram_prog(struct nvkm_fb *pfb)
+gf100_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct gf100_ram *ram = (void *)pfb->ram;
-	struct gf100_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
+	struct gf100_ram *ram = gf100_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	ram_exec(&ram->fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
 	return 0;
 }
 
 static void
-gf100_ram_tidy(struct nvkm_fb *pfb)
+gf100_ram_tidy(struct nvkm_ram *base)
 {
-	struct gf100_ram *ram = (void *)pfb->ram;
-	struct gf100_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, false);
+	struct gf100_ram *ram = gf100_ram(base);
+	ram_exec(&ram->fuc, false);
 }
 
 extern const u8 gf100_pte_storage_type_map[256];
 
 void
-gf100_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
+gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
 {
-	struct nvkm_ltc *ltc = nvkm_ltc(pfb);
+	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
 	struct nvkm_mem *mem = *pmem;
 
 	*pmem = NULL;
 	if (unlikely(mem == NULL))
 		return;
 
-	mutex_lock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (mem->tag)
-		ltc->tags_free(ltc, &mem->tag);
-	__nv50_ram_put(pfb, mem);
-	mutex_unlock(&pfb->base.mutex);
+		nvkm_ltc_tags_free(ltc, &mem->tag);
+	__nv50_ram_put(ram, mem);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	kfree(mem);
 }
 
 int
-gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	      u32 memtype, struct nvkm_mem **pmem)
 {
-	struct nvkm_mm *mm = &pfb->vram;
+	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
+	struct nvkm_mm *mm = &ram->vram;
 	struct nvkm_mm_node *r;
 	struct nvkm_mem *mem;
 	int type = (memtype & 0x0ff);
@@ -452,9 +454,9 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
 	const bool comp = gf100_pte_storage_type_map[type] != type;
 	int ret;
 
-	size  >>= 12;
-	align >>= 12;
-	ncmin >>= 12;
+	size  >>= NVKM_RAM_MM_SHIFT;
+	align >>= NVKM_RAM_MM_SHIFT;
+	ncmin >>= NVKM_RAM_MM_SHIFT;
 	if (!ncmin)
 		ncmin = size;
 
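The literal 12 becomes NVKM_RAM_MM_SHIFT here; judging by the
(17 - 12) -> (17 - NVKM_RAM_MM_SHIFT) change just below, the constant is
12, i.e. the mm allocator keeps working in 4KiB-page units:

        /* assumption: ram.h defines NVKM_RAM_MM_SHIFT as 12 */
        u64 bytes = 8ULL * 1024 * 1024;
        u32 pages = bytes >> NVKM_RAM_MM_SHIFT;      /* 0x800 4KiB pages */
        u64 back  = (u64)pages << NVKM_RAM_MM_SHIFT; /* 8 MiB again */
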
@@ -465,14 +467,12 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
 	INIT_LIST_HEAD(&mem->regions);
 	mem->size = size;
 
-	mutex_lock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (comp) {
-		struct nvkm_ltc *ltc = nvkm_ltc(pfb);
-
 		/* compression only works with lpages */
-		if (align == (1 << (17 - 12))) {
+		if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
 			int n = size >> 5;
-			ltc->tags_alloc(ltc, n, &mem->tag);
+			nvkm_ltc_tags_alloc(ltc, n, &mem->tag);
 		}
 
 		if (unlikely(!mem->tag))
@@ -486,178 +486,173 @@ gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
 		else
 			ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
 		if (ret) {
-			mutex_unlock(&pfb->base.mutex);
-			pfb->ram->put(pfb, &mem);
+			mutex_unlock(&ram->fb->subdev.mutex);
+			ram->func->put(ram, &mem);
 			return ret;
 		}
 
 		list_add_tail(&r->rl_entry, &mem->regions);
 		size -= r->length;
 	} while (size);
-	mutex_unlock(&pfb->base.mutex);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << 12;
+	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
 
-int
-gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, u32 maskaddr, int size,
-		  void **pobject)
+static int
+gf100_ram_init(struct nvkm_ram *base)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nvkm_ram *ram;
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 parts = nv_rd32(pfb, 0x022438);
-	u32 pmask = nv_rd32(pfb, maskaddr);
-	u32 bsize = nv_rd32(pfb, 0x10f20c);
-	u32 offset, length;
-	bool uniform = true;
-	int ret, part;
+	static const u8  train0[] = {
+		0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
+		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+	};
+	static const u32 train1[] = {
+		0x00000000, 0xffffffff,
+		0x55555555, 0xaaaaaaaa,
+		0x33333333, 0xcccccccc,
+		0xf0f0f0f0, 0x0f0f0f0f,
+		0x00ff00ff, 0xff00ff00,
+		0x0000ffff, 0xffff0000,
+	};
+	struct gf100_ram *ram = gf100_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	int i;
 
-	ret = nvkm_ram_create_(parent, engine, oclass, size, pobject);
-	ram = *pobject;
-	if (ret)
-		return ret;
+	switch (ram->base.type) {
+	case NVKM_RAM_TYPE_GDDR5:
+		break;
+	default:
+		return 0;
+	}
 
-	nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
-	nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+	/* prepare for ddr link training, and load training patterns */
+	for (i = 0; i < 0x30; i++) {
+		nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
+		nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
+		nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f918,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f91c,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f918,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f91c,              train1[i % 12]);
+	}
 
-	ram->type = nvkm_fb_bios_memtype(bios);
-	ram->ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
+	return 0;
+}
+
+static const struct nvkm_ram_func
+gf100_ram_func = {
+	.init = gf100_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gf100_ram_calc,
+	.prog = gf100_ram_prog,
+	.tidy = gf100_ram_tidy,
+};
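
The per-chipset hooks move off struct nvkm_ram and into this constant
function table.  Based on the indirect calls visible elsewhere in this
patch (ram->func->calc, ram->func->put, ...), the core is expected to
dispatch roughly like this sketch (not a function added by the patch):

        static int ram_reclock(struct nvkm_ram *ram, u32 freq)
        {
                int ret = ram->func->calc(ram, freq);
                if (ret == 0)
                        ret = ram->func->prog(ram);
                ram->func->tidy(ram);
                return ret;
        }
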
+
+int
+gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	       u32 maskaddr, struct nvkm_ram *ram)
+{
+	struct nvkm_subdev *subdev = &fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+	u32 parts = nvkm_rd32(device, 0x022438);
+	u32 pmask = nvkm_rd32(device, maskaddr);
+	u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
+	u64 psize, size = 0;
+	enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
+	bool uniform = true;
+	int ret, i;
+
+	nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
+	nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);
 
 	/* read amount of vram attached to each memory controller */
-	for (part = 0; part < parts; part++) {
-		if (!(pmask & (1 << part))) {
-			u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
-			if (psize != bsize) {
-				if (psize < bsize)
-					bsize = psize;
-				uniform = false;
-			}
-
-			nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
-			ram->size += (u64)psize << 20;
+	for (i = 0; i < parts; i++) {
+		if (pmask & (1 << i))
+			continue;
+
+		psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
+		if (psize != bsize) {
+			if (psize < bsize)
+				bsize = psize;
+			uniform = false;
 		}
+
+		nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
+		size += psize;
 	}
 
+	ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
+	if (ret)
+		return ret;
+
+	nvkm_mm_fini(&ram->vram);
+
 	/* if all controllers have the same amount attached, there are no holes */
 	if (uniform) {
-		offset = rsvd_head;
-		length = (ram->size >> 12) - rsvd_head - rsvd_tail;
-		ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
+		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+				   (size - rsvd_head - rsvd_tail) >>
+				   NVKM_RAM_MM_SHIFT, 1);
+		if (ret)
+			return ret;
 	} else {
 		/* otherwise, address lowest common amount from 0GiB */
-		ret = nvkm_mm_init(&pfb->vram, rsvd_head,
-				   (bsize << 8) * parts - rsvd_head, 1);
+		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+				   ((bsize * parts) - rsvd_head) >>
+				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
 
 		/* and the rest starting from (8GiB + common_size) */
-		offset = (0x0200000000ULL >> 12) + (bsize << 8);
-		length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
-
-		ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
+		ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
+				   NVKM_RAM_MM_SHIFT,
+				   (size - (bsize * parts) - rsvd_tail) >>
+				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
-			nvkm_mm_fini(&pfb->vram);
-	}
-
-	if (ret)
-		return ret;
-
-	ram->get = gf100_ram_get;
-	ram->put = gf100_ram_put;
-	return 0;
-}
-
-static int
-gf100_ram_init(struct nvkm_object *object)
-{
-	struct nvkm_fb *pfb = (void *)object->parent;
-	struct gf100_ram *ram = (void *)object;
-	int ret, i;
-
-	ret = nvkm_ram_init(&ram->base);
-	if (ret)
-		return ret;
-
-	/* prepare for ddr link training, and load training patterns */
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5: {
-		static const u8  train0[] = {
-			0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
-			0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
-		};
-		static const u32 train1[] = {
-			0x00000000, 0xffffffff,
-			0x55555555, 0xaaaaaaaa,
-			0x33333333, 0xcccccccc,
-			0xf0f0f0f0, 0x0f0f0f0f,
-			0x00ff00ff, 0xff00ff00,
-			0x0000ffff, 0xffff0000,
-		};
-
-		for (i = 0; i < 0x30; i++) {
-			nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
-			nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
-			nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
-			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
-			nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
-			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
-		}
-	}	break;
-	default:
-		break;
+			return ret;
 	}
 
+	ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
 	return 0;
 }
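
For the non-uniform case, the two nvkm_mm_init() calls above split VRAM
into a low region common to all partitions and a high region re-based at
8GiB.  A worked example with hypothetical sizes -- two partitions
attaching 1GiB and 2GiB, so bsize = 1GiB and size = 3GiB:

        /* low region:  [256 KiB .. 2 GiB)            (bsize * parts)  */
        /* high region: [8 GiB + 1 GiB .. +1 GiB - 1 MiB)              */
        u64 low_len  = (2ULL << 30) - (256 << 10);       /* - rsvd_head */
        u64 high_off = 0x0200000000ULL + (1ULL << 30);   /* 8GiB + bsize */
        u64 high_len = (3ULL << 30) - (2ULL << 30) - (1 << 20); /* - rsvd_tail */
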
 
-static int
-gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_bios *bios = nvkm_bios(parent);
+	struct nvkm_subdev *subdev = &fb->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct gf100_ram *ram;
 	int ret;
 
-	ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
 	if (ret)
 		return ret;
 
 	ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
 	if (ret) {
-		nv_error(ram, "mclk refpll data not found\n");
+		nvkm_error(subdev, "mclk refpll data not found\n");
 		return ret;
 	}
 
 	ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
 	if (ret) {
-		nv_error(ram, "mclk pll data not found\n");
+		nvkm_error(subdev, "mclk pll data not found\n");
 		return ret;
 	}
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5:
-		ram->base.calc = gf100_ram_calc;
-		ram->base.prog = gf100_ram_prog;
-		ram->base.tidy = gf100_ram_tidy;
-		break;
-	default:
-		nv_warn(ram, "reclocking of this ram type unsupported\n");
-		return 0;
-	}
-
 	ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
 	ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
 	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
@@ -718,14 +713,3 @@ gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
 	return 0;
 }
-
-struct nvkm_oclass
-gf100_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = gf100_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1ef15c3e6a81..989355622aac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
+#define gk104_ram(p) container_of((p), struct gk104_ram, base)
+#include "ram.h"
 #include "ramfuc.h"
-#include "gf100.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
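
The gk104_ram() macro replaces the old (void *) downcasts with a
type-checked container_of().  A minimal illustration of what it gives
you (the helper name here is for illustration only):

        struct gk104_ram {
                struct nvkm_ram base;   /* embedded base object */
                /* ... chipset-specific state ... */
        };

        static struct gk104_ram *example(struct nvkm_ram *base)
        {
                /* recover the containing object from its embedded member */
                return container_of(base, struct gk104_ram, base);
        }
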
@@ -229,8 +229,9 @@ static void
 gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
 	       u32 _mask, u32 _data, u32 _copy)
 {
-	struct gk104_fb_priv *priv = (void *)nvkm_fb(ram);
+	struct nvkm_fb *fb = ram->base.fb;
 	struct ramfuc *fuc = &ram->fuc.base;
+	struct nvkm_device *device = fb->subdev.device;
 	u32 addr = 0x110000 + (reg->addr & 0xfff);
 	u32 mask = _mask | _copy;
 	u32 data = (_data & _mask) | (reg->data & _copy);
@@ -238,7 +239,7 @@ gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
 
 	for (i = 0; i < 16; i++, addr += 0x1000) {
 		if (ram->pnuts & (1 << i)) {
-			u32 prev = nv_rd32(priv, addr);
+			u32 prev = nvkm_rd32(device, addr);
 			u32 next = (prev & ~mask) | data;
 			nvkm_memx_wr32(fuc->memx, addr, next);
 		}
@@ -248,9 +249,8 @@ gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
 	gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
 
 static int
-gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
 	struct gk104_ramfuc *fuc = &ram->fuc;
 	struct nvkm_ram_data *next = ram->base.next;
 	int vc = !next->bios.ramcfg_11_02_08;
@@ -674,9 +674,8 @@ gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq)
  ******************************************************************************/
 
 static int
-gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
 	struct gk104_ramfuc *fuc = &ram->fuc;
 	const u32 rcoef = ((  ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
 	const u32 runk0 = ram->fN1 << 16;
@@ -926,9 +925,9 @@ gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq)
  ******************************************************************************/
 
 static int
-gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data)
+gk104_ram_calc_data(struct gk104_ram *ram, u32 khz, struct nvkm_ram_data *data)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
 	struct nvkm_ram_data *cfg;
 	u32 mhz = khz / 1000;
 
@@ -941,19 +940,19 @@ gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data)
 		}
 	}
 
-	nv_error(ram, "ramcfg data for %dMHz not found\n", mhz);
+	nvkm_error(subdev, "ramcfg data for %dMHz not found\n", mhz);
 	return -EINVAL;
 }
 
 static int
-gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
+gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
 	struct gk104_ramfuc *fuc = &ram->fuc;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
 	int refclk, i;
 	int ret;
 
-	ret = ram_init(fuc, pfb);
+	ret = ram_init(fuc, ram->base.fb);
 	if (ret)
 		return ret;
 
@@ -973,11 +972,11 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
 		refclk = fuc->mempll.refclk;
 
 	/* calculate refpll coefficients */
-	ret = gt215_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
+	ret = gt215_pll_calc(subdev, &fuc->refpll, refclk, &ram->N1,
 			     &ram->fN1, &ram->M1, &ram->P1);
 	fuc->mempll.refclk = ret;
 	if (ret <= 0) {
-		nv_error(pfb, "unable to calc refpll\n");
+		nvkm_error(subdev, "unable to calc refpll\n");
 		return -EINVAL;
 	}
 
@@ -990,10 +989,10 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
 		fuc->mempll.min_p = 1;
 		fuc->mempll.max_p = 2;
 
-		ret = gt215_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
+		ret = gt215_pll_calc(subdev, &fuc->mempll, next->freq,
 				     &ram->N2, NULL, &ram->M2, &ram->P2);
 		if (ret <= 0) {
-			nv_error(pfb, "unable to calc mempll\n");
+			nvkm_error(subdev, "unable to calc mempll\n");
 			return -EINVAL;
 		}
 	}
@@ -1005,15 +1004,15 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
 	ram->base.freq = next->freq;
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR3:
+	case NVKM_RAM_TYPE_DDR3:
 		ret = nvkm_sddr3_calc(&ram->base);
 		if (ret == 0)
-			ret = gk104_ram_calc_sddr3(pfb, next->freq);
+			ret = gk104_ram_calc_sddr3(ram, next->freq);
 		break;
-	case NV_MEM_TYPE_GDDR5:
+	case NVKM_RAM_TYPE_GDDR5:
 		ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0);
 		if (ret == 0)
-			ret = gk104_ram_calc_gddr5(pfb, next->freq);
+			ret = gk104_ram_calc_gddr5(ram, next->freq);
 		break;
 	default:
 		ret = -ENOSYS;
@@ -1024,21 +1023,22 @@ gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
 }
 
 static int
-gk104_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_clk *clk = nvkm_clk(pfb);
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct gk104_ram *ram = gk104_ram(base);
+	struct nvkm_clk *clk = ram->base.fb->subdev.device->clk;
 	struct nvkm_ram_data *xits = &ram->base.xition;
 	struct nvkm_ram_data *copy;
 	int ret;
 
 	if (ram->base.next == NULL) {
-		ret = gk104_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
+		ret = gk104_ram_calc_data(ram,
+					  nvkm_clk_read(clk, nv_clk_src_mem),
 					  &ram->base.former);
 		if (ret)
 			return ret;
 
-		ret = gk104_ram_calc_data(pfb, freq, &ram->base.target);
+		ret = gk104_ram_calc_data(ram, freq, &ram->base.target);
 		if (ret)
 			return ret;
 
@@ -1062,13 +1062,13 @@ gk104_ram_calc(struct nvkm_fb *pfb, u32 freq)
 		ram->base.next = &ram->base.target;
 	}
 
-	return gk104_ram_calc_xits(pfb, ram->base.next);
+	return gk104_ram_calc_xits(ram, ram->base.next);
 }
 
 static void
-gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
 	struct nvkm_ram_data *cfg;
 	u32 mhz = freq / 1000;
 	u32 mask, data;
@@ -1090,31 +1090,31 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
 		data |= cfg->bios.rammap_11_09_01ff;
 		mask |= 0x000001ff;
 	}
-	nv_mask(pfb, 0x10f468, mask, data);
+	nvkm_mask(device, 0x10f468, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0a_0400) {
 		data |= cfg->bios.rammap_11_0a_0400;
 		mask |= 0x00000001;
 	}
-	nv_mask(pfb, 0x10f420, mask, data);
+	nvkm_mask(device, 0x10f420, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0a_0800) {
 		data |= cfg->bios.rammap_11_0a_0800;
 		mask |= 0x00000001;
 	}
-	nv_mask(pfb, 0x10f430, mask, data);
+	nvkm_mask(device, 0x10f430, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0b_01f0) {
 		data |= cfg->bios.rammap_11_0b_01f0;
 		mask |= 0x0000001f;
 	}
-	nv_mask(pfb, 0x10f400, mask, data);
+	nvkm_mask(device, 0x10f400, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0b_0200) {
 		data |= cfg->bios.rammap_11_0b_0200 << 9;
 		mask |= 0x00000200;
 	}
-	nv_mask(pfb, 0x10f410, mask, data);
+	nvkm_mask(device, 0x10f410, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0d) {
 		data |= cfg->bios.rammap_11_0d << 16;
@@ -1124,7 +1124,7 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
 		data |= cfg->bios.rammap_11_0f << 8;
 		mask |= 0x0000ff00;
 	}
-	nv_mask(pfb, 0x10f440, mask, data);
+	nvkm_mask(device, 0x10f440, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0e) {
 		data |= cfg->bios.rammap_11_0e << 8;
@@ -1138,15 +1138,15 @@ gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
 		data |= cfg->bios.rammap_11_0b_0400 << 5;
 		mask |= 0x00000020;
 	}
-	nv_mask(pfb, 0x10f444, mask, data);
+	nvkm_mask(device, 0x10f444, mask, data);
 }
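
The `if (mask = 0, data = 0, cond)` pattern above uses the comma
operator to reset both accumulators before each test; one of the blocks,
written out the long way, is equivalent to:

        mask = 0;
        data = 0;
        if (ram->diff.rammap_11_0a_0400) {
                data |= cfg->bios.rammap_11_0a_0400;
                mask |= 0x00000001;
        }
        nvkm_mask(device, 0x10f420, mask, data);
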
 
 static int
-gk104_ram_prog(struct nvkm_fb *pfb)
+gk104_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct gk104_ram *ram = gk104_ram(base);
 	struct gk104_ramfuc *fuc = &ram->fuc;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
 	struct nvkm_ram_data *next = ram->base.next;
 
 	if (!nvkm_boolopt(device->cfgopt, "NvMemExec", true)) {
@@ -1154,20 +1154,19 @@ gk104_ram_prog(struct nvkm_fb *pfb)
 		return (ram->base.next == &ram->base.xition);
 	}
 
-	gk104_ram_prog_0(pfb, 1000);
+	gk104_ram_prog_0(ram, 1000);
 	ram_exec(fuc, true);
-	gk104_ram_prog_0(pfb, next->freq);
+	gk104_ram_prog_0(ram, next->freq);
 
 	return (ram->base.next == &ram->base.xition);
 }
 
 static void
-gk104_ram_tidy(struct nvkm_fb *pfb)
+gk104_ram_tidy(struct nvkm_ram *base)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
-	struct gk104_ramfuc *fuc = &ram->fuc;
+	struct gk104_ram *ram = gk104_ram(base);
 	ram->base.next = NULL;
-	ram_exec(fuc, false);
+	ram_exec(&ram->fuc, false);
 }
 
 struct gk104_ram_train {
@@ -1183,10 +1182,10 @@ struct gk104_ram_train {
 };
 
 static int
-gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg,
+gk104_ram_train_type(struct nvkm_ram *ram, int i, u8 ramcfg,
 		     struct gk104_ram_train *train)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
+	struct nvkm_bios *bios = ram->fb->subdev.device->bios;
 	struct nvbios_M0205E M0205E;
 	struct nvbios_M0205S M0205S;
 	struct nvbios_M0209E M0209E;
@@ -1244,33 +1243,35 @@ gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg,
 }
 
 static int
-gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train)
+gk104_ram_train_init_0(struct nvkm_ram *ram, struct gk104_ram_train *train)
 {
+	struct nvkm_subdev *subdev = &ram->fb->subdev;
+	struct nvkm_device *device = subdev->device;
 	int i, j;
 
 	if ((train->mask & 0x03d3) != 0x03d3) {
-		nv_warn(pfb, "missing link training data\n");
+		nvkm_warn(subdev, "missing link training data\n");
 		return -EINVAL;
 	}
 
 	for (i = 0; i < 0x30; i++) {
 		for (j = 0; j < 8; j += 4) {
-			nv_wr32(pfb, 0x10f968 + j, 0x00000000 | (i << 8));
-			nv_wr32(pfb, 0x10f920 + j, 0x00000000 |
+			nvkm_wr32(device, 0x10f968 + j, 0x00000000 | (i << 8));
+			nvkm_wr32(device, 0x10f920 + j, 0x00000000 |
 						   train->type08.data[i] << 4 |
 						   train->type06.data[i]);
-			nv_wr32(pfb, 0x10f918 + j, train->type00.data[i]);
-			nv_wr32(pfb, 0x10f920 + j, 0x00000100 |
+			nvkm_wr32(device, 0x10f918 + j, train->type00.data[i]);
+			nvkm_wr32(device, 0x10f920 + j, 0x00000100 |
 						   train->type09.data[i] << 4 |
 						   train->type07.data[i]);
-			nv_wr32(pfb, 0x10f918 + j, train->type01.data[i]);
+			nvkm_wr32(device, 0x10f918 + j, train->type01.data[i]);
 		}
 	}
 
 	for (j = 0; j < 8; j += 4) {
 		for (i = 0; i < 0x100; i++) {
-			nv_wr32(pfb, 0x10f968 + j, i);
-			nv_wr32(pfb, 0x10f900 + j, train->type04.data[i]);
+			nvkm_wr32(device, 0x10f968 + j, i);
+			nvkm_wr32(device, 0x10f900 + j, train->type04.data[i]);
 		}
 	}
 
@@ -1278,23 +1279,24 @@ gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train)
 }
 
 static int
-gk104_ram_train_init(struct nvkm_fb *pfb)
+gk104_ram_train_init(struct nvkm_ram *ram)
 {
-	u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
+	u8 ramcfg = nvbios_ramcfg_index(&ram->fb->subdev);
 	struct gk104_ram_train *train;
-	int ret = -ENOMEM, i;
+	int ret, i;
 
-	if ((train = kzalloc(sizeof(*train), GFP_KERNEL))) {
-		for (i = 0; i < 0x100; i++) {
-			ret = gk104_ram_train_type(pfb, i, ramcfg, train);
-			if (ret && ret != -ENOENT)
-				break;
-		}
+	if (!(train = kzalloc(sizeof(*train), GFP_KERNEL)))
+		return -ENOMEM;
+
+	for (i = 0; i < 0x100; i++) {
+		ret = gk104_ram_train_type(ram, i, ramcfg, train);
+		if (ret && ret != -ENOENT)
+			break;
 	}
 
-	switch (pfb->ram->type) {
-	case NV_MEM_TYPE_GDDR5:
-		ret = gk104_ram_train_init_0(pfb, train);
+	switch (ram->type) {
+	case NVKM_RAM_TYPE_GDDR5:
+		ret = gk104_ram_train_init_0(ram, train);
 		break;
 	default:
 		ret = 0;
@@ -1306,18 +1308,14 @@ gk104_ram_train_init(struct nvkm_fb *pfb)
 }
 
 int
-gk104_ram_init(struct nvkm_object *object)
+gk104_ram_init(struct nvkm_ram *ram)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-	struct gk104_ram *ram   = (void *)object;
-	struct nvkm_bios *bios = nvkm_bios(pfb);
+	struct nvkm_subdev *subdev = &ram->fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	u8  ver, hdr, cnt, len, snr, ssz;
 	u32 data, save;
-	int ret, i;
-
-	ret = nvkm_ram_init(&ram->base);
-	if (ret)
-		return ret;
+	int i;
 
 	/* run a bunch of tables from rammap table.  there's actually
 	 * individual pointers for each rammap entry too, but, nvidia
@@ -1334,33 +1332,32 @@ gk104_ram_init(struct nvkm_object *object)
 	if (!data || hdr < 0x15)
 		return -EINVAL;
 
-	cnt  = nv_ro08(bios, data + 0x14); /* guess at count */
-	data = nv_ro32(bios, data + 0x10); /* guess u32... */
-	save = nv_rd32(pfb, 0x10f65c) & 0x000000f0;
+	cnt  = nvbios_rd08(bios, data + 0x14); /* guess at count */
+	data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
+	save = nvkm_rd32(device, 0x10f65c) & 0x000000f0;
 	for (i = 0; i < cnt; i++, data += 4) {
 		if (i != save >> 4) {
-			nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
+			nvkm_mask(device, 0x10f65c, 0x000000f0, i << 4);
 			nvbios_exec(&(struct nvbios_init) {
-					.subdev = nv_subdev(pfb),
+					.subdev = subdev,
 					.bios = bios,
-					.offset = nv_ro32(bios, data),
+					.offset = nvbios_rd32(bios, data),
 					.execute = 1,
 				    });
 		}
 	}
-	nv_mask(pfb, 0x10f65c, 0x000000f0, save);
-	nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000);
-	nv_wr32(pfb, 0x10ecc0, 0xffffffff);
-	nv_mask(pfb, 0x10f160, 0x00000010, 0x00000010);
+	nvkm_mask(device, 0x10f65c, 0x000000f0, save);
+	nvkm_mask(device, 0x10f584, 0x11000000, 0x00000000);
+	nvkm_wr32(device, 0x10ecc0, 0xffffffff);
+	nvkm_mask(device, 0x10f160, 0x00000010, 0x00000010);
 
-	return gk104_ram_train_init(pfb);
+	return gk104_ram_train_init(ram);
 }
 
 static int
 gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
 {
-	struct nvkm_fb *pfb = (void *)nv_object(ram)->parent;
-	struct nvkm_bios *bios = nvkm_bios(pfb);
+	struct nvkm_bios *bios = ram->base.fb->subdev.device->bios;
 	struct nvkm_ram_data *cfg;
 	struct nvbios_ramcfg *d = &ram->diff;
 	struct nvbios_ramcfg *p, *n;
@@ -1426,63 +1423,64 @@ done:
 	return ret;
 }
 
-static void
-gk104_ram_dtor(struct nvkm_object *object)
+static void *
+gk104_ram_dtor(struct nvkm_ram *base)
 {
-	struct gk104_ram *ram = (void *)object;
+	struct gk104_ram *ram = gk104_ram(base);
 	struct nvkm_ram_data *cfg, *tmp;
 
 	list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
 		kfree(cfg);
 	}
 
-	nvkm_ram_destroy(&ram->base);
+	return ram;
 }
 
-static int
-gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+gk104_ram_func = {
+	.dtor = gk104_ram_dtor,
+	.init = gk104_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gk104_ram_calc,
+	.prog = gk104_ram_prog,
+	.tidy = gk104_ram_tidy,
+};
+
+int
+gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nvkm_gpio *gpio = nvkm_gpio(pfb);
+	struct nvkm_subdev *subdev = &fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_gpio *gpio = device->gpio;
 	struct dcb_gpio_func func;
 	struct gk104_ram *ram;
 	int ret, i;
-	u8  ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
+	u8  ramcfg = nvbios_ramcfg_index(subdev);
 	u32 tmp;
 
-	ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
 	if (ret)
 		return ret;
 
 	INIT_LIST_HEAD(&ram->cfg);
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR3:
-	case NV_MEM_TYPE_GDDR5:
-		ram->base.calc = gk104_ram_calc;
-		ram->base.prog = gk104_ram_prog;
-		ram->base.tidy = gk104_ram_tidy;
-		break;
-	default:
-		nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
-		break;
-	}
-
 	/* calculate a mask of differently configured memory partitions,
 	 * because, of course reclocking wasn't complicated enough
 	 * already without having to treat some of them differently to
 	 * the others....
 	 */
-	ram->parts = nv_rd32(pfb, 0x022438);
-	ram->pmask = nv_rd32(pfb, 0x022554);
+	ram->parts = nvkm_rd32(device, 0x022438);
+	ram->pmask = nvkm_rd32(device, 0x022554);
 	ram->pnuts = 0;
 	for (i = 0, tmp = 0; i < ram->parts; i++) {
 		if (!(ram->pmask & (1 << i))) {
-			u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000));
+			u32 cfg1 = nvkm_rd32(device, 0x110204 + (i * 0x1000));
 			if (tmp && tmp != cfg1) {
 				ram->pnuts |= (1 << i);
 				continue;
@@ -1505,7 +1503,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	for (i = 0; !ret; i++) {
 		ret = gk104_ram_ctor_data(ram, ramcfg, i);
 		if (ret && ret != -ENOENT) {
-			nv_error(pfb, "failed to parse ramcfg data\n");
+			nvkm_error(subdev, "failed to parse ramcfg data\n");
 			return ret;
 		}
 	}
@@ -1513,25 +1511,25 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	/* parse bios data for both pll's */
 	ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
 	if (ret) {
-		nv_error(pfb, "mclk refpll data not found\n");
+		nvkm_error(subdev, "mclk refpll data not found\n");
 		return ret;
 	}
 
 	ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
 	if (ret) {
-		nv_error(pfb, "mclk pll data not found\n");
+		nvkm_error(subdev, "mclk pll data not found\n");
 		return ret;
 	}
 
 	/* lookup memory voltage gpios */
-	ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
 	if (ret == 0) {
 		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
 		ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
 		ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
 	}
 
-	ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
 	if (ret == 0) {
 		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
 		ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
@@ -1588,7 +1586,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5:
+	case NVKM_RAM_TYPE_GDDR5:
 		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
 		ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
 		ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
@@ -1600,7 +1598,7 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
 		ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
 		break;
-	case NV_MEM_TYPE_DDR3:
+	case NVKM_RAM_TYPE_DDR3:
 		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
 		ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
 		break;
@@ -1626,14 +1624,3 @@ gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
 	return 0;
 }
-
-struct nvkm_oclass
-gk104_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ram_ctor,
-		.dtor = gk104_ram_dtor,
-		.init = gk104_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index a298b39f55c5..43d807f6ca71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -21,35 +21,20 @@
  *
  * Authors: Ben Skeggs
  */
-#include "gf100.h"
+#include "ram.h"
 
-struct gm107_ram {
-	struct nvkm_ram base;
+static const struct nvkm_ram_func
+gm107_ram_func = {
+	.init = gk104_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
 };
 
-static int
-gm107_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct gm107_ram *ram;
-	int ret;
+	if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ret = gf100_ram_create(parent, engine, oclass, 0x021c14, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	return 0;
+	return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
 }
-
-struct nvkm_oclass
-gm107_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = gk104_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index 24176401b49b..5c08ae8023fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -22,11 +22,10 @@
  * Authors: Ben Skeggs
  * 	    Roy Spliet <rspliet@eclipso.eu>
  */
-
+#define gt215_ram(p) container_of((p), struct gt215_ram, base)
+#include "ram.h"
 #include "ramfuc.h"
-#include "nv50.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/M0205.h>
@@ -154,14 +153,14 @@ gt215_link_train_calc(u32 *vals, struct gt215_ltrain *train)
  * Link training for (at least) DDR3
  */
 int
-gt215_link_train(struct nvkm_fb *pfb)
+gt215_link_train(struct gt215_ram *ram)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
-	struct nvkm_clk *clk = nvkm_clk(pfb);
 	struct gt215_ltrain *train = &ram->ltrain;
-	struct nvkm_device *device = nv_device(pfb);
 	struct gt215_ramfuc *fuc = &ram->fuc;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_clk *clk = device->clk;
 	u32 *result, r1700;
 	int ret, i;
 	struct nvbios_M0205T M0205T = { 0 };
@@ -182,27 +181,29 @@ gt215_link_train(struct nvkm_fb *pfb)
 
 	/* Clock speeds for training and back */
 	nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
-	if (M0205T.freq == 0)
+	if (M0205T.freq == 0) {
+		kfree(result);
 		return -ENOENT;
+	}
 
-	clk_current = clk->read(clk, nv_clk_src_mem);
+	clk_current = nvkm_clk_read(clk, nv_clk_src_mem);
 
 	ret = gt215_clk_pre(clk, f);
 	if (ret)
 		goto out;
 
 	/* First: clock up/down */
-	ret = ram->base.calc(pfb, (u32) M0205T.freq * 1000);
+	ret = ram->base.func->calc(&ram->base, (u32) M0205T.freq * 1000);
 	if (ret)
 		goto out;
 
 	/* Do this *after* calc, eliminates write in script */
-	nv_wr32(pfb, 0x111400, 0x00000000);
+	nvkm_wr32(device, 0x111400, 0x00000000);
 	/* XXX: Magic writes that improve train reliability? */
-	nv_mask(pfb, 0x100674, 0x0000ffff, 0x00000000);
-	nv_mask(pfb, 0x1005e4, 0x0000ffff, 0x00000000);
-	nv_mask(pfb, 0x100b0c, 0x000000ff, 0x00000000);
-	nv_wr32(pfb, 0x100c04, 0x00000400);
+	nvkm_mask(device, 0x100674, 0x0000ffff, 0x00000000);
+	nvkm_mask(device, 0x1005e4, 0x0000ffff, 0x00000000);
+	nvkm_mask(device, 0x100b0c, 0x000000ff, 0x00000000);
+	nvkm_wr32(device, 0x100c04, 0x00000400);
 
 	/* Now the training script */
 	r1700 = ram_rd32(fuc, 0x001700);
@@ -235,22 +236,22 @@ gt215_link_train(struct nvkm_fb *pfb)
 
 	ram_exec(fuc, true);
 
-	ram->base.calc(pfb, clk_current);
+	ram->base.func->calc(&ram->base, clk_current);
 	ram_exec(fuc, true);
 
 	/* Post-processing, avoids flicker */
-	nv_mask(pfb, 0x616308, 0x10, 0x10);
-	nv_mask(pfb, 0x616b08, 0x10, 0x10);
+	nvkm_mask(device, 0x616308, 0x10, 0x10);
+	nvkm_mask(device, 0x616b08, 0x10, 0x10);
 
 	gt215_clk_post(clk, f);
 
-	ram_train_result(pfb, result, 64);
+	ram_train_result(ram->base.fb, result, 64);
 	for (i = 0; i < 64; i++)
-		nv_debug(pfb, "Train: %08x", result[i]);
+		nvkm_debug(subdev, "Train: %08x", result[i]);
 	gt215_link_train_calc(result, train);
 
-	nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720,
-			train->r_1111e0, train->r_111400);
+	nvkm_debug(subdev, "Train: %08x %08x %08x", train->r_100720,
+		   train->r_1111e0, train->r_111400);
 
 	kfree(result);
 
@@ -265,11 +266,12 @@ out:
 	train->state = NVA3_TRAIN_UNSUPPORTED;
 
 	gt215_clk_post(clk, f);
+	kfree(result);
 	return ret;
 }
 
 int
-gt215_link_train_init(struct nvkm_fb *pfb)
+gt215_link_train_init(struct gt215_ram *ram)
 {
 	static const u32 pattern[16] = {
 		0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
@@ -277,9 +279,9 @@ gt215_link_train_init(struct nvkm_fb *pfb)
 		0x33333333, 0x55555555, 0x77777777, 0x66666666,
 		0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
 	};
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
 	struct gt215_ltrain *train = &ram->ltrain;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_mem *mem;
 	struct nvbios_M0205E M0205E;
 	u8 ver, hdr, cnt, len;
@@ -298,48 +300,47 @@ gt215_link_train_init(struct nvkm_fb *pfb)
 
 	train->state = NVA3_TRAIN_ONCE;
 
-	ret = pfb->ram->get(pfb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
+	ret = ram->base.func->get(&ram->base, 0x8000, 0x10000, 0, 0x800,
+				  &ram->ltrain.mem);
 	if (ret)
 		return ret;
 
 	mem = ram->ltrain.mem;
 
-	nv_wr32(pfb, 0x100538, 0x10000000 | (mem->offset >> 16));
-	nv_wr32(pfb, 0x1005a8, 0x0000ffff);
-	nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+	nvkm_wr32(device, 0x100538, 0x10000000 | (mem->offset >> 16));
+	nvkm_wr32(device, 0x1005a8, 0x0000ffff);
+	nvkm_mask(device, 0x10f800, 0x00000001, 0x00000001);
 
 	for (i = 0; i < 0x30; i++) {
-		nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
-		nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+		nvkm_wr32(device, 0x10f8c0, (i << 8) | i);
+		nvkm_wr32(device, 0x10f900, pattern[i % 16]);
 	}
 
 	for (i = 0; i < 0x30; i++) {
-		nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
-		nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+		nvkm_wr32(device, 0x10f8e0, (i << 8) | i);
+		nvkm_wr32(device, 0x10f920, pattern[i % 16]);
 	}
 
 	/* And upload the pattern */
-	r001700 = nv_rd32(pfb, 0x1700);
-	nv_wr32(pfb, 0x1700, mem->offset >> 16);
+	r001700 = nvkm_rd32(device, 0x1700);
+	nvkm_wr32(device, 0x1700, mem->offset >> 16);
 	for (i = 0; i < 16; i++)
-		nv_wr32(pfb, 0x700000 + (i << 2), pattern[i]);
+		nvkm_wr32(device, 0x700000 + (i << 2), pattern[i]);
 	for (i = 0; i < 16; i++)
-		nv_wr32(pfb, 0x700100 + (i << 2), pattern[i]);
-	nv_wr32(pfb, 0x1700, r001700);
+		nvkm_wr32(device, 0x700100 + (i << 2), pattern[i]);
+	nvkm_wr32(device, 0x1700, r001700);
 
-	train->r_100720 = nv_rd32(pfb, 0x100720);
-	train->r_1111e0 = nv_rd32(pfb, 0x1111e0);
-	train->r_111400 = nv_rd32(pfb, 0x111400);
+	train->r_100720 = nvkm_rd32(device, 0x100720);
+	train->r_1111e0 = nvkm_rd32(device, 0x1111e0);
+	train->r_111400 = nvkm_rd32(device, 0x111400);
 	return 0;
 }
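
The pattern upload goes through an indirect VRAM window: 0x001700
appears to select which 64KiB-aligned chunk of VRAM the 0x700000
aperture maps (inferred from the >> 16 shift, not from documentation),
and the previous selector is restored afterwards.  In isolation:

        /* sketch; the window semantics are an inference */
        u32 saved = nvkm_rd32(device, 0x001700);
        nvkm_wr32(device, 0x001700, mem->offset >> 16); /* point window */
        nvkm_wr32(device, 0x700000, pattern[0]);  /* lands at mem->offset */
        nvkm_wr32(device, 0x001700, saved);       /* restore */
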
 
 void
-gt215_link_train_fini(struct nvkm_fb *pfb)
+gt215_link_train_fini(struct gt215_ram *ram)
 {
-	struct gt215_ram *ram = (void *)pfb->ram;
-
 	if (ram->ltrain.mem)
-		pfb->ram->put(pfb, &ram->ltrain.mem);
+		ram->base.func->put(&ram->base, &ram->ltrain.mem);
 }
 
 /*
@@ -347,24 +348,25 @@ gt215_link_train_fini(struct nvkm_fb *pfb)
  */
 #define T(t) cfg->timing_10_##t
 static int
-gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
+gt215_ram_timing_calc(struct gt215_ram *ram, u32 *timing)
 {
-	struct gt215_ram *ram = (void *)pfb->ram;
 	struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
 	int tUNK_base, tUNK_40_0, prevCL;
 	u32 cur2, cur3, cur7, cur8;
 
-	cur2 = nv_rd32(pfb, 0x100228);
-	cur3 = nv_rd32(pfb, 0x10022c);
-	cur7 = nv_rd32(pfb, 0x10023c);
-	cur8 = nv_rd32(pfb, 0x100240);
+	cur2 = nvkm_rd32(device, 0x100228);
+	cur3 = nvkm_rd32(device, 0x10022c);
+	cur7 = nvkm_rd32(device, 0x10023c);
+	cur8 = nvkm_rd32(device, 0x100240);
 
 
 	switch ((!T(CWL)) * ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_DDR2:
 		T(CWL) = T(CL) - 1;
 		break;
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_GDDR3:
 		T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
 		break;
 	}
@@ -402,8 +404,8 @@ gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
 	timing[8] = cur8 & 0xffffff00;
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_GDDR3:
 		tUNK_40_0 = prevCL - (cur8 & 0xff);
 		if (tUNK_40_0 > 0)
 			timing[8] |= T(CL);
@@ -412,11 +414,11 @@ gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
 		break;
 	}
 
-	nv_debug(pfb, "Entry: 220: %08x %08x %08x %08x\n",
-			timing[0], timing[1], timing[2], timing[3]);
-	nv_debug(pfb, "  230: %08x %08x %08x %08x\n",
-			timing[4], timing[5], timing[6], timing[7]);
-	nv_debug(pfb, "  240: %08x\n", timing[8]);
+	nvkm_debug(subdev, "Entry: 220: %08x %08x %08x %08x\n",
+		   timing[0], timing[1], timing[2], timing[3]);
+	nvkm_debug(subdev, "  230: %08x %08x %08x %08x\n",
+		   timing[4], timing[5], timing[6], timing[7]);
+	nvkm_debug(subdev, "  240: %08x\n", timing[8]);
 	return 0;
 }
 #undef T
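
The T() helper above relies on token pasting: T(CL) expands to
cfg->timing_10_CL, T(CWL) to cfg->timing_10_CWL, and so on, which is why
it can appear on the left-hand side of an assignment:

        #define T(t) cfg->timing_10_##t
        T(CWL) = T(CL) - 1; /* cfg->timing_10_CWL = cfg->timing_10_CL - 1; */
        #undef T
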
@@ -466,13 +468,13 @@ gt215_ram_lock_pll(struct gt215_ramfuc *fuc, struct gt215_clk_info *mclk)
 static void
 gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(fuc->base.pfb);
+	struct nvkm_gpio *gpio = fuc->base.fb->subdev.device->gpio;
 	struct dcb_gpio_func func;
 	u32 reg, sh, gpio_val;
 	int ret;
 
-	if (gpio->get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
-		ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	if (nvkm_gpio_get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
+		ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
 		if (ret)
 			return;
 
@@ -487,12 +489,14 @@ gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
 }
 
 static int
-gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gt215_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
+	struct gt215_ram *ram = gt215_ram(base);
 	struct gt215_ramfuc *fuc = &ram->fuc;
 	struct gt215_ltrain *train = &ram->ltrain;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct gt215_clk_info mclk;
 	struct nvkm_ram_data *next;
 	u8  ver, hdr, cnt, len, strap;
@@ -508,28 +512,27 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 	ram->base.next = next;
 
 	if (ram->ltrain.state == NVA3_TRAIN_ONCE)
-		gt215_link_train(pfb);
+		gt215_link_train(ram);
 
 	/* lookup memory config data relevant to the target frequency */
-	i = 0;
 	data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
 			       &next->bios);
 	if (!data || ver != 0x10 || hdr < 0x05) {
-		nv_error(pfb, "invalid/missing rammap entry\n");
+		nvkm_error(subdev, "invalid/missing rammap entry\n");
 		return -EINVAL;
 	}
 
 	/* locate specific data set for the attached memory */
-	strap = nvbios_ramcfg_index(nv_subdev(pfb));
+	strap = nvbios_ramcfg_index(subdev);
 	if (strap >= cnt) {
-		nv_error(pfb, "invalid ramcfg strap\n");
+		nvkm_error(subdev, "invalid ramcfg strap\n");
 		return -EINVAL;
 	}
 
 	data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
 			       &ver, &hdr, &next->bios);
 	if (!data || ver != 0x10 || hdr < 0x09) {
-		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		nvkm_error(subdev, "invalid/missing ramcfg entry\n");
 		return -EINVAL;
 	}
 
@@ -539,20 +542,20 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 				       &ver, &hdr, &cnt, &len,
 				       &next->bios);
 		if (!data || ver != 0x10 || hdr < 0x17) {
-			nv_error(pfb, "invalid/missing timing entry\n");
+			nvkm_error(subdev, "invalid/missing timing entry\n");
 			return -EINVAL;
 		}
 	}
 
-	ret = gt215_pll_info(nvkm_clk(pfb), 0x12, 0x4000, freq, &mclk);
+	ret = gt215_pll_info(device->clk, 0x12, 0x4000, freq, &mclk);
 	if (ret < 0) {
-		nv_error(pfb, "failed mclk calculation\n");
+		nvkm_error(subdev, "failed mclk calculation\n");
 		return ret;
 	}
 
-	gt215_ram_timing_calc(pfb, timing);
+	gt215_ram_timing_calc(ram, timing);
 
-	ret = ram_init(fuc, pfb);
+	ret = ram_init(fuc, ram->base.fb);
 	if (ret)
 		return ret;
 
@@ -562,13 +565,13 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 	ram->base.mr[2] = ram_rd32(fuc, mr[2]);
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_DDR2:
 		ret = nvkm_sddr2_calc(&ram->base);
 		break;
-	case NV_MEM_TYPE_DDR3:
+	case NVKM_RAM_TYPE_DDR3:
 		ret = nvkm_sddr3_calc(&ram->base);
 		break;
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_GDDR3:
 		ret = nvkm_gddr3_calc(&ram->base);
 		break;
 	default:
@@ -579,7 +582,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 	if (ret)
 		return ret;
 
-	/* XXX: where the fuck does 750MHz come from? */
+	/* XXX: 750MHz seems rather arbitrary */
 	if (freq <= 750000) {
 		r004018 = 0x10000000;
 		r100760 = 0x22222222;
@@ -590,7 +593,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 		r100da0 = 0x00000000;
 	}
 
-	if (!next->bios.ramcfg_10_DLLoff)
+	if (!next->bios.ramcfg_DLLoff)
 		r004018 |= 0x00004000;
 
 	/* pll2pll requires to switch to a safe clock first */
@@ -623,18 +626,18 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 	ram_nsec(fuc, 2000);
 
 	if (!next->bios.ramcfg_10_02_10) {
-		if (ram->base.type == NV_MEM_TYPE_GDDR3)
+		if (ram->base.type == NVKM_RAM_TYPE_GDDR3)
 			ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
 		else
 			ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
 	}
 
 	/* If we're disabling the DLL, do it now */
-	switch (next->bios.ramcfg_10_DLLoff * ram->base.type) {
-	case NV_MEM_TYPE_DDR3:
+	switch (next->bios.ramcfg_DLLoff * ram->base.type) {
+	case NVKM_RAM_TYPE_DDR3:
 		nvkm_sddr3_dll_disable(fuc, ram->base.mr);
 		break;
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_GDDR3:
 		nvkm_gddr3_dll_disable(fuc, ram->base.mr);
 		break;
 	}
@@ -650,7 +653,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 	ram_wr32(fuc, 0x1002dc, 0x00000001);
 	ram_nsec(fuc, 2000);
 
-	if (nv_device(pfb)->chipset == 0xa3 && freq <= 500000)
+	if (device->chipset == 0xa3 && freq <= 500000)
 		ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
 
 	/* Fiddle with clocks */
@@ -708,7 +711,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 		ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
 	}
 
-	if (nv_device(pfb)->chipset == 0xa3 && freq > 500000) {
+	if (device->chipset == 0xa3 && freq > 500000) {
 		ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
 	}
 
@@ -752,11 +755,11 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 
 	if (next->bios.ramcfg_10_02_04) {
 		switch (ram->base.type) {
-		case NV_MEM_TYPE_DDR3:
-			if (nv_device(pfb)->chipset != 0xa8)
+		case NVKM_RAM_TYPE_DDR3:
+			if (device->chipset != 0xa8)
 				r111100 |= 0x00000004;
 			/* no break */
-		case NV_MEM_TYPE_DDR2:
+		case NVKM_RAM_TYPE_DDR2:
 			r111100 |= 0x08000000;
 			break;
 		default:
@@ -764,12 +767,12 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 		}
 	} else {
 		switch (ram->base.type) {
-		case NV_MEM_TYPE_DDR2:
+		case NVKM_RAM_TYPE_DDR2:
 			r111100 |= 0x1a800000;
 			unk714  |= 0x00000010;
 			break;
-		case NV_MEM_TYPE_DDR3:
-			if (nv_device(pfb)->chipset == 0xa8) {
+		case NVKM_RAM_TYPE_DDR3:
+			if (device->chipset == 0xa8) {
 				r111100 |=  0x08000000;
 			} else {
 				r111100 &= ~0x00000004;
@@ -777,7 +780,7 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 			}
 			unk714  |= 0x00000010;
 			break;
-		case NV_MEM_TYPE_GDDR3:
+		case NVKM_RAM_TYPE_GDDR3:
 			r111100 |= 0x30000000;
 			unk714  |= 0x00000020;
 			break;
@@ -810,16 +813,16 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 		gt215_ram_fbvref(fuc, 1);
 
 	/* Reset DLL */
-	if (!next->bios.ramcfg_10_DLLoff)
+	if (!next->bios.ramcfg_DLLoff)
 		nvkm_sddr2_dll_reset(fuc);
 
-	if (ram->base.type == NV_MEM_TYPE_GDDR3) {
+	if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
 		ram_nsec(fuc, 31000);
 	} else {
 		ram_nsec(fuc, 14000);
 	}
 
-	if (ram->base.type == NV_MEM_TYPE_DDR3) {
+	if (ram->base.type == NVKM_RAM_TYPE_DDR3) {
 		ram_wr32(fuc, 0x100264, 0x1);
 		ram_nsec(fuc, 2000);
 	}
@@ -855,24 +858,24 @@ gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
 }
 
 static int
-gt215_ram_prog(struct nvkm_fb *pfb)
+gt215_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
+	struct gt215_ram *ram = gt215_ram(base);
 	struct gt215_ramfuc *fuc = &ram->fuc;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
 	bool exec = nvkm_boolopt(device->cfgopt, "NvMemExec", true);
 
 	if (exec) {
-		nv_mask(pfb, 0x001534, 0x2, 0x2);
+		nvkm_mask(device, 0x001534, 0x2, 0x2);
 
 		ram_exec(fuc, true);
 
 		/* Post-processing, avoids flicker */
-		nv_mask(pfb, 0x002504, 0x1, 0x0);
-		nv_mask(pfb, 0x001534, 0x2, 0x0);
+		nvkm_mask(device, 0x002504, 0x1, 0x0);
+		nvkm_mask(device, 0x001534, 0x2, 0x0);
 
-		nv_mask(pfb, 0x616308, 0x10, 0x10);
-		nv_mask(pfb, 0x616b08, 0x10, 0x10);
+		nvkm_mask(device, 0x616308, 0x10, 0x10);
+		nvkm_mask(device, 0x616b08, 0x10, 0x10);
 	} else {
 		ram_exec(fuc, false);
 	}
@@ -880,69 +883,56 @@ gt215_ram_prog(struct nvkm_fb *pfb)
 }
 
 static void
-gt215_ram_tidy(struct nvkm_fb *pfb)
+gt215_ram_tidy(struct nvkm_ram *base)
 {
-	struct gt215_ram *ram = (void *)pfb->ram;
-	struct gt215_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, false);
+	struct gt215_ram *ram = gt215_ram(base);
+	ram_exec(&ram->fuc, false);
 }
 
 static int
-gt215_ram_init(struct nvkm_object *object)
+gt215_ram_init(struct nvkm_ram *base)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-	struct gt215_ram   *ram = (void *)object;
-	int ret;
-
-	ret = nvkm_ram_init(&ram->base);
-	if (ret)
-		return ret;
-
-	gt215_link_train_init(pfb);
+	struct gt215_ram *ram = gt215_ram(base);
+	gt215_link_train_init(ram);
 	return 0;
 }
 
-static int
-gt215_ram_fini(struct nvkm_object *object, bool suspend)
+static void *
+gt215_ram_dtor(struct nvkm_ram *base)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-
-	if (!suspend)
-		gt215_link_train_fini(pfb);
-
-	return 0;
+	struct gt215_ram *ram = gt215_ram(base);
+	gt215_link_train_fini(ram);
+	return ram;
 }
 
-static int
-gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 datasize,
-	       struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+gt215_ram_func = {
+	.dtor = gt215_ram_dtor,
+	.init = gt215_ram_init,
+	.get = nv50_ram_get,
+	.put = nv50_ram_put,
+	.calc = gt215_ram_calc,
+	.prog = gt215_ram_prog,
+	.tidy = gt215_ram_tidy,
+};
+
+int
+gt215_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_gpio *gpio = nvkm_gpio(pfb);
+	struct nvkm_gpio *gpio = fb->subdev.device->gpio;
 	struct dcb_gpio_func func;
 	struct gt215_ram *ram;
-	int ret, i;
 	u32 reg, shift;
+	int ret, i;
+
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
 
-	ret = nv50_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	ret = nv50_ram_ctor(&gt215_ram_func, fb, &ram->base);
 	if (ret)
 		return ret;
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
-	case NV_MEM_TYPE_DDR3:
-	case NV_MEM_TYPE_GDDR3:
-		ram->base.calc = gt215_ram_calc;
-		ram->base.prog = gt215_ram_prog;
-		ram->base.tidy = gt215_ram_tidy;
-		break;
-	default:
-		nv_warn(ram, "reclocking of this ram type unsupported\n");
-		return 0;
-	}
-
 	ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
 	ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
 	ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
@@ -992,7 +982,7 @@ gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
 	}
 
-	ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
 	if (ret == 0) {
 		nv50_gpio_location(func.line, &reg, &shift);
 		ram->fuc.r_gpioFBVREF = ramfuc_reg(reg);
@@ -1000,13 +990,3 @@ gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 
 	return 0;
 }
-
-struct nvkm_oclass
-gt215_ram_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = gt215_ram_init,
-		.fini = gt215_ram_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index abc18e89a97c..0a0e44b75577 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -21,81 +21,67 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#define mcp77_ram(p) container_of((p), struct mcp77_ram, base)
+#include "ram.h"
 
-struct mcp77_ram_priv {
+struct mcp77_ram {
 	struct nvkm_ram base;
 	u64 poller_base;
 };
 
 static int
-mcp77_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 datasize,
-	       struct nvkm_object **pobject)
+mcp77_ram_init(struct nvkm_ram *base)
 {
-	u32 rsvd_head = ( 256 * 1024); /* vga memory */
-	u32 rsvd_tail = (1024 * 1024); /* vbios etc */
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct mcp77_ram_priv *priv;
-	int ret;
-
-	ret = nvkm_ram_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.type   = NV_MEM_TYPE_STOLEN;
-	priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
-	priv->base.size   = (u64)nv_rd32(pfb, 0x100e14) << 12;
+	struct mcp77_ram *ram = mcp77_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	u32 dniso  = ((ram->base.size - (ram->poller_base + 0x00)) >> 5) - 1;
+	u32 hostnb = ((ram->base.size - (ram->poller_base + 0x20)) >> 5) - 1;
+	u32 flush  = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
 
-	rsvd_tail += 0x1000;
-	priv->poller_base = priv->base.size - rsvd_tail;
-
-	ret = nvkm_mm_init(&pfb->vram, rsvd_head >> 12,
-			   (priv->base.size  - (rsvd_head + rsvd_tail)) >> 12,
-			   1);
-	if (ret)
-		return ret;
-
-	priv->base.get = nv50_ram_get;
-	priv->base.put = nv50_ram_put;
+	/* Enable NISO poller for various clients and set their associated
+	 * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+	 */
+	nvkm_wr32(device, 0x100c18, dniso);
+	nvkm_mask(device, 0x100c14, 0x00000000, 0x00000001);
+	nvkm_wr32(device, 0x100c1c, hostnb);
+	nvkm_mask(device, 0x100c14, 0x00000000, 0x00000002);
+	nvkm_wr32(device, 0x100c24, flush);
+	nvkm_mask(device, 0x100c14, 0x00000000, 0x00010000);
 	return 0;
 }
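
The three poller read addresses appear to be encoded as the distance
from the end of VRAM in 32-byte units, minus one.  With rsvd_tail =
1MiB + 0x1000 as set up in mcp77_ram_new() below, size - poller_base is
always 0x101000 bytes, so (worked example):

        u32 dniso  = ((0x101000 - 0x00) >> 5) - 1;  /* 0x807f */
        u32 hostnb = ((0x101000 - 0x20) >> 5) - 1;  /* 0x807e */
        u32 flush  = ((0x101000 - 0x40) >> 5) - 1;  /* 0x807d */
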
 
-static int
-mcp77_ram_init(struct nvkm_object *object)
+static const struct nvkm_ram_func
+mcp77_ram_func = {
+	.init = mcp77_ram_init,
+	.get = nv50_ram_get,
+	.put = nv50_ram_put,
+};
+
+int
+mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct mcp77_ram_priv *priv = (void *)object;
+	struct nvkm_device *device = fb->subdev.device;
+	u32 rsvd_head = ( 256 * 1024); /* vga memory */
+	u32 rsvd_tail = (1024 * 1024) + 0x1000; /* vbios etc + poller mem */
+	u64 base = (u64)nvkm_rd32(device, 0x100e10) << 12;
+	u64 size = (u64)nvkm_rd32(device, 0x100e14) << 12;
+	struct mcp77_ram *ram;
 	int ret;
-	u64 dniso, hostnb, flush;
 
-	ret = nvkm_ram_init(&priv->base);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = nvkm_ram_ctor(&mcp77_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
+			    size, 0, &ram->base);
 	if (ret)
 		return ret;
 
-	dniso  = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
-	hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
-	flush  = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+	ram->poller_base = size - rsvd_tail;
+	ram->base.stolen = base;
+	nvkm_mm_fini(&ram->base.vram);
 
-	/* Enable NISO poller for various clients and set their associated
-	 * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
-	 */
-	nv_wr32(pfb, 0x100c18, dniso);
-	nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
-	nv_wr32(pfb, 0x100c1c, hostnb);
-	nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
-	nv_wr32(pfb, 0x100c24, flush);
-	nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
-	return 0;
+	return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+			    (size - rsvd_head - rsvd_tail) >>
+			    NVKM_RAM_MM_SHIFT, 1);
 }
-
-struct nvkm_oclass
-mcp77_ram_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = mcp77_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = mcp77_ram_init,
-		.fini = _nvkm_ram_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
index 855de1617229..6f053a03d61c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
@@ -21,59 +21,45 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 #include "regsnv04.h"
 
-static int
-nv04_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
-	int ret;
+const struct nvkm_ram_func
+nv04_ram_func = {
+};
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
+int
+nv04_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	u32 boot0 = nvkm_rd32(device, NV04_PFB_BOOT_0);
+	u64 size;
+	enum nvkm_ram_type type;
 
 	if (boot0 & 0x00000100) {
-		ram->size  = ((boot0 >> 12) & 0xf) * 2 + 2;
-		ram->size *= 1024 * 1024;
+		size  = ((boot0 >> 12) & 0xf) * 2 + 2;
+		size *= 1024 * 1024;
 	} else {
 		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
-			ram->size = 32 * 1024 * 1024;
+			size = 32 * 1024 * 1024;
 			break;
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
-			ram->size = 16 * 1024 * 1024;
+			size = 16 * 1024 * 1024;
 			break;
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
-			ram->size = 8 * 1024 * 1024;
+			size = 8 * 1024 * 1024;
 			break;
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
-			ram->size = 4 * 1024 * 1024;
+			size = 4 * 1024 * 1024;
 			break;
 		}
 	}
 
 	if ((boot0 & 0x00000038) <= 0x10)
-		ram->type = NV_MEM_TYPE_SGRAM;
+		type = NVKM_RAM_TYPE_SGRAM;
 	else
-		ram->type = NV_MEM_TYPE_SDRAM;
+		type = NVKM_RAM_TYPE_SDRAM;
 
-	return 0;
+	return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
 }
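
A worked example of the boot0 decode above: with bit 8 set and bits
15:12 reading 0x3, the amount is (3 * 2 + 2) = 8 MiB; with bit 8 clear,
the legacy NV04_PFB_BOOT_0_RAM_AMOUNT field is used instead, and bits
5:3 pick SGRAM vs SDRAM:

        u32 boot0 = 0x00003100;                     /* hypothetical value */
        u64 size  = ((boot0 >> 12) & 0xf) * 2 + 2;  /* 8 */
        size *= 1024 * 1024;                        /* 8 MiB */
        /* (boot0 & 0x00000038) <= 0x10 -> NVKM_RAM_TYPE_SGRAM */
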
-
-struct nvkm_oclass
-nv04_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
index 3b8a1eda5b64..dfd155c98dbb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
@@ -21,39 +21,20 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-static int
-nv10_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	u32 cfg0 = nv_rd32(pfb, 0x100200);
-	int ret;
-
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = fb->subdev.device;
+	u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32 cfg0 = nvkm_rd32(device, 0x100200);
+	enum nvkm_ram_type type;
 
 	if (cfg0 & 0x00000001)
-		ram->type = NV_MEM_TYPE_DDR1;
+		type = NVKM_RAM_TYPE_DDR1;
 	else
-		ram->type = NV_MEM_TYPE_SDRAM;
+		type = NVKM_RAM_TYPE_SDRAM;
 
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	return 0;
+	return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
 }
-
-struct nvkm_oclass
-nv10_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index fbae05db4ffd..3c6a8710e812 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -21,33 +21,21 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-#include <core/device.h>
-
-static int
-nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
 	struct pci_dev *bridge;
 	u32 mem, mib;
-	int ret;
 
 	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
 	if (!bridge) {
-		nv_fatal(pfb, "no bridge device\n");
+		nvkm_error(&fb->subdev, "no bridge device\n");
 		return -ENODEV;
 	}
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	if (nv_device(pfb)->chipset == 0x1a) {
+	if (fb->subdev.device->chipset == 0x1a) {
 		pci_read_config_dword(bridge, 0x7c, &mem);
 		mib = ((mem >> 6) & 31) + 1;
 	} else {
@@ -55,18 +43,6 @@ nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
 		mib = ((mem >> 4) & 127) + 1;
 	}
 
-	ram->type = NV_MEM_TYPE_STOLEN;
-	ram->size = mib * 1024 * 1024;
-	return 0;
+	return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
+			     mib * 1024 * 1024, 0, pram);
 }
-
-struct nvkm_oclass
-nv1a_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv1a_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
index d9e7187bd235..747e47c10cc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
@@ -21,42 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-static int
-nv20_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+	struct nvkm_device *device = fb->subdev.device;
+	u32 pbus1218 =  nvkm_rd32(device, 0x001218);
+	u32     size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
+	u32     tags =  nvkm_rd32(device, 0x100320);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
+	case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
+	case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
+	case 0x00000300: type = NVKM_RAM_TYPE_GDDR2; break;
+	}
+
+	ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, tags, pram);
 	if (ret)
 		return ret;
 
-	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: ram->type = NV_MEM_TYPE_GDDR2; break;
-	}
-	ram->size  = (nv_rd32(pfb, 0x10020c) & 0xff000000);
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags  = nv_rd32(pfb, 0x100320);
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-struct nvkm_oclass
-nv20_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv20_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 3d31fa45c1a6..56f8cffc2560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -21,9 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
 #include <subdev/bios/init.h>
@@ -31,23 +30,23 @@
 #include <subdev/clk/pll.h>
 #include <subdev/timer.h>
 
-int
-nv40_ram_calc(struct nvkm_fb *pfb, u32 freq)
+static int
+nv40_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nv40_ram *ram = (void *)pfb->ram;
+	struct nv40_ram *ram = nv40_ram(base);
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pll pll;
 	int N1, M1, N2, M2;
 	int log2P, ret;
 
 	ret = nvbios_pll_parse(bios, 0x04, &pll);
 	if (ret) {
-		nv_error(pfb, "mclk pll data not found\n");
+		nvkm_error(subdev, "mclk pll data not found\n");
 		return ret;
 	}
 
-	ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq,
-			    &N1, &M1, &N2, &M2, &log2P);
+	ret = nv04_pll_calc(subdev, &pll, freq, &N1, &M1, &N2, &M2, &log2P);
 	if (ret < 0)
 		return ret;
 
@@ -64,11 +63,13 @@ nv40_ram_calc(struct nvkm_fb *pfb, u32 freq)
 	return 0;
 }
 
-int
-nv40_ram_prog(struct nvkm_fb *pfb)
+static int
+nv40_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nv40_ram *ram = (void *)pfb->ram;
+	struct nv40_ram *ram = nv40_ram(base);
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct bit_entry M;
 	u32 crtc_mask = 0;
 	u8  sr1[2];
@@ -76,12 +77,12 @@ nv40_ram_prog(struct nvkm_fb *pfb)
 
 	/* determine which CRTCs are active, fetch VGA_SR1 for each */
 	for (i = 0; i < 2; i++) {
-		u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000));
+		u32 vbl = nvkm_rd32(device, 0x600808 + (i * 0x2000));
 		u32 cnt = 0;
 		do {
-			if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) {
-				nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
-				sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000));
+			if (vbl != nvkm_rd32(device, 0x600808 + (i * 0x2000))) {
+				nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+				sr1[i] = nvkm_rd08(device, 0x0c03c5 + (i * 0x2000));
 				if (!(sr1[i] & 0x20))
 					crtc_mask |= (1 << i);
 				break;
@@ -94,55 +95,66 @@ nv40_ram_prog(struct nvkm_fb *pfb)
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
 			continue;
-		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
-		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+			if (!(tmp & 0x00010000))
+				break;
+		);
+
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+			if ( (tmp & 0x00010000))
+				break;
+		);
+
+		nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
 	}
 
 	/* prepare ram for reclocking */
-	nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */
-	nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
-	nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
-	nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
-	nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */
+	nvkm_wr32(device, 0x1002d4, 0x00000001); /* precharge */
+	nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nvkm_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+	nvkm_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
 
 	/* change the PLL of each memory partition */
-	nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000);
-	switch (nv_device(pfb)->chipset) {
+	nvkm_mask(device, 0x00c040, 0x0000c000, 0x00000000);
+	switch (device->chipset) {
 	case 0x40:
 	case 0x45:
 	case 0x41:
 	case 0x42:
 	case 0x47:
-		nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl);
-		nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl);
-		nv_wr32(pfb, 0x004048, ram->coef);
-		nv_wr32(pfb, 0x004030, ram->coef);
+		nvkm_mask(device, 0x004044, 0xc0771100, ram->ctrl);
+		nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl);
+		nvkm_wr32(device, 0x004048, ram->coef);
+		nvkm_wr32(device, 0x004030, ram->coef);
 	case 0x43:
 	case 0x49:
 	case 0x4b:
-		nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl);
-		nv_wr32(pfb, 0x00403c, ram->coef);
+		nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl);
+		nvkm_wr32(device, 0x00403c, ram->coef);
 	default:
-		nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl);
-		nv_wr32(pfb, 0x004024, ram->coef);
+		nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl);
+		nvkm_wr32(device, 0x004024, ram->coef);
 		break;
 	}
 	udelay(100);
-	nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000);
+	nvkm_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
 
 	/* re-enable normal operation of memory controller */
-	nv_wr32(pfb, 0x1002dc, 0x00000000);
-	nv_mask(pfb, 0x100210, 0x80000000, 0x80000000);
+	nvkm_wr32(device, 0x1002dc, 0x00000000);
+	nvkm_mask(device, 0x100210, 0x80000000, 0x80000000);
 	udelay(100);
 
 	/* execute memory reset script from vbios */
 	if (!bit_entry(bios, 'M', &M)) {
 		struct nvbios_init init = {
-			.subdev = nv_subdev(pfb),
+			.subdev = subdev,
 			.bios = bios,
-			.offset = nv_ro16(bios, M.offset + 0x00),
+			.offset = nvbios_rd16(bios, M.offset + 0x00),
 			.execute = 1,
 		};
 
@@ -155,58 +167,64 @@ nv40_ram_prog(struct nvkm_fb *pfb)
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
 			continue;
-		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]);
+
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+			if ( (tmp & 0x00010000))
+				break;
+		);
+
+		nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
 	}
 
 	return 0;
 }
 
-void
-nv40_ram_tidy(struct nvkm_fb *pfb)
+static void
+nv40_ram_tidy(struct nvkm_ram *base)
 {
 }
 
-static int
-nv40_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+nv40_ram_func = {
+	.calc = nv40_ram_calc,
+	.prog = nv40_ram_prog,
+	.tidy = nv40_ram_tidy,
+};
+
+int
+nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type type, u64 size,
+	      u32 tags, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
 	struct nv40_ram *ram;
-	u32 pbus1218 = nv_rd32(pfb, 0x001218);
-	int ret;
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+	return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, tags, &ram->base);
+}
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
+int
+nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	u32 pbus1218 = nvkm_rd32(device, 0x001218);
+	u32     size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32     tags = nvkm_rd32(device, 0x100320);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
+	int ret;
 
 	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
+	case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
+	case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
+	case 0x00000300: type = NVKM_RAM_TYPE_DDR2 ; break;
 	}
 
-	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->base.tags  =  nv_rd32(pfb, 0x100320);
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
+	ret = nv40_ram_new_(fb, type, size, tags, pram);
+	if (ret)
+		return ret;
+
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-
-struct nvkm_oclass
-nv40_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
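Note how the open-coded nv_wait() vblank loops in nv40_ram_prog() above became nvkm_msec() blocks: a register poll bounded by a timeout rather than an unbounded busy-wait. The following is a hedged userspace analogue of that pattern, not driver code; reg_read() and the 2000 ms bound stand in for nvkm_rd32() and the macro's timeout, and the fake counter exists only so the sketch terminates.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* stand-in for a register read; in the driver this is nvkm_rd32() */
static unsigned int reg_read(void)
{
	static unsigned int v;
	return v += 0x4000;	/* bit 16 toggles as the fake counter runs */
}

/* poll until (read & mask) == want, or the timeout expires */
static bool poll_reg(unsigned int mask, unsigned int want, long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((reg_read() & mask) == want)
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return false;
	}
}

int main(void)
{
	/* mirrors the two vblank waits: first for bit 16 to clear,
	 * then for it to come back, each bounded at 2000 ms */
	printf("cleared: %d\n", poll_reg(0x00010000, 0x00000000, 2000));
	printf("set:     %d\n", poll_reg(0x00010000, 0x00010000, 2000));
	return 0;
}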
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
new file mode 100644
index 000000000000..8a0524566b48
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -0,0 +1,14 @@
+#ifndef __NV40_FB_RAM_H__
+#define __NV40_FB_RAM_H__
+#define nv40_ram(p) container_of((p), struct nv40_ram, base)
+#include "ram.h"
+
+struct nv40_ram {
+	struct nvkm_ram base;
+	u32 ctrl;
+	u32 coef;
+};
+
+int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64, u32,
+		  struct nvkm_ram **);
+#endif
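The nv40_ram() macro above is the usual container_of() downcast: the new nvkm_ram_func callbacks receive the embedded nvkm_ram base, and chip code recovers its private wrapper from it. A minimal self-contained illustration of the idiom follows; all names here are invented for the sketch.

#include <stddef.h>
#include <stdio.h>

/* same idiom as the nv40_ram() macro: recover the containing struct
 * from a pointer to its embedded base member */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base {
	int type;
};

struct nv40_like {
	struct base base;
	unsigned int ctrl;	/* subclass-private, like nv40_ram.ctrl */
};

#define to_nv40(p) container_of((p), struct nv40_like, base)

/* a "callback" that, like nv40_ram_calc(), only sees the base */
static void calc(struct base *b)
{
	struct nv40_like *ram = to_nv40(b);
	printf("type=%d ctrl=%#x\n", b->type, ram->ctrl);
}

int main(void)
{
	struct nv40_like ram = { .base = { .type = 1 }, .ctrl = 0xc0771100 };
	calc(&ram.base);
	return 0;
}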
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
index 33c612b1355f..114828be292e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
@@ -21,46 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-static int
-nv41_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nv40_ram *ram;
-	u32 pfb474 = nv_rd32(pfb, 0x100474);
+	struct nvkm_device *device = fb->subdev.device;
+	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32  tags = nvkm_rd32(device, 0x100320);
+	u32 fb474 = nvkm_rd32(device, 0x100474);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	if (fb474 & 0x00000004)
+		type = NVKM_RAM_TYPE_GDDR3;
+	if (fb474 & 0x00000002)
+		type = NVKM_RAM_TYPE_DDR2;
+	if (fb474 & 0x00000001)
+		type = NVKM_RAM_TYPE_DDR1;
+
+	ret = nv40_ram_new_(fb, type, size, tags, pram);
 	if (ret)
 		return ret;
 
-	if (pfb474 & 0x00000004)
-		ram->base.type = NV_MEM_TYPE_GDDR3;
-	if (pfb474 & 0x00000002)
-		ram->base.type = NV_MEM_TYPE_DDR2;
-	if (pfb474 & 0x00000001)
-		ram->base.type = NV_MEM_TYPE_DDR1;
-
-	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->base.tags  =  nv_rd32(pfb, 0x100320);
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-struct nvkm_oclass
-nv41_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv41_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
index f575a7246403..bc56fbf1c788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
@@ -21,44 +21,22 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-static int
-nv44_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv44_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nv40_ram *ram;
-	u32 pfb474 = nv_rd32(pfb, 0x100474);
-	int ret;
+	struct nvkm_device *device = fb->subdev.device;
+	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32 fb474 = nvkm_rd32(device, 0x100474);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
+	if (fb474 & 0x00000004)
+		type = NVKM_RAM_TYPE_GDDR3;
+	if (fb474 & 0x00000002)
+		type = NVKM_RAM_TYPE_DDR2;
+	if (fb474 & 0x00000001)
+		type = NVKM_RAM_TYPE_DDR1;
 
-	if (pfb474 & 0x00000004)
-		ram->base.type = NV_MEM_TYPE_GDDR3;
-	if (pfb474 & 0x00000002)
-		ram->base.type = NV_MEM_TYPE_DDR2;
-	if (pfb474 & 0x00000001)
-		ram->base.type = NV_MEM_TYPE_DDR1;
-
-	ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
-	return 0;
+	return nv40_ram_new_(fb, type, size, 0, pram);
 }
-
-struct nvkm_oclass
-nv44_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
index 51b44cdb2732..c01f4b1022b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
@@ -21,46 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-static int
-nv49_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nv40_ram *ram;
-	u32 pfb914 = nv_rd32(pfb, 0x100914);
+	struct nvkm_device *device = fb->subdev.device;
+	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32  tags = nvkm_rd32(device, 0x100320);
+	u32 fb914 = nvkm_rd32(device, 0x100914);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	switch (pfb914 & 0x00000003) {
-	case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
+	switch (fb914 & 0x00000003) {
+	case 0x00000000: type = NVKM_RAM_TYPE_DDR1 ; break;
+	case 0x00000001: type = NVKM_RAM_TYPE_DDR2 ; break;
+	case 0x00000002: type = NVKM_RAM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}
 
-	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->base.tags  =  nv_rd32(pfb, 0x100320);
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
+	ret = nv40_ram_new_(fb, type, size, tags, pram);
+	if (ret)
+		return ret;
+
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-struct nvkm_oclass
-nv49_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv49_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
index f3ed1c60d730..fa3c2e06203d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
@@ -21,34 +21,13 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-static int
-nv4e_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv4e_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	int ret;
-
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->type = NV_MEM_TYPE_STOLEN;
-	return 0;
+	struct nvkm_device *device = fb->subdev.device;
+	u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_UNKNOWN,
+			     size, 0, pram);
 }
-
-struct nvkm_oclass
-nv4e_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv4e_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index d2c81dd635dc..9197e0ef5cdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -21,14 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#define nv50_ram(p) container_of((p), struct nv50_ram, base)
+#include "ram.h"
 #include "ramseq.h"
+#include "nv50.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/perf.h>
 #include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
 #include <subdev/bios/timing.h>
 #include <subdev/clk/pll.h>
 
@@ -38,11 +40,20 @@ struct nv50_ramseq {
 	struct hwsq_reg r_0x004008;
 	struct hwsq_reg r_0x00400c;
 	struct hwsq_reg r_0x00c040;
+	struct hwsq_reg r_0x100200;
 	struct hwsq_reg r_0x100210;
+	struct hwsq_reg r_0x10021c;
 	struct hwsq_reg r_0x1002d0;
 	struct hwsq_reg r_0x1002d4;
 	struct hwsq_reg r_0x1002dc;
-	struct hwsq_reg r_0x100da0[8];
+	struct hwsq_reg r_0x10053c;
+	struct hwsq_reg r_0x1005a0;
+	struct hwsq_reg r_0x1005a4;
+	struct hwsq_reg r_0x100710;
+	struct hwsq_reg r_0x100714;
+	struct hwsq_reg r_0x100718;
+	struct hwsq_reg r_0x10071c;
+	struct hwsq_reg r_0x100da0;
 	struct hwsq_reg r_0x100e20;
 	struct hwsq_reg r_0x100e24;
 	struct hwsq_reg r_0x611200;
@@ -55,64 +66,181 @@ struct nv50_ram {
 	struct nv50_ramseq hwsq;
 };
 
-#define QFX5800NVA0 1
+#define T(t) cfg->timing_10_##t
+static int
+nv50_ram_timing_calc(struct nv50_ram *ram, u32 *timing)
+{
+	struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 cur2, cur4, cur7, cur8;
+	u8 unkt3b;
+
+	cur2 = nvkm_rd32(device, 0x100228);
+	cur4 = nvkm_rd32(device, 0x100230);
+	cur7 = nvkm_rd32(device, 0x10023c);
+	cur8 = nvkm_rd32(device, 0x100240);
+
+	switch ((!T(CWL)) * ram->base.type) {
+	case NVKM_RAM_TYPE_DDR2:
+		T(CWL) = T(CL) - 1;
+		break;
+	case NVKM_RAM_TYPE_GDDR3:
+		T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
+		break;
+	}
+
+	/* XXX: N=1 is not proper statistics */
+	if (device->chipset == 0xa0) {
+		unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40;
+		timing[6] = (0x2d + T(CL) - T(CWL) +
+				ram->base.next->bios.rammap_00_16_40) << 16 |
+			    T(CWL) << 8 |
+			    (0x2f + T(CL) - T(CWL));
+	} else {
+		unkt3b = 0x16;
+		timing[6] = (0x2b + T(CL) - T(CWL)) << 16 |
+			    max_t(s8, T(CWL) - 2, 1) << 8 |
+			    (0x2e + T(CL) - T(CWL));
+	}
+
+	timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
+	timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
+		    max_t(u8, T(18), 1) << 16 |
+		    (T(WTR) + 1 + T(CWL)) << 8 |
+		    (3 + T(CL) - T(CWL));
+	timing[2] = (T(CWL) - 1) << 24 |
+		    (T(RRD) << 16) |
+		    (T(RCDWR) << 8) |
+		    T(RCDRD);
+	timing[3] = (unkt3b - 2 + T(CL)) << 24 |
+		    unkt3b << 16 |
+		    (T(CL) - 1) << 8 |
+		    (T(CL) - 1);
+	timing[4] = (cur4 & 0xffff0000) |
+		    T(13) << 8 |
+		    T(13);
+	timing[5] = T(RFC) << 24 |
+		    max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
+		    T(RP);
+	/* Timing 6 is already done above */
+	timing[7] = (cur7 & 0xff00ffff) | (T(CL) - 1) << 16;
+	timing[8] = (cur8 & 0xffffff00);
+
+	/* XXX: P.version == 1 only has DDR2 and GDDR3? */
+	if (ram->base.type == NVKM_RAM_TYPE_DDR2) {
+		timing[5] |= (T(CL) + 3) << 8;
+		timing[8] |= (T(CL) - 4);
+	} else
+	if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
+		timing[5] |= (T(CL) + 2) << 8;
+		timing[8] |= (T(CL) - 2);
+	}
+
+	nvkm_debug(subdev, " 220: %08x %08x %08x %08x\n",
+		   timing[0], timing[1], timing[2], timing[3]);
+	nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
+		   timing[4], timing[5], timing[6], timing[7]);
+	nvkm_debug(subdev, " 240: %08x\n", timing[8]);
+	return 0;
+}
+#undef T
+
+static void
+nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
+{
+	ram_mask(hwsq, mr[0], 0x100, 0x100);
+	ram_mask(hwsq, mr[0], 0x100, 0x000);
+	ram_nsec(hwsq, 24000);
+}
 
 static int
-nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
+nv50_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nv50_ram *ram = (void *)pfb->ram;
+	struct nv50_ram *ram = nv50_ram(base);
 	struct nv50_ramseq *hwsq = &ram->hwsq;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_perfE perfE;
 	struct nvbios_pll mpll;
-	struct {
-		u32 data;
-		u8  size;
-	} ramcfg, timing;
-	u8  ver, hdr, cnt, len, strap;
+	struct nvkm_ram_data *next;
+	u8  ver, hdr, cnt, len, strap, size;
+	u32 data;
+	u32 r100da0, r004008, unk710, unk714, unk718, unk71c;
 	int N1, M1, N2, M2, P;
 	int ret, i;
+	u32 timing[9];
+
+	next = &ram->base.target;
+	next->freq = freq;
+	ram->base.next = next;
 
 	/* lookup closest matching performance table entry for frequency */
 	i = 0;
 	do {
-		ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
-					    &ramcfg.size, &perfE);
-		if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) ||
-		    (ramcfg.size < 2)) {
-			nv_error(pfb, "invalid/missing perftab entry\n");
+		data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
+				     &size, &perfE);
+		if (!data || (ver < 0x25 || ver >= 0x40) ||
+		    (size < 2)) {
+			nvkm_error(subdev, "invalid/missing perftab entry\n");
 			return -EINVAL;
 		}
 	} while (perfE.memory < freq);
 
+	nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios);
+
 	/* locate specific data set for the attached memory */
-	strap = nvbios_ramcfg_index(nv_subdev(pfb));
+	strap = nvbios_ramcfg_index(subdev);
 	if (strap >= cnt) {
-		nv_error(pfb, "invalid ramcfg strap\n");
+		nvkm_error(subdev, "invalid ramcfg strap\n");
 		return -EINVAL;
 	}
 
-	ramcfg.data += hdr + (strap * ramcfg.size);
+	data = nvbios_rammapSp_from_perf(bios, data + hdr, size, strap,
+			&next->bios);
+	if (!data) {
+		nvkm_error(subdev, "invalid/missing rammap entry\n");
+		return -EINVAL;
+	}
 
 	/* lookup memory timings, if bios says they're present */
-	strap = nv_ro08(bios, ramcfg.data + 0x01);
-	if (strap != 0xff) {
-		timing.data = nvbios_timingEe(bios, strap, &ver, &hdr,
-					      &cnt, &len);
-		if (!timing.data || ver != 0x10 || hdr < 0x12) {
-			nv_error(pfb, "invalid/missing timing entry "
+	if (next->bios.ramcfg_timing != 0xff) {
+		data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
+					&ver, &hdr, &cnt, &len, &next->bios);
+		if (!data || ver != 0x10 || hdr < 0x12) {
+			nvkm_error(subdev, "invalid/missing timing entry "
 				 "%02x %04x %02x %02x\n",
-				 strap, timing.data, ver, hdr);
+				 strap, data, ver, hdr);
 			return -EINVAL;
 		}
-	} else {
-		timing.data = 0;
 	}
 
-	ret = ram_init(hwsq, nv_subdev(pfb));
+	nv50_ram_timing_calc(ram, timing);
+
+	ret = ram_init(hwsq, subdev);
 	if (ret)
 		return ret;
 
+	/* Determine ram-specific MR values */
+	ram->base.mr[0] = ram_rd32(hwsq, mr[0]);
+	ram->base.mr[1] = ram_rd32(hwsq, mr[1]);
+	ram->base.mr[2] = ram_rd32(hwsq, mr[2]);
+
+	switch (ram->base.type) {
+	case NVKM_RAM_TYPE_GDDR3:
+		ret = nvkm_gddr3_calc(&ram->base);
+		break;
+	default:
+		ret = -ENOSYS;
+		break;
+	}
+
+	if (ret)
+		return ret;
+
+	/* Always disable this bit during reclock */
+	ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000);
+
 	ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
 	ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
 	ram_wr32(hwsq, 0x611200, 0x00003300);
@@ -120,6 +248,7 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
 	ram_nsec(hwsq, 8000);
 	ram_setf(hwsq, 0x10, 0x00); /* disable fb */
 	ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
+	ram_nsec(hwsq, 2000);
 
 	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
 	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
@@ -129,97 +258,149 @@ nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
 
 	ret = nvbios_pll_parse(bios, 0x004008, &mpll);
 	mpll.vco2.max_freq = 0;
-	if (ret == 0) {
-		ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
+	if (ret >= 0) {
+		ret = nv04_pll_calc(subdev, &mpll, freq,
 				    &N1, &M1, &N2, &M2, &P);
-		if (ret == 0)
+		if (ret <= 0)
 			ret = -EINVAL;
 	}
 
 	if (ret < 0)
 		return ret;
 
+	/* XXX: 750MHz seems rather arbitrary */
+	if (freq <= 750000) {
+		r100da0 = 0x00000010;
+		r004008 = 0x90000000;
+	} else {
+		r100da0 = 0x00000000;
+		r004008 = 0x80000000;
+	}
+
+	r004008 |= (mpll.bias_p << 19) | (P << 22) | (P << 16);
+
 	ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
-	ram_mask(hwsq, 0x004008, 0x00000200, 0x00000200);
+	/* XXX: Is rammap_00_16_40 the DLL bit we've seen in GT215? Why does
+	 * it have a different rammap bit from DLLoff? */
+	ram_mask(hwsq, 0x004008, 0x00004200, 0x00000200 |
+			next->bios.rammap_00_16_40 << 14);
 	ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
-	ram_mask(hwsq, 0x004008, 0x81ff0000, 0x80000000 | (mpll.bias_p << 19) |
-					     (P << 22) | (P << 16));
-#if QFX5800NVA0
-	for (i = 0; i < 8; i++)
-		ram_mask(hwsq, 0x100da0[i], 0x00000000, 0x00000000); /*XXX*/
-#endif
-	ram_nsec(hwsq, 96000); /*XXX*/
+	ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);
+	if (subdev->device->chipset >= 0x96)
+		ram_wr32(hwsq, 0x100da0, r100da0);
+	ram_nsec(hwsq, 64000); /*XXX*/
+	ram_nsec(hwsq, 32000); /*XXX*/
+
 	ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
 
 	ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
+	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
 	ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
 
 	ram_nsec(hwsq, 12000);
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_DDR2:
 		ram_nuke(hwsq, mr[0]); /* force update */
 		ram_mask(hwsq, mr[0], 0x000, 0x000);
 		break;
-	case NV_MEM_TYPE_GDDR3:
-		ram_mask(hwsq, mr[2], 0x000, 0x000);
+	case NVKM_RAM_TYPE_GDDR3:
+		ram_nuke(hwsq, mr[1]); /* force update */
+		ram_wr32(hwsq, mr[1], ram->base.mr[1]);
 		ram_nuke(hwsq, mr[0]); /* force update */
-		ram_mask(hwsq, mr[0], 0x000, 0x000);
+		ram_wr32(hwsq, mr[0], ram->base.mr[0]);
 		break;
 	default:
 		break;
 	}
 
-	ram_mask(hwsq, timing[3], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[1], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[6], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[7], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[8], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[2], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[4], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[5], 0x00000000, 0x00000000); /*XXX*/
-
-	ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
-
-#if QFX5800NVA0
-	ram_nuke(hwsq, 0x100e24);
-	ram_mask(hwsq, 0x100e24, 0x00000000, 0x00000000);
-	ram_nuke(hwsq, 0x100e20);
-	ram_mask(hwsq, 0x100e20, 0x00000000, 0x00000000);
-#endif
+	ram_mask(hwsq, timing[3], 0xffffffff, timing[3]);
+	ram_mask(hwsq, timing[1], 0xffffffff, timing[1]);
+	ram_mask(hwsq, timing[6], 0xffffffff, timing[6]);
+	ram_mask(hwsq, timing[7], 0xffffffff, timing[7]);
+	ram_mask(hwsq, timing[8], 0xffffffff, timing[8]);
+	ram_mask(hwsq, timing[0], 0xffffffff, timing[0]);
+	ram_mask(hwsq, timing[2], 0xffffffff, timing[2]);
+	ram_mask(hwsq, timing[4], 0xffffffff, timing[4]);
+	ram_mask(hwsq, timing[5], 0xffffffff, timing[5]);
+
+	if (!next->bios.ramcfg_00_03_02)
+		ram_mask(hwsq, 0x10021c, 0x00010000, 0x00000000);
+	ram_mask(hwsq, 0x100200, 0x00001000, !next->bios.ramcfg_00_04_02 << 12);
+
+	/* XXX: A lot of this could be "chipset"/"ram type" specific stuff */
+	unk710  = ram_rd32(hwsq, 0x100710) & ~0x00000101;
+	unk714  = ram_rd32(hwsq, 0x100714) & ~0xf0000020;
+	unk718  = ram_rd32(hwsq, 0x100718) & ~0x00000100;
+	unk71c  = ram_rd32(hwsq, 0x10071c) & ~0x00000100;
+
+	if ( next->bios.ramcfg_00_03_01)
+		unk71c |= 0x00000100;
+	if ( next->bios.ramcfg_00_03_02)
+		unk710 |= 0x00000100;
+	if (!next->bios.ramcfg_00_03_08) {
+		unk710 |= 0x1;
+		unk714 |= 0x20;
+	}
+	if ( next->bios.ramcfg_00_04_04)
+		unk714 |= 0x70000000;
+	if ( next->bios.ramcfg_00_04_20)
+		unk718 |= 0x00000100;
+
+	ram_mask(hwsq, 0x100714, 0xffffffff, unk714);
+	ram_mask(hwsq, 0x10071c, 0xffffffff, unk71c);
+	ram_mask(hwsq, 0x100718, 0xffffffff, unk718);
+	ram_mask(hwsq, 0x100710, 0xffffffff, unk710);
+
+	if (next->bios.rammap_00_16_20) {
+		ram_wr32(hwsq, 0x1005a0, next->bios.ramcfg_00_07 << 16 |
+					 next->bios.ramcfg_00_06 << 8 |
+					 next->bios.ramcfg_00_05);
+		ram_wr32(hwsq, 0x1005a4, next->bios.ramcfg_00_09 << 8 |
+					 next->bios.ramcfg_00_08);
+		ram_mask(hwsq, 0x10053c, 0x00001000, 0x00000000);
+	} else {
+		ram_mask(hwsq, 0x10053c, 0x00001000, 0x00001000);
+	}
+	ram_mask(hwsq, mr[1], 0xffffffff, ram->base.mr[1]);
 
-	ram_mask(hwsq, mr[0], 0x100, 0x100);
-	ram_mask(hwsq, mr[0], 0x100, 0x000);
+	/* Reset DLL */
+	if (!next->bios.ramcfg_DLLoff)
+		nvkm_sddr2_dll_reset(hwsq);
 
 	ram_setf(hwsq, 0x10, 0x01); /* enable fb */
 	ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
 	ram_wr32(hwsq, 0x611200, 0x00003330);
 	ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
+
+	if (next->bios.rammap_00_17_02)
+		ram_mask(hwsq, 0x100200, 0x00000800, 0x00000800);
+	if (!next->bios.rammap_00_16_40)
+		ram_mask(hwsq, 0x004008, 0x00004000, 0x00000000);
+	if (next->bios.ramcfg_00_03_02)
+		ram_mask(hwsq, 0x10021c, 0x00010000, 0x00010000);
+
 	return 0;
 }
 
 static int
-nv50_ram_prog(struct nvkm_fb *pfb)
+nv50_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct nv50_ram *ram = (void *)pfb->ram;
-	struct nv50_ramseq *hwsq = &ram->hwsq;
-
-	ram_exec(hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
+	struct nv50_ram *ram = nv50_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	ram_exec(&ram->hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
 	return 0;
 }
 
 static void
-nv50_ram_tidy(struct nvkm_fb *pfb)
+nv50_ram_tidy(struct nvkm_ram *base)
 {
-	struct nv50_ram *ram = (void *)pfb->ram;
-	struct nv50_ramseq *hwsq = &ram->hwsq;
-	ram_exec(hwsq, false);
+	struct nv50_ram *ram = nv50_ram(base);
+	ram_exec(&ram->hwsq, false);
 }
 
 void
-__nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem)
+__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
 {
 	struct nvkm_mm_node *this;
 
@@ -227,14 +408,14 @@ __nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem)
 		this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
 
 		list_del(&this->rl_entry);
-		nvkm_mm_free(&pfb->vram, &this);
+		nvkm_mm_free(&ram->vram, &this);
 	}
 
-	nvkm_mm_free(&pfb->tags, &mem->tag);
+	nvkm_mm_free(&ram->tags, &mem->tag);
 }
 
 void
-nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
+nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
 {
 	struct nvkm_mem *mem = *pmem;
 
@@ -242,19 +423,19 @@ nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
 	if (unlikely(mem == NULL))
 		return;
 
-	mutex_lock(&pfb->base.mutex);
-	__nv50_ram_put(pfb, mem);
-	mutex_unlock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
+	__nv50_ram_put(ram, mem);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	kfree(mem);
 }
 
 int
-nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	     u32 memtype, struct nvkm_mem **pmem)
 {
-	struct nvkm_mm *heap = &pfb->vram;
-	struct nvkm_mm *tags = &pfb->tags;
+	struct nvkm_mm *heap = &ram->vram;
+	struct nvkm_mm *tags = &ram->tags;
 	struct nvkm_mm_node *r;
 	struct nvkm_mem *mem;
 	int comp = (memtype & 0x300) >> 8;
@@ -262,17 +443,17 @@ nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
 	int back = (memtype & 0x800);
 	int min, max, ret;
 
-	max = (size >> 12);
-	min = ncmin ? (ncmin >> 12) : max;
-	align >>= 12;
+	max = (size >> NVKM_RAM_MM_SHIFT);
+	min = ncmin ? (ncmin >> NVKM_RAM_MM_SHIFT) : max;
+	align >>= NVKM_RAM_MM_SHIFT;
 
 	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
-	mutex_lock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (comp) {
-		if (align == 16) {
+		if (align == (1 << (16 - NVKM_RAM_MM_SHIFT))) {
 			int n = (max >> 4) * comp;
 
 			ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
@@ -295,34 +476,45 @@ nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
 		else
 			ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
 		if (ret) {
-			mutex_unlock(&pfb->base.mutex);
-			pfb->ram->put(pfb, &mem);
+			mutex_unlock(&ram->fb->subdev.mutex);
+			ram->func->put(ram, &mem);
 			return ret;
 		}
 
 		list_add_tail(&r->rl_entry, &mem->regions);
 		max -= r->length;
 	} while (max);
-	mutex_unlock(&pfb->base.mutex);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << 12;
+	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
 
+static const struct nvkm_ram_func
+nv50_ram_func = {
+	.get = nv50_ram_get,
+	.put = nv50_ram_put,
+	.calc = nv50_ram_calc,
+	.prog = nv50_ram_prog,
+	.tidy = nv50_ram_tidy,
+};
+
 static u32
-nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
+nv50_fb_vram_rblock(struct nvkm_ram *ram)
 {
+	struct nvkm_subdev *subdev = &ram->fb->subdev;
+	struct nvkm_device *device = subdev->device;
 	int colbits, rowbitsa, rowbitsb, banks;
 	u64 rowsize, predicted;
 	u32 r0, r4, rt, rblock_size;
 
-	r0 = nv_rd32(pfb, 0x100200);
-	r4 = nv_rd32(pfb, 0x100204);
-	rt = nv_rd32(pfb, 0x100250);
-	nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n",
-		 r0, r4, rt, nv_rd32(pfb, 0x001540));
+	r0 = nvkm_rd32(device, 0x100200);
+	r4 = nvkm_rd32(device, 0x100204);
+	rt = nvkm_rd32(device, 0x100250);
+	nvkm_debug(subdev, "memcfg %08x %08x %08x %08x\n",
+		   r0, r4, rt, nvkm_rd32(device, 0x001540));
 
 	colbits  =  (r4 & 0x0000f000) >> 12;
 	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
@@ -335,103 +527,94 @@ nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
 		predicted += rowsize << rowbitsb;
 
 	if (predicted != ram->size) {
-		nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
-			(u32)(ram->size >> 20));
+		nvkm_warn(subdev, "memory controller reports %d MiB VRAM\n",
+			  (u32)(ram->size >> 20));
 	}
 
 	rblock_size = rowsize;
 	if (rt & 1)
 		rblock_size *= 3;
 
-	nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+	nvkm_debug(subdev, "rblock %d bytes\n", rblock_size);
 	return rblock_size;
 }
 
 int
-nv50_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nv50_ram_ctor(const struct nvkm_ram_func *func,
+	      struct nvkm_fb *fb, struct nvkm_ram *ram)
 {
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
+	struct nvkm_device *device = fb->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+	u64 size = nvkm_rd32(device, 0x10020c);
+	u32 tags = nvkm_rd32(device, 0x100320);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create_(parent, engine, oclass, length, pobject);
-	ram = *pobject;
-	if (ret)
-		return ret;
-
-	ram->size = nv_rd32(pfb, 0x10020c);
-	ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
-
-	ram->part_mask = (nv_rd32(pfb, 0x001540) & 0x00ff0000) >> 16;
-	ram->parts = hweight8(ram->part_mask);
-
-	switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
-	case 0: ram->type = NV_MEM_TYPE_DDR1; break;
+	switch (nvkm_rd32(device, 0x100714) & 0x00000007) {
+	case 0: type = NVKM_RAM_TYPE_DDR1; break;
 	case 1:
-		if (nvkm_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
-			ram->type = NV_MEM_TYPE_DDR3;
+		if (nvkm_fb_bios_memtype(bios) == NVKM_RAM_TYPE_DDR3)
+			type = NVKM_RAM_TYPE_DDR3;
 		else
-			ram->type = NV_MEM_TYPE_DDR2;
+			type = NVKM_RAM_TYPE_DDR2;
 		break;
-	case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
-	case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
-	case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
+	case 2: type = NVKM_RAM_TYPE_GDDR3; break;
+	case 3: type = NVKM_RAM_TYPE_GDDR4; break;
+	case 4: type = NVKM_RAM_TYPE_GDDR5; break;
 	default:
 		break;
 	}
 
-	ret = nvkm_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
-			   (rsvd_head + rsvd_tail),
-			   nv50_fb_vram_rblock(pfb, ram) >> 12);
+	size = (size & 0x000000ff) << 32 | (size & 0xffffff00);
+
+	ret = nvkm_ram_ctor(func, fb, type, size, tags, ram);
 	if (ret)
 		return ret;
 
-	ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
-	ram->tags  =  nv_rd32(pfb, 0x100320);
-	ram->get = nv50_ram_get;
-	ram->put = nv50_ram_put;
-	return 0;
+	ram->part_mask = (nvkm_rd32(device, 0x001540) & 0x00ff0000) >> 16;
+	ram->parts = hweight8(ram->part_mask);
+	ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
+	nvkm_mm_fini(&ram->vram);
+
+	return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+			    (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
+			    nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
 }
 
-static int
-nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 datasize,
-	      struct nvkm_object **pobject)
+int
+nv50_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
 	struct nv50_ram *ram;
 	int ret, i;
 
-	ret = nv50_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = nv50_ram_ctor(&nv50_ram_func, fb, &ram->base);
 	if (ret)
 		return ret;
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
-	case NV_MEM_TYPE_GDDR3:
-		ram->base.calc = nv50_ram_calc;
-		ram->base.prog = nv50_ram_prog;
-		ram->base.tidy = nv50_ram_tidy;
-		break;
-	default:
-		nv_warn(ram, "reclocking of this ram type unsupported\n");
-		return 0;
-	}
-
 	ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
 	ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
 	ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
 	ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
+	ram->hwsq.r_0x100200 = hwsq_reg(0x100200);
 	ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
+	ram->hwsq.r_0x10021c = hwsq_reg(0x10021c);
 	ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
 	ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
 	ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
-	for (i = 0; i < 8; i++)
-		ram->hwsq.r_0x100da0[i] = hwsq_reg(0x100da0 + (i * 0x04));
+	ram->hwsq.r_0x10053c = hwsq_reg(0x10053c);
+	ram->hwsq.r_0x1005a0 = hwsq_reg(0x1005a0);
+	ram->hwsq.r_0x1005a4 = hwsq_reg(0x1005a4);
+	ram->hwsq.r_0x100710 = hwsq_reg(0x100710);
+	ram->hwsq.r_0x100714 = hwsq_reg(0x100714);
+	ram->hwsq.r_0x100718 = hwsq_reg(0x100718);
+	ram->hwsq.r_0x10071c = hwsq_reg(0x10071c);
+	ram->hwsq.r_0x100da0 = hwsq_stride(0x100da0, 4, ram->base.part_mask);
 	ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
 	ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
 	ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
@@ -453,13 +636,3 @@ nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 
 	return 0;
 }
-
-struct nvkm_oclass
-nv50_ram_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
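The nv50 reclock path above never touches the hardware directly: ram_rd32()/ram_mask()/ram_wr32() record operations against the shadowed hwsq_reg registers declared in nv50_ramseq, and ram_exec() either replays the script (the prog path, gated by the NvMemExec option) or discards it (the tidy path). A rough stand-alone sketch of that record-then-execute idea, with invented names and a printf in place of real register writes:

#include <stdio.h>

/* a tiny record-then-execute script, invented names throughout */
struct op {
	unsigned int addr, mask, data;
};

static struct op script[16];
static int nops;

static void ram_mask(unsigned int addr, unsigned int mask, unsigned int data)
{
	if (nops < 16)
		script[nops++] = (struct op){ addr, mask, data };
}

static void ram_exec(int exec)
{
	int i;

	for (i = 0; exec && i < nops; i++)
		printf("wr %06x: mask %08x data %08x\n",
		       script[i].addr, script[i].mask, script[i].data);
	nops = 0;	/* the tidy path discards without writing */
}

int main(void)
{
	ram_mask(0x100200, 0x00000800, 0x00000000);
	ram_mask(0x004008, 0x00002200, 0x00002000);
	ram_exec(1);	/* prog: replay against "hardware" */

	ram_mask(0x100200, 0x00000800, 0x00000800);
	ram_exec(0);	/* tidy: drop the recorded script */
	return 0;
}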
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index afab42df28d4..86bf67456b14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -65,7 +65,7 @@ nvkm_sddr2_calc(struct nvkm_ram *ram)
 	case 0x10:
 		CL  = ram->next->bios.timing_10_CL;
 		WR  = ram->next->bios.timing_10_WR;
-		DLL = !ram->next->bios.ramcfg_10_DLLoff;
+		DLL = !ram->next->bios.ramcfg_DLLoff;
 		ODT = ram->next->bios.timing_10_ODT & 3;
 		break;
 	case 0x20:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index 10844355c3f3..b4edc97dc8c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -53,7 +53,7 @@ static const struct ramxlat
 ramddr3_wr[] = {
 	{ 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
 	/* the below are mentioned in some, but not all, ddr3 docs */
-	{ 14, 7 }, { 16, 0 },
+	{ 14, 7 }, { 15, 7 }, { 16, 0 },
 	{ -1 }
 };
 
@@ -61,7 +61,7 @@ static const struct ramxlat
 ramddr3_cwl[] = {
 	{ 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
 	/* the below are mentioned in some, but not all, ddr3 docs */
-	{ 9, 4 },
+	{ 9, 4 }, { 10, 5 },
 	{ -1 }
 };
 
@@ -79,7 +79,7 @@ nvkm_sddr3_calc(struct nvkm_ram *ram)
 		CWL = ram->next->bios.timing_10_CWL;
 		CL  = ram->next->bios.timing_10_CL;
 		WR  = ram->next->bios.timing_10_WR;
-		DLL = !ram->next->bios.ramcfg_10_DLLoff;
+		DLL = !ram->next->bios.ramcfg_DLLoff;
 		ODT = ram->next->bios.timing_10_ODT;
 		break;
 	case 0x20:
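The ramxlat tables extended above are sentinel-terminated (timing value, mode-register encoding) pairs; adding { 15, 7 } and { 10, 5 } simply teaches the lookup about values some DDR3 parts report. A stand-alone sketch of the sentinel walk follows; the driver's actual helper lives elsewhere in fb/, so this lookup function is illustrative only.

#include <stdio.h>

/* same shape as the ramxlat tables: (timing value, MR encoding) pairs,
 * terminated by a negative sentinel */
struct xlat {
	int id;
	int enc;
};

static const struct xlat ddr3_wr[] = {
	{  5, 1 }, {  6, 2 }, {  7, 3 }, {  8, 4 }, { 10, 5 }, { 12, 6 },
	{ 14, 7 }, { 15, 7 }, { 16, 0 },
	{ -1 }
};

/* illustrative lookup; walks entries until the -1 sentinel */
static int xlat(const struct xlat *t, int id)
{
	for (; t->id >= 0; t++) {
		if (t->id == id)
			return t->enc;
	}
	return -1;	/* no encoding known for this value */
}

int main(void)
{
	printf("WR=15 -> %d\n", xlat(ddr3_wr, 15));	/* 7, the new entry */
	printf("WR=11 -> %d\n", xlat(ddr3_wr, 11));	/* -1, unsupported */
	return 0;
}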
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index b7b7193bbce7..f4144979a79c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -21,31 +21,34 @@
  *
  * Authors: Martin Peres
  */
-#include <subdev/fuse.h>
+#include "priv.h"
 
-int
-_nvkm_fuse_init(struct nvkm_object *object)
+u32
+nvkm_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct nvkm_fuse *fuse = (void *)object;
-	return nvkm_subdev_init(&fuse->base);
+	return fuse->func->read(fuse, addr);
 }
 
-void
-_nvkm_fuse_dtor(struct nvkm_object *object)
+static void *
+nvkm_fuse_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fuse *fuse = (void *)object;
-	nvkm_subdev_destroy(&fuse->base);
+	return nvkm_fuse(subdev);
 }
 
+static const struct nvkm_subdev_func
+nvkm_fuse = {
+	.dtor = nvkm_fuse_dtor,
+};
+
 int
-nvkm_fuse_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_fuse **pfuse)
 {
 	struct nvkm_fuse *fuse;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "FUSE",
-				  "fuse", length, pobject);
-	fuse = *pobject;
-	return ret;
+	if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_fuse, device, index, 0, &fuse->subdev);
+	fuse->func = func;
+	spin_lock_init(&fuse->lock);
+	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
index 393ef3a0faaf..13671fedc805 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
@@ -23,56 +23,31 @@
  */
 #include "priv.h"
 
-struct gf100_fuse_priv {
-	struct nvkm_fuse base;
-
-	spinlock_t fuse_enable_lock;
-};
-
 static u32
-gf100_fuse_rd32(struct nvkm_object *object, u64 addr)
+gf100_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct gf100_fuse_priv *priv = (void *)object;
+	struct nvkm_device *device = fuse->subdev.device;
 	unsigned long flags;
 	u32 fuse_enable, unk, val;
 
 	/* racy if another part of nvkm starts writing to these regs */
-	spin_lock_irqsave(&priv->fuse_enable_lock, flags);
-	fuse_enable = nv_mask(priv, 0x22400, 0x800, 0x800);
-	unk = nv_mask(priv, 0x21000, 0x1, 0x1);
-	val = nv_rd32(priv, 0x21100 + addr);
-	nv_wr32(priv, 0x21000, unk);
-	nv_wr32(priv, 0x22400, fuse_enable);
-	spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
+	spin_lock_irqsave(&fuse->lock, flags);
+	fuse_enable = nvkm_mask(device, 0x022400, 0x800, 0x800);
+	unk = nvkm_mask(device, 0x021000, 0x1, 0x1);
+	val = nvkm_rd32(device, 0x021100 + addr);
+	nvkm_wr32(device, 0x021000, unk);
+	nvkm_wr32(device, 0x022400, fuse_enable);
+	spin_unlock_irqrestore(&fuse->lock, flags);
 	return val;
 }
 
+static const struct nvkm_fuse_func
+gf100_fuse = {
+	.read = gf100_fuse_read,
+};
 
-static int
-gf100_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gf100_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
 {
-	struct gf100_fuse_priv *priv;
-	int ret;
-
-	ret = nvkm_fuse_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	spin_lock_init(&priv->fuse_enable_lock);
-	return 0;
+	return nvkm_fuse_new_(&gf100_fuse, device, index, pfuse);
 }
-
-struct nvkm_oclass
-gf100_fuse_oclass = {
-	.handle = NV_SUBDEV(FUSE, 0xC0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fuse_ctor,
-		.dtor = _nvkm_fuse_dtor,
-		.init = _nvkm_fuse_init,
-		.fini = _nvkm_fuse_fini,
-		.rd32 = gf100_fuse_rd32,
-	},
-};
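gf100_fuse_read() above has to treat the whole enable/read/restore sequence as one atomic unit with respect to other code touching the same registers, hence the irqsave lock around the read-modify-write dance. Below is a hedged userspace analogue using a pthread mutex in place of the kernel spinlock; the register file is reduced to a plain array and the values are invented for the sketch.

#include <pthread.h>
#include <stdio.h>

/* registers reduced to an array for the sketch:
 * regs[0] ~ 0x22400 (fuse enable), regs[1] ~ 0x21000, regs[2] ~ fuse data */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int regs[3] = { 0, 0, 0xbeef0000 };

static unsigned int fuse_read(unsigned int addr)
{
	unsigned int enable, unk, val;

	pthread_mutex_lock(&lock);	/* spin_lock_irqsave() in the driver */
	enable = regs[0];		/* nvkm_mask() returns the old value */
	regs[0] |= 0x800;
	unk = regs[1];
	regs[1] |= 0x1;
	val = regs[2] + addr;		/* nvkm_rd32(.., 0x21100 + addr) */
	regs[1] = unk;			/* restore both enables */
	regs[0] = enable;
	pthread_mutex_unlock(&lock);
	return val;
}

int main(void)
{
	printf("fuse[0x10] = %#x\n", fuse_read(0x10));
	return 0;
}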
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index 0b256aa4960f..9aff4ea04506 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -23,40 +23,20 @@
  */
 #include "priv.h"
 
-struct gm107_fuse_priv {
-	struct nvkm_fuse base;
-};
-
 static u32
-gm107_fuse_rd32(struct nvkm_object *object, u64 addr)
+gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct gf100_fuse_priv *priv = (void *)object;
-	return nv_rd32(priv, 0x21100 + addr);
+	struct nvkm_device *device = fuse->subdev.device;
+	return nvkm_rd32(device, 0x021100 + addr);
 }
 
+static const struct nvkm_fuse_func
+gm107_fuse = {
+	.read = gm107_fuse_read,
+};
 
-static int
-gm107_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gm107_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
 {
-	struct gm107_fuse_priv *priv;
-	int ret;
-
-	ret = nvkm_fuse_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-
-	return ret;
+	return nvkm_fuse_new_(&gm107_fuse, device, index, pfuse);
 }
-
-struct nvkm_oclass
-gm107_fuse_oclass = {
-	.handle = NV_SUBDEV(FUSE, 0x117),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_fuse_ctor,
-		.dtor = _nvkm_fuse_dtor,
-		.init = _nvkm_fuse_init,
-		.fini = _nvkm_fuse_fini,
-		.rd32 = gm107_fuse_rd32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
index 0d2afc426100..514c193db25d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
@@ -23,54 +23,29 @@
  */
 #include "priv.h"
 
-struct nv50_fuse_priv {
-	struct nvkm_fuse base;
-
-	spinlock_t fuse_enable_lock;
-};
-
 static u32
-nv50_fuse_rd32(struct nvkm_object *object, u64 addr)
+nv50_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct nv50_fuse_priv *priv = (void *)object;
+	struct nvkm_device *device = fuse->subdev.device;
 	unsigned long flags;
 	u32 fuse_enable, val;
 
 	/* racy if another part of nvkm starts writing to this reg */
-	spin_lock_irqsave(&priv->fuse_enable_lock, flags);
-	fuse_enable = nv_mask(priv, 0x1084, 0x800, 0x800);
-	val = nv_rd32(priv, 0x21000 + addr);
-	nv_wr32(priv, 0x1084, fuse_enable);
-	spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
+	spin_lock_irqsave(&fuse->lock, flags);
+	fuse_enable = nvkm_mask(device, 0x001084, 0x800, 0x800);
+	val = nvkm_rd32(device, 0x021000 + addr);
+	nvkm_wr32(device, 0x001084, fuse_enable);
+	spin_unlock_irqrestore(&fuse->lock, flags);
 	return val;
 }
 
+static const struct nvkm_fuse_func
+nv50_fuse = {
+	.read = &nv50_fuse_read,
+};
 
-static int
-nv50_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv50_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
 {
-	struct nv50_fuse_priv *priv;
-	int ret;
-
-	ret = nvkm_fuse_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	spin_lock_init(&priv->fuse_enable_lock);
-	return 0;
+	return nvkm_fuse_new_(&nv50_fuse, device, index, pfuse);
 }
-
-struct nvkm_oclass
-nv50_fuse_oclass = {
-	.handle = NV_SUBDEV(FUSE, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fuse_ctor,
-		.dtor = _nvkm_fuse_dtor,
-		.init = _nvkm_fuse_init,
-		.fini = _nvkm_fuse_fini,
-		.rd32 = nv50_fuse_rd32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index 7e050f789384..b0390b540ef5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -1,7 +1,12 @@
 #ifndef __NVKM_FUSE_PRIV_H__
 #define __NVKM_FUSE_PRIV_H__
+#define nvkm_fuse(p) container_of((p), struct nvkm_fuse, subdev)
 #include <subdev/fuse.h>
 
-int _nvkm_fuse_init(struct nvkm_object *object);
-void _nvkm_fuse_dtor(struct nvkm_object *object);
+struct nvkm_fuse_func {
+	u32 (*read)(struct nvkm_fuse *, u32 addr);
+};
+
+int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *,
+		   int index, struct nvkm_fuse **);
 #endif
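priv.h above captures the whole refactor in miniature: per-chipset behaviour moves into a const nvkm_fuse_func ops table, and the base object dispatches through it instead of through nvkm_oclass/ofuncs. A self-contained sketch of the pattern follows; demo_read and the exact constructor shape are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct fuse;

/* per-chipset ops table, like nvkm_fuse_func */
struct fuse_func {
	unsigned int (*read)(struct fuse *, unsigned int addr);
};

struct fuse {
	const struct fuse_func *func;
};

/* base-object wrapper, like nvkm_fuse_read() */
static unsigned int fuse_read(struct fuse *fuse, unsigned int addr)
{
	return fuse->func->read(fuse, addr);
}

/* one invented implementation; gf100/gm107/nv50 each supply their own */
static unsigned int demo_read(struct fuse *fuse, unsigned int addr)
{
	(void)fuse;
	return addr ^ 0x21100;
}

static const struct fuse_func demo_func = { .read = demo_read };

/* constructor shape mirrors nvkm_fuse_new_() */
static int fuse_new(const struct fuse_func *func, struct fuse **pfuse)
{
	if (!(*pfuse = calloc(1, sizeof(**pfuse))))
		return -1;
	(*pfuse)->func = func;
	return 0;
}

int main(void)
{
	struct fuse *fuse;

	if (fuse_new(&demo_func, &fuse))
		return 1;
	printf("read(0x100) = %#x\n", fuse_read(fuse, 0x100));
	free(fuse);
	return 0;
}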
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
index ea42a9ed1821..e52c5e87f242 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -2,5 +2,5 @@ nvkm-y += nvkm/subdev/gpio/base.o
 nvkm-y += nvkm/subdev/gpio/nv10.o
 nvkm-y += nvkm/subdev/gpio/nv50.o
 nvkm-y += nvkm/subdev/gpio/g94.o
-nvkm-y += nvkm/subdev/gpio/gf110.o
+nvkm-y += nvkm/subdev/gpio/gf119.o
 nvkm-y += nvkm/subdev/gpio/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index dea58161ba46..d45ec99f0e38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -23,28 +23,33 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/notify.h>
 
 static int
 nvkm_gpio_drive(struct nvkm_gpio *gpio, int idx, int line, int dir, int out)
 {
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	return impl->drive ? impl->drive(gpio, line, dir, out) : -ENODEV;
+	return gpio->func->drive(gpio, line, dir, out);
 }
 
 static int
 nvkm_gpio_sense(struct nvkm_gpio *gpio, int idx, int line)
 {
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	return impl->sense ? impl->sense(gpio, line) : -ENODEV;
+	return gpio->func->sense(gpio, line);
 }
 
-static int
+void
+nvkm_gpio_reset(struct nvkm_gpio *gpio, u8 func)
+{
+	if (gpio->func->reset)
+		gpio->func->reset(gpio, func);
+}
+
+int
 nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
 	       struct dcb_gpio_func *func)
 {
-	struct nvkm_bios *bios = nvkm_bios(gpio);
+	struct nvkm_device *device = gpio->subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	u8  ver, len;
 	u16 data;
 
@@ -56,11 +61,11 @@ nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
 		return 0;
 
 	/* Apple iMac G4 NV18 */
-	if (nv_device_match(nv_object(gpio), 0x0189, 0x10de, 0x0010)) {
+	if (device->quirk && device->quirk->tv_gpio) {
 		if (tag == DCB_GPIO_TVDAC0) {
 			*func = (struct dcb_gpio_func) {
 				.func = DCB_GPIO_TVDAC0,
-				.line = 4,
+				.line = device->quirk->tv_gpio,
 				.log[0] = 0,
 				.log[1] = 1,
 			};
@@ -71,7 +76,7 @@ nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
 	return -ENOENT;
 }
 
-static int
+int
 nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state)
 {
 	struct dcb_gpio_func func;
@@ -87,7 +92,7 @@ nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state)
 	return ret;
 }
 
-static int
+int
 nvkm_gpio_get(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line)
 {
 	struct dcb_gpio_func func;
@@ -107,16 +112,14 @@ static void
 nvkm_gpio_intr_fini(struct nvkm_event *event, int type, int index)
 {
 	struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	impl->intr_mask(gpio, type, 1 << index, 0);
+	gpio->func->intr_mask(gpio, type, 1 << index, 0);
 }
 
 static void
 nvkm_gpio_intr_init(struct nvkm_event *event, int type, int index)
 {
 	struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	impl->intr_mask(gpio, type, 1 << index, 1 << index);
+	gpio->func->intr_mask(gpio, type, 1 << index, 1 << index);
 }
 
 static int
@@ -133,16 +136,22 @@ nvkm_gpio_intr_ctor(struct nvkm_object *object, void *data, u32 size,
 	return -EINVAL;
 }
 
+static const struct nvkm_event_func
+nvkm_gpio_intr_func = {
+	.ctor = nvkm_gpio_intr_ctor,
+	.init = nvkm_gpio_intr_init,
+	.fini = nvkm_gpio_intr_fini,
+};
+
 static void
 nvkm_gpio_intr(struct nvkm_subdev *subdev)
 {
 	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
 	u32 hi, lo, i;
 
-	impl->intr_stat(gpio, &hi, &lo);
+	gpio->func->intr_stat(gpio, &hi, &lo);
 
-	for (i = 0; (hi | lo) && i < impl->lines; i++) {
+	for (i = 0; (hi | lo) && i < gpio->func->lines; i++) {
 		struct nvkm_gpio_ntfy_rep rep = {
 			.mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) |
 				(NVKM_GPIO_LO * !!(lo & (1 << i))),
@@ -151,24 +160,15 @@ nvkm_gpio_intr(struct nvkm_subdev *subdev)
 	}
 }
 
-static const struct nvkm_event_func
-nvkm_gpio_intr_func = {
-	.ctor = nvkm_gpio_intr_ctor,
-	.init = nvkm_gpio_intr_init,
-	.fini = nvkm_gpio_intr_fini,
-};
-
-int
-_nvkm_gpio_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	const struct nvkm_gpio_impl *impl = (void *)object->oclass;
-	struct nvkm_gpio *gpio = nvkm_gpio(object);
-	u32 mask = (1 << impl->lines) - 1;
-
-	impl->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
-	impl->intr_stat(gpio, &mask, &mask);
+	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+	u32 mask = (1 << gpio->func->lines) - 1;
 
-	return nvkm_subdev_fini(&gpio->base, suspend);
+	gpio->func->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
+	gpio->func->intr_stat(gpio, &mask, &mask);
+	return 0;
 }
 
 static struct dmi_system_id gpio_reset_ids[] = {
@@ -182,70 +182,43 @@ static struct dmi_system_id gpio_reset_ids[] = {
 	{ }
 };
 
-int
-_nvkm_gpio_init(struct nvkm_object *object)
+static int
+nvkm_gpio_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(object);
-	int ret;
-
-	ret = nvkm_subdev_init(&gpio->base);
-	if (ret)
-		return ret;
-
-	if (gpio->reset && dmi_check_system(gpio_reset_ids))
-		gpio->reset(gpio, DCB_GPIO_UNUSED);
-
-	return ret;
+	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+	if (dmi_check_system(gpio_reset_ids))
+		nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
+	return 0;
 }
 
-void
-_nvkm_gpio_dtor(struct nvkm_object *object)
+static void *
+nvkm_gpio_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_gpio *gpio = (void *)object;
+	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
 	nvkm_event_fini(&gpio->event);
-	nvkm_subdev_destroy(&gpio->base);
+	return gpio;
 }
 
-int
-nvkm_gpio_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	const struct nvkm_gpio_impl *impl = (void *)oclass;
-	struct nvkm_gpio *gpio;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "GPIO",
-				  "gpio", length, pobject);
-	gpio = *pobject;
-	if (ret)
-		return ret;
-
-	gpio->find = nvkm_gpio_find;
-	gpio->set  = nvkm_gpio_set;
-	gpio->get  = nvkm_gpio_get;
-	gpio->reset = impl->reset;
-
-	ret = nvkm_event_init(&nvkm_gpio_intr_func, 2, impl->lines,
-			      &gpio->event);
-	if (ret)
-		return ret;
-
-	nv_subdev(gpio)->intr = nvkm_gpio_intr;
-	return 0;
-}
+static const struct nvkm_subdev_func
+nvkm_gpio = {
+	.dtor = nvkm_gpio_dtor,
+	.init = nvkm_gpio_init,
+	.fini = nvkm_gpio_fini,
+	.intr = nvkm_gpio_intr,
+};
 
 int
-_nvkm_gpio_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_gpio **pgpio)
 {
 	struct nvkm_gpio *gpio;
-	int ret;
 
-	ret = nvkm_gpio_create(parent, engine, oclass, &gpio);
-	*pobject = nv_object(gpio);
-	if (ret)
-		return ret;
+	if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL)))
+		return -ENOMEM;
 
-	return 0;
+	nvkm_subdev_ctor(&nvkm_gpio, device, index, 0, &gpio->subdev);
+	gpio->func = func;
+
+	return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
+			       &gpio->event);
 }
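Two idioms in the rewritten base.c are worth noting: reset is an optional hook, so nvkm_gpio_reset() NULL-checks gpio->func->reset, while drive/sense are mandatory and dispatched unconditionally; and nvkm_gpio_intr() builds the notify mask branch-free by multiplying a flag by a boolean. A standalone sketch of the latter, with invented EX_* values standing in for NVKM_GPIO_HI/LO:

/* flag * !!(word & bit) yields the flag when the bit is set and 0
 * otherwise, avoiding one if() per direction.
 */
enum { EX_GPIO_LO = 1, EX_GPIO_HI = 2 };

static unsigned int
example_line_mask(u32 hi, u32 lo, int line)
{
	return (EX_GPIO_HI * !!(hi & (1 << line))) |
	       (EX_GPIO_LO * !!(lo & (1 << line)));
}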
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
index 12b3e01fca8e..6dcda55fb865 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
@@ -26,21 +26,23 @@
 void
 g94_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr0 = nv_rd32(gpio, 0x00e054);
-	u32 intr1 = nv_rd32(gpio, 0x00e074);
-	u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0;
-	u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x00e054);
+	u32 intr1 = nvkm_rd32(device, 0x00e074);
+	u32 stat0 = nvkm_rd32(device, 0x00e050) & intr0;
+	u32 stat1 = nvkm_rd32(device, 0x00e070) & intr1;
 	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
 	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
-	nv_wr32(gpio, 0x00e054, intr0);
-	nv_wr32(gpio, 0x00e074, intr1);
+	nvkm_wr32(device, 0x00e054, intr0);
+	nvkm_wr32(device, 0x00e074, intr1);
 }
 
 void
 g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte0 = nv_rd32(gpio, 0x00e050);
-	u32 inte1 = nv_rd32(gpio, 0x00e070);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte0 = nvkm_rd32(device, 0x00e050);
+	u32 inte1 = nvkm_rd32(device, 0x00e070);
 	if (type & NVKM_GPIO_LO)
 		inte0 = (inte0 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
@@ -51,23 +53,22 @@ g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 		inte1 = (inte1 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte1 = (inte1 & ~mask) | data;
-	nv_wr32(gpio, 0x00e050, inte0);
-	nv_wr32(gpio, 0x00e070, inte1);
+	nvkm_wr32(device, 0x00e050, inte0);
+	nvkm_wr32(device, 0x00e070, inte1);
 }
 
-struct nvkm_oclass *
-g94_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+g94_gpio = {
 	.lines = 32,
 	.intr_stat = g94_gpio_intr_stat,
 	.intr_mask = g94_gpio_intr_mask,
 	.drive = nv50_gpio_drive,
 	.sense = nv50_gpio_sense,
 	.reset = nv50_gpio_reset,
-}.base;
+};
+
+int
+g94_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&g94_gpio, device, index, pgpio);
+}
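g94_gpio_intr_stat() merges two 16-line status registers into 32-line hi/lo words: each register carries "hi" events in bits 15:0 and "lo" events in bits 31:16, with the second register covering lines 16-31 (semantics inferred from the code, not from documentation). One bit worked through:

/* Sketch: line 20 reports a "hi" event.  Line 20 lives in the second
 * register (lines 16-31), so it is bit 20 - 16 = 4 of stat1's hi half.
 */
static void
example_repack(void)
{
	u32 stat0 = 0x00000000;	/* lines 0-15: nothing pending */
	u32 stat1 = 0x00000010;	/* lines 16-31: "hi" event on bit 4 */
	u32 lo = (stat1 & 0xffff0000) | (stat0 >> 16);	/* 0x00000000 */
	u32 hi = (stat1 << 16) | (stat0 & 0x0000ffff);	/* 0x00100000: line 20 */

	(void)lo; (void)hi;	/* sketch only */
}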
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
index 2c3bb255d1f8..bb7400dfaef8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
@@ -24,15 +24,16 @@
 #include "priv.h"
 
 void
-gf110_gpio_reset(struct nvkm_gpio *gpio, u8 match)
+gf119_gpio_reset(struct nvkm_gpio *gpio, u8 match)
 {
-	struct nvkm_bios *bios = nvkm_bios(gpio);
+	struct nvkm_device *device = gpio->subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	u8 ver, len;
 	u16 entry;
 	int ent = -1;
 
 	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
-		u32 data = nv_ro32(bios, entry);
+		u32 data = nvbios_rd32(bios, entry);
 		u8  line =   (data & 0x0000003f);
 		u8  defs = !!(data & 0x00000080);
 		u8  func =   (data & 0x0000ff00) >> 8;
@@ -43,42 +44,43 @@ gf110_gpio_reset(struct nvkm_gpio *gpio, u8 match)
 		    (match != DCB_GPIO_UNUSED && match != func))
 			continue;
 
-		gpio->set(gpio, 0, func, line, defs);
+		nvkm_gpio_set(gpio, 0, func, line, defs);
 
-		nv_mask(gpio, 0x00d610 + (line * 4), 0xff, unk0);
+		nvkm_mask(device, 0x00d610 + (line * 4), 0xff, unk0);
 		if (unk1--)
-			nv_mask(gpio, 0x00d740 + (unk1 * 4), 0xff, line);
+			nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
 	}
 }
 
 int
-gf110_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
+gf119_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	u32 data = ((dir ^ 1) << 13) | (out << 12);
-	nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data);
-	nv_mask(gpio, 0x00d604, 0x00000001, 0x00000001); /* update? */
+	nvkm_mask(device, 0x00d610 + (line * 4), 0x00003000, data);
+	nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
 	return 0;
 }
 
 int
-gf110_gpio_sense(struct nvkm_gpio *gpio, int line)
+gf119_gpio_sense(struct nvkm_gpio *gpio, int line)
 {
-	return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
+	struct nvkm_device *device = gpio->subdev.device;
+	return !!(nvkm_rd32(device, 0x00d610 + (line * 4)) & 0x00004000);
 }
 
-struct nvkm_oclass *
-gf110_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+gf119_gpio = {
 	.lines = 32,
 	.intr_stat = g94_gpio_intr_stat,
 	.intr_mask = g94_gpio_intr_mask,
-	.drive = gf110_gpio_drive,
-	.sense = gf110_gpio_sense,
-	.reset = gf110_gpio_reset,
-}.base;
+	.drive = gf119_gpio_drive,
+	.sense = gf119_gpio_sense,
+	.reset = gf119_gpio_reset,
+};
+
+int
+gf119_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&gf119_gpio, device, index, pgpio);
+}
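gf119 moves from nv50's packed 2-bit-per-line fields to one control register per line at stride 4 (0x00d610 + line * 4). Reading the code above, bit 12 is the output level, bit 13 the inverted direction, bit 14 the sensed input, and 0x00d604 bit 0 looks like an update latch (hence the author's "update?" note); these meanings are inferred, not documented. A hedged read-back helper:

/* Hedged sketch: field meanings inferred from gf119_gpio_drive() and
 * gf119_gpio_sense() above, not from hardware documentation.
 */
static bool
example_gf119_line_high(struct nvkm_device *device, int line)
{
	u32 ctrl = nvkm_rd32(device, 0x00d610 + (line * 4));
	return !!(ctrl & 0x00004000);	/* bit 14: sensed input state */
}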
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 42fd2faaaa4f..3f45afd17d5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -26,21 +26,23 @@
 static void
 gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr0 = nv_rd32(gpio, 0x00dc00);
-	u32 intr1 = nv_rd32(gpio, 0x00dc80);
-	u32 stat0 = nv_rd32(gpio, 0x00dc08) & intr0;
-	u32 stat1 = nv_rd32(gpio, 0x00dc88) & intr1;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x00dc00);
+	u32 intr1 = nvkm_rd32(device, 0x00dc80);
+	u32 stat0 = nvkm_rd32(device, 0x00dc08) & intr0;
+	u32 stat1 = nvkm_rd32(device, 0x00dc88) & intr1;
 	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
 	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
-	nv_wr32(gpio, 0x00dc00, intr0);
-	nv_wr32(gpio, 0x00dc80, intr1);
+	nvkm_wr32(device, 0x00dc00, intr0);
+	nvkm_wr32(device, 0x00dc80, intr1);
 }
 
 void
 gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte0 = nv_rd32(gpio, 0x00dc08);
-	u32 inte1 = nv_rd32(gpio, 0x00dc88);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte0 = nvkm_rd32(device, 0x00dc08);
+	u32 inte1 = nvkm_rd32(device, 0x00dc88);
 	if (type & NVKM_GPIO_LO)
 		inte0 = (inte0 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
@@ -51,23 +53,22 @@ gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 		inte1 = (inte1 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte1 = (inte1 & ~mask) | data;
-	nv_wr32(gpio, 0x00dc08, inte0);
-	nv_wr32(gpio, 0x00dc88, inte1);
+	nvkm_wr32(device, 0x00dc08, inte0);
+	nvkm_wr32(device, 0x00dc88, inte1);
 }
 
-struct nvkm_oclass *
-gk104_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+gk104_gpio = {
 	.lines = 32,
 	.intr_stat = gk104_gpio_intr_stat,
 	.intr_mask = gk104_gpio_intr_mask,
-	.drive = gf110_gpio_drive,
-	.sense = gf110_gpio_sense,
-	.reset = gf110_gpio_reset,
-}.base;
+	.drive = gf119_gpio_drive,
+	.sense = gf119_gpio_sense,
+	.reset = gf119_gpio_reset,
+};
+
+int
+gk104_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&gk104_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
index 2b295154247e..ae3499b48330 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
@@ -28,19 +28,20 @@
 static int
 nv10_gpio_sense(struct nvkm_gpio *gpio, int line)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	if (line < 2) {
 		line = line * 16;
-		line = nv_rd32(gpio, 0x600818) >> line;
+		line = nvkm_rd32(device, 0x600818) >> line;
 		return !!(line & 0x0100);
 	} else
 	if (line < 10) {
 		line = (line - 2) * 4;
-		line = nv_rd32(gpio, 0x60081c) >> line;
+		line = nvkm_rd32(device, 0x60081c) >> line;
 		return !!(line & 0x04);
 	} else
 	if (line < 14) {
 		line = (line - 10) * 4;
-		line = nv_rd32(gpio, 0x600850) >> line;
+		line = nvkm_rd32(device, 0x600850) >> line;
 		return !!(line & 0x04);
 	}
 
@@ -50,6 +51,7 @@ nv10_gpio_sense(struct nvkm_gpio *gpio, int line)
 static int
 nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	u32 reg, mask, data;
 
 	if (line < 2) {
@@ -73,43 +75,44 @@ nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
 		return -EINVAL;
 	}
 
-	nv_mask(gpio, reg, mask << line, data << line);
+	nvkm_mask(device, reg, mask << line, data << line);
 	return 0;
 }
 
 static void
 nv10_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr = nv_rd32(gpio, 0x001104);
-	u32 stat = nv_rd32(gpio, 0x001144) & intr;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x001104);
+	u32 stat = nvkm_rd32(device, 0x001144) & intr;
 	*lo = (stat & 0xffff0000) >> 16;
 	*hi = (stat & 0x0000ffff);
-	nv_wr32(gpio, 0x001104, intr);
+	nvkm_wr32(device, 0x001104, intr);
 }
 
 static void
 nv10_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte = nv_rd32(gpio, 0x001144);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte = nvkm_rd32(device, 0x001144);
 	if (type & NVKM_GPIO_LO)
 		inte = (inte & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte = (inte & ~mask) | data;
-	nv_wr32(gpio, 0x001144, inte);
+	nvkm_wr32(device, 0x001144, inte);
 }
 
-struct nvkm_oclass *
-nv10_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0x10),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+nv10_gpio = {
 	.lines = 16,
 	.intr_stat = nv10_gpio_intr_stat,
 	.intr_mask = nv10_gpio_intr_mask,
 	.drive = nv10_gpio_drive,
 	.sense = nv10_gpio_sense,
-}.base;
+};
+
+int
+nv10_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&nv10_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
index 6a031035bd27..8996649209ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
@@ -26,14 +26,15 @@
 void
 nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
 {
-	struct nvkm_bios *bios = nvkm_bios(gpio);
+	struct nvkm_device *device = gpio->subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	u8 ver, len;
 	u16 entry;
 	int ent = -1;
 
 	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
 		static const u32 regs[] = { 0xe100, 0xe28c };
-		u32 data = nv_ro32(bios, entry);
+		u32 data = nvbios_rd32(bios, entry);
 		u8  line =   (data & 0x0000001f);
 		u8  func =   (data & 0x0000ff00) >> 8;
 		u8  defs = !!(data & 0x01000000);
@@ -47,9 +48,9 @@ nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
 		    (match != DCB_GPIO_UNUSED && match != func))
 			continue;
 
-		gpio->set(gpio, 0, func, line, defs);
+		nvkm_gpio_set(gpio, 0, func, line, defs);
 
-		nv_mask(gpio, reg, 0x00010001 << lsh, val << lsh);
+		nvkm_mask(device, reg, 0x00010001 << lsh, val << lsh);
 	}
 }
 
@@ -69,60 +70,63 @@ nv50_gpio_location(int line, u32 *reg, u32 *shift)
 int
 nv50_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	u32 reg, shift;
 
 	if (nv50_gpio_location(line, &reg, &shift))
 		return -EINVAL;
 
-	nv_mask(gpio, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
+	nvkm_mask(device, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
 	return 0;
 }
 
 int
 nv50_gpio_sense(struct nvkm_gpio *gpio, int line)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	u32 reg, shift;
 
 	if (nv50_gpio_location(line, &reg, &shift))
 		return -EINVAL;
 
-	return !!(nv_rd32(gpio, reg) & (4 << shift));
+	return !!(nvkm_rd32(device, reg) & (4 << shift));
 }
 
 static void
 nv50_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr = nv_rd32(gpio, 0x00e054);
-	u32 stat = nv_rd32(gpio, 0x00e050) & intr;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x00e054);
+	u32 stat = nvkm_rd32(device, 0x00e050) & intr;
 	*lo = (stat & 0xffff0000) >> 16;
 	*hi = (stat & 0x0000ffff);
-	nv_wr32(gpio, 0x00e054, intr);
+	nvkm_wr32(device, 0x00e054, intr);
 }
 
 static void
 nv50_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte = nv_rd32(gpio, 0x00e050);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte = nvkm_rd32(device, 0x00e050);
 	if (type & NVKM_GPIO_LO)
 		inte = (inte & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte = (inte & ~mask) | data;
-	nv_wr32(gpio, 0x00e050, inte);
+	nvkm_wr32(device, 0x00e050, inte);
 }
 
-struct nvkm_oclass *
-nv50_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+nv50_gpio = {
 	.lines = 16,
 	.intr_stat = nv50_gpio_intr_stat,
 	.intr_mask = nv50_gpio_intr_mask,
 	.drive = nv50_gpio_drive,
 	.sense = nv50_gpio_sense,
 	.reset = nv50_gpio_reset,
-}.base;
+};
+
+int
+nv50_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&nv50_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 382f8d44e140..371bcdbbe0d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -1,33 +1,9 @@
 #ifndef __NVKM_GPIO_PRIV_H__
 #define __NVKM_GPIO_PRIV_H__
+#define nvkm_gpio(p) container_of((p), struct nvkm_gpio, subdev)
 #include <subdev/gpio.h>
 
-#define nvkm_gpio_create(p,e,o,d)                                           \
-	nvkm_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_gpio_destroy(p) ({                                             \
-	struct nvkm_gpio *gpio = (p);                                       \
-	_nvkm_gpio_dtor(nv_object(gpio));                                   \
-})
-#define nvkm_gpio_init(p) ({                                                \
-	struct nvkm_gpio *gpio = (p);                                       \
-	_nvkm_gpio_init(nv_object(gpio));                                   \
-})
-#define nvkm_gpio_fini(p,s) ({                                              \
-	struct nvkm_gpio *gpio = (p);                                       \
-	_nvkm_gpio_fini(nv_object(gpio), (s));                              \
-})
-
-int  nvkm_gpio_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int, void **);
-int  _nvkm_gpio_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-void _nvkm_gpio_dtor(struct nvkm_object *);
-int  _nvkm_gpio_init(struct nvkm_object *);
-int  _nvkm_gpio_fini(struct nvkm_object *, bool);
-
-struct nvkm_gpio_impl {
-	struct nvkm_oclass base;
+struct nvkm_gpio_func {
 	int lines;
 
 	/* read and ack pending interrupts, returning only data
@@ -51,6 +27,9 @@ struct nvkm_gpio_impl {
 	void (*reset)(struct nvkm_gpio *, u8);
 };
 
+int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *,
+		   int index, struct nvkm_gpio **);
+
 void nv50_gpio_reset(struct nvkm_gpio *, u8);
 int  nv50_gpio_drive(struct nvkm_gpio *, int, int, int);
 int  nv50_gpio_sense(struct nvkm_gpio *, int);
@@ -58,7 +37,7 @@ int  nv50_gpio_sense(struct nvkm_gpio *, int);
 void g94_gpio_intr_stat(struct nvkm_gpio *, u32 *, u32 *);
 void g94_gpio_intr_mask(struct nvkm_gpio *, u32, u32, u32);
 
-void gf110_gpio_reset(struct nvkm_gpio *, u8);
-int  gf110_gpio_drive(struct nvkm_gpio *, int, int, int);
-int  gf110_gpio_sense(struct nvkm_gpio *, int);
+void gf119_gpio_reset(struct nvkm_gpio *, u8);
+int  gf119_gpio_drive(struct nvkm_gpio *, int, int, int);
+int  gf119_gpio_sense(struct nvkm_gpio *, int);
 #endif
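With find/set/get no longer static in base.c, consumers address GPIO lines by DCB function tag instead of raw line number. A hypothetical caller follows; using 0xff as the "any line" argument is an assumption based on how the wildcard is used elsewhere in nouveau:

/* Hypothetical consumer: look up a DCB GPIO function by tag, then
 * drive it.  DCB_GPIO_TVDAC0 is the tag from the quirk earlier in
 * this diff; line = 0xff is assumed to mean "match any line".
 */
static int
example_toggle_tvdac(struct nvkm_gpio *gpio)
{
	struct dcb_gpio_func func;
	int ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_TVDAC0, 0xff, &func);
	if (ret)
		return ret;	/* -ENOENT: VBIOS defines no such line */
	return nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, 1);
}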
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index d68307409980..1f730613c237 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -1,16 +1,30 @@
 nvkm-y += nvkm/subdev/i2c/base.o
-nvkm-y += nvkm/subdev/i2c/anx9805.o
-nvkm-y += nvkm/subdev/i2c/aux.o
-nvkm-y += nvkm/subdev/i2c/bit.o
-nvkm-y += nvkm/subdev/i2c/pad.o
-nvkm-y += nvkm/subdev/i2c/padnv04.o
-nvkm-y += nvkm/subdev/i2c/padg94.o
-nvkm-y += nvkm/subdev/i2c/padgm204.o
 nvkm-y += nvkm/subdev/i2c/nv04.o
 nvkm-y += nvkm/subdev/i2c/nv4e.o
 nvkm-y += nvkm/subdev/i2c/nv50.o
 nvkm-y += nvkm/subdev/i2c/g94.o
-nvkm-y += nvkm/subdev/i2c/gf110.o
 nvkm-y += nvkm/subdev/i2c/gf117.o
+nvkm-y += nvkm/subdev/i2c/gf119.o
 nvkm-y += nvkm/subdev/i2c/gk104.o
 nvkm-y += nvkm/subdev/i2c/gm204.o
+
+nvkm-y += nvkm/subdev/i2c/pad.o
+nvkm-y += nvkm/subdev/i2c/padnv04.o
+nvkm-y += nvkm/subdev/i2c/padnv4e.o
+nvkm-y += nvkm/subdev/i2c/padnv50.o
+nvkm-y += nvkm/subdev/i2c/padg94.o
+nvkm-y += nvkm/subdev/i2c/padgf119.o
+nvkm-y += nvkm/subdev/i2c/padgm204.o
+
+nvkm-y += nvkm/subdev/i2c/bus.o
+nvkm-y += nvkm/subdev/i2c/busnv04.o
+nvkm-y += nvkm/subdev/i2c/busnv4e.o
+nvkm-y += nvkm/subdev/i2c/busnv50.o
+nvkm-y += nvkm/subdev/i2c/busgf119.o
+nvkm-y += nvkm/subdev/i2c/bit.o
+
+nvkm-y += nvkm/subdev/i2c/aux.o
+nvkm-y += nvkm/subdev/i2c/auxg94.o
+nvkm-y += nvkm/subdev/i2c/auxgm204.o
+
+nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
index d17dd1cf3c34..b7b01c3f7037 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
@@ -21,272 +21,258 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "port.h"
+#define anx9805_pad(p) container_of((p), struct anx9805_pad, base)
+#define anx9805_bus(p) container_of((p), struct anx9805_bus, base)
+#define anx9805_aux(p) container_of((p), struct anx9805_aux, base)
+#include "aux.h"
+#include "bus.h"
+
+struct anx9805_pad {
+	struct nvkm_i2c_pad base;
+	struct nvkm_i2c_bus *bus;
+	u8 addr;
+};
 
-struct anx9805_i2c_port {
-	struct nvkm_i2c_port base;
-	u32 addr;
-	u32 ctrl;
+struct anx9805_bus {
+	struct nvkm_i2c_bus base;
+	struct anx9805_pad *pad;
+	u8 addr;
 };
 
 static int
-anx9805_train(struct nvkm_i2c_port *port, int link_nr, int link_bw, bool enh)
+anx9805_bus_xfer(struct nvkm_i2c_bus *base, struct i2c_msg *msgs, int num)
 {
-	struct anx9805_i2c_port *chan = (void *)port;
-	struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent;
-	u8 tmp, i;
-
-	DBG("ANX9805 train %d 0x%02x %d\n", link_nr, link_bw, enh);
+	struct anx9805_bus *bus = anx9805_bus(base);
+	struct anx9805_pad *pad = bus->pad;
+	struct i2c_adapter *adap = &pad->bus->i2c;
+	struct i2c_msg *msg = msgs;
+	int ret = -ETIMEDOUT;
+	int i, j, cnt = num;
+	u8 seg = 0x00, off = 0x00, tmp;
 
-	nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
-	nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
-	nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
-	nv_wri2cr(mast, chan->addr, 0xa8, 0x01);
+	tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x10;
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x10);
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
+	nvkm_wri2cr(adap, bus->addr, 0x43, 0x05);
+	mdelay(5);
 
-	i = 0;
-	while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) {
-		mdelay(5);
-		if (i++ == 100) {
-			nv_error(port, "link training timed out\n");
-			return -ETIMEDOUT;
+	while (cnt--) {
+		if ((msg->flags & I2C_M_RD) && msg->addr == 0x50) {
+			nvkm_wri2cr(adap, bus->addr, 0x40, msg->addr << 1);
+			nvkm_wri2cr(adap, bus->addr, 0x41, seg);
+			nvkm_wri2cr(adap, bus->addr, 0x42, off);
+			nvkm_wri2cr(adap, bus->addr, 0x44, msg->len);
+			nvkm_wri2cr(adap, bus->addr, 0x45, 0x00);
+			nvkm_wri2cr(adap, bus->addr, 0x43, 0x01);
+			for (i = 0; i < msg->len; i++) {
+				j = 0;
+				while (nvkm_rdi2cr(adap, bus->addr, 0x46) & 0x10) {
+					mdelay(5);
+					if (j++ == 32)
+						goto done;
+				}
+				msg->buf[i] = nvkm_rdi2cr(adap, bus->addr, 0x47);
+			}
+		} else
+		if (!(msg->flags & I2C_M_RD)) {
+			if (msg->addr == 0x50 && msg->len == 0x01) {
+				off = msg->buf[0];
+			} else
+			if (msg->addr == 0x30 && msg->len == 0x01) {
+				seg = msg->buf[0];
+			} else
+				goto done;
+		} else {
+			goto done;
 		}
+		msg++;
 	}
 
-	if (tmp & 0x70) {
-		nv_error(port, "link training failed: 0x%02x\n", tmp);
-		return -EIO;
+	ret = num;
+done:
+	nvkm_wri2cr(adap, bus->addr, 0x43, 0x00);
+	return ret;
+}
+
+static const struct nvkm_i2c_bus_func
+anx9805_bus_func = {
+	.xfer = anx9805_bus_xfer,
+};
+
+static int
+anx9805_bus_new(struct nvkm_i2c_pad *base, int id, u8 drive,
+		struct nvkm_i2c_bus **pbus)
+{
+	struct anx9805_pad *pad = anx9805_pad(base);
+	struct anx9805_bus *bus;
+	int ret;
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+	bus->pad = pad;
+
+	ret = nvkm_i2c_bus_ctor(&anx9805_bus_func, &pad->base, id, &bus->base);
+	if (ret)
+		return ret;
+
+	switch (pad->addr) {
+	case 0x39: bus->addr = 0x3d; break;
+	case 0x3b: bus->addr = 0x3f; break;
+	default:
+		return -ENOSYS;
 	}
 
-	return 1;
+	return 0;
 }
 
+struct anx9805_aux {
+	struct nvkm_i2c_aux base;
+	struct anx9805_pad *pad;
+	u8 addr;
+};
+
 static int
-anx9805_aux(struct nvkm_i2c_port *port, bool retry,
-	    u8 type, u32 addr, u8 *data, u8 size)
+anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
+		 u8 type, u32 addr, u8 *data, u8 size)
 {
-	struct anx9805_i2c_port *chan = (void *)port;
-	struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent;
+	struct anx9805_aux *aux = anx9805_aux(base);
+	struct anx9805_pad *pad = aux->pad;
+	struct i2c_adapter *adap = &pad->bus->i2c;
 	int i, ret = -ETIMEDOUT;
 	u8 buf[16] = {};
 	u8 tmp;
 
-	DBG("%02x %05x %d\n", type, addr, size);
+	AUX_DBG(&aux->base, "%02x %05x %d", type, addr, size);
 
-	tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
-	nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
-	nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
-	nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+	tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x04;
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x04);
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
+	nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
 
-	nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
+	nvkm_wri2cr(adap, aux->addr, 0xe4, 0x80);
 	if (!(type & 1)) {
 		memcpy(buf, data, size);
-		DBG("%16ph", buf);
+		AUX_DBG(&aux->base, "%16ph", buf);
 		for (i = 0; i < size; i++)
-			nv_wri2cr(mast, chan->addr, 0xf0 + i, buf[i]);
+			nvkm_wri2cr(adap, aux->addr, 0xf0 + i, buf[i]);
 	}
-	nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
-	nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >>  0);
-	nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >>  8);
-	nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16);
-	nv_wri2cr(mast, chan->addr, 0xe9, 0x01);
+	nvkm_wri2cr(adap, aux->addr, 0xe5, ((size - 1) << 4) | type);
+	nvkm_wri2cr(adap, aux->addr, 0xe6, (addr & 0x000ff) >>  0);
+	nvkm_wri2cr(adap, aux->addr, 0xe7, (addr & 0x0ff00) >>  8);
+	nvkm_wri2cr(adap, aux->addr, 0xe8, (addr & 0xf0000) >> 16);
+	nvkm_wri2cr(adap, aux->addr, 0xe9, 0x01);
 
 	i = 0;
-	while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) {
+	while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xe9)) & 0x01) {
 		mdelay(5);
 		if (i++ == 32)
 			goto done;
 	}
 
-	if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) {
+	if ((tmp = nvkm_rdi2cr(adap, pad->addr, 0xf7)) & 0x01) {
 		ret = -EIO;
 		goto done;
 	}
 
 	if (type & 1) {
 		for (i = 0; i < size; i++)
-			buf[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
-		DBG("%16ph", buf);
+			buf[i] = nvkm_rdi2cr(adap, aux->addr, 0xf0 + i);
+		AUX_DBG(&aux->base, "%16ph", buf);
 		memcpy(data, buf, size);
 	}
 
 	ret = 0;
 done:
-	nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+	nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
 	return ret;
 }
 
-static const struct nvkm_i2c_func
-anx9805_aux_func = {
-	.aux = anx9805_aux,
-	.lnk_ctl = anx9805_train,
-};
-
 static int
-anx9805_aux_chan_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 index,
-		      struct nvkm_object **pobject)
+anx9805_aux_lnk_ctl(struct nvkm_i2c_aux *base,
+		    int link_nr, int link_bw, bool enh)
 {
-	struct nvkm_i2c_port *mast = (void *)parent;
-	struct anx9805_i2c_port *chan;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_aux_algo, &anx9805_aux_func,
-				   &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	switch ((oclass->handle & 0xff00) >> 8) {
-	case 0x0d:
-		chan->addr = 0x38;
-		chan->ctrl = 0x39;
-		break;
-	case 0x0e:
-		chan->addr = 0x3c;
-		chan->ctrl = 0x3b;
-		break;
-	default:
-		BUG_ON(1);
-	}
-
-	if (mast->adapter.algo == &i2c_bit_algo) {
-		struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
-		algo->udelay = max(algo->udelay, 40);
-	}
-
-	return 0;
-}
-
-static struct nvkm_ofuncs
-anx9805_aux_ofuncs = {
-	.ctor =  anx9805_aux_chan_ctor,
-	.dtor = _nvkm_i2c_port_dtor,
-	.init = _nvkm_i2c_port_init,
-	.fini = _nvkm_i2c_port_fini,
-};
+	struct anx9805_aux *aux = anx9805_aux(base);
+	struct anx9805_pad *pad = aux->pad;
+	struct i2c_adapter *adap = &pad->bus->i2c;
+	u8 tmp, i;
 
-static int
-anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
-	struct anx9805_i2c_port *port = adap->algo_data;
-	struct nvkm_i2c_port *mast = (void *)nv_object(port)->parent;
-	struct i2c_msg *msg = msgs;
-	int ret = -ETIMEDOUT;
-	int i, j, cnt = num;
-	u8 seg = 0x00, off = 0x00, tmp;
+	AUX_DBG(&aux->base, "ANX9805 train %d %02x %d",
+		link_nr, link_bw, enh);
 
-	tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10;
-	nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10);
-	nv_wri2cr(mast, port->ctrl, 0x07, tmp);
-	nv_wri2cr(mast, port->addr, 0x43, 0x05);
-	mdelay(5);
+	nvkm_wri2cr(adap, aux->addr, 0xa0, link_bw);
+	nvkm_wri2cr(adap, aux->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
+	nvkm_wri2cr(adap, aux->addr, 0xa2, 0x01);
+	nvkm_wri2cr(adap, aux->addr, 0xa8, 0x01);
 
-	while (cnt--) {
-		if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
-			nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1);
-			nv_wri2cr(mast, port->addr, 0x41, seg);
-			nv_wri2cr(mast, port->addr, 0x42, off);
-			nv_wri2cr(mast, port->addr, 0x44, msg->len);
-			nv_wri2cr(mast, port->addr, 0x45, 0x00);
-			nv_wri2cr(mast, port->addr, 0x43, 0x01);
-			for (i = 0; i < msg->len; i++) {
-				j = 0;
-				while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
-					mdelay(5);
-					if (j++ == 32)
-						goto done;
-				}
-				msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
-			}
-		} else
-		if (!(msg->flags & I2C_M_RD)) {
-			if (msg->addr == 0x50 && msg->len == 0x01) {
-				off = msg->buf[0];
-			} else
-			if (msg->addr == 0x30 && msg->len == 0x01) {
-				seg = msg->buf[0];
-			} else
-				goto done;
-		} else {
-			goto done;
+	i = 0;
+	while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xa8)) & 0x01) {
+		mdelay(5);
+		if (i++ == 100) {
+			AUX_ERR(&aux->base, "link training timeout");
+			return -ETIMEDOUT;
 		}
-		msg++;
 	}
 
-	ret = num;
-done:
-	nv_wri2cr(mast, port->addr, 0x43, 0x00);
-	return ret;
-}
+	if (tmp & 0x70) {
+		AUX_ERR(&aux->base, "link training failed");
+		return -EIO;
+	}
 
-static u32
-anx9805_func(struct i2c_adapter *adap)
-{
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+	return 0;
 }
 
-static const struct i2c_algorithm
-anx9805_i2c_algo = {
-	.master_xfer = anx9805_xfer,
-	.functionality = anx9805_func
-};
-
-static const struct nvkm_i2c_func
-anx9805_i2c_func = {
+static const struct nvkm_i2c_aux_func
+anx9805_aux_func = {
+	.xfer = anx9805_aux_xfer,
+	.lnk_ctl = anx9805_aux_lnk_ctl,
 };
 
 static int
-anx9805_ddc_port_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 index,
-		      struct nvkm_object **pobject)
+anx9805_aux_new(struct nvkm_i2c_pad *base, int id, u8 drive,
+		struct nvkm_i2c_aux **pbus)
 {
-	struct nvkm_i2c_port *mast = (void *)parent;
-	struct anx9805_i2c_port *port;
+	struct anx9805_pad *pad = anx9805_pad(base);
+	struct anx9805_aux *aux;
 	int ret;
 
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &anx9805_i2c_algo, &anx9805_i2c_func, &port);
-	*pobject = nv_object(port);
+	if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &aux->base;
+	aux->pad = pad;
+
+	ret = nvkm_i2c_aux_ctor(&anx9805_aux_func, &pad->base, id, &aux->base);
 	if (ret)
 		return ret;
 
-	switch ((oclass->handle & 0xff00) >> 8) {
-	case 0x0d:
-		port->addr = 0x3d;
-		port->ctrl = 0x39;
-		break;
-	case 0x0e:
-		port->addr = 0x3f;
-		port->ctrl = 0x3b;
-		break;
+	switch (pad->addr) {
+	case 0x39: aux->addr = 0x38; break;
+	case 0x3b: aux->addr = 0x3c; break;
 	default:
-		BUG_ON(1);
-	}
-
-	if (mast->adapter.algo == &i2c_bit_algo) {
-		struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
-		algo->udelay = max(algo->udelay, 40);
+		return -ENOSYS;
 	}
 
 	return 0;
 }
 
-static struct nvkm_ofuncs
-anx9805_ddc_ofuncs = {
-	.ctor =  anx9805_ddc_port_ctor,
-	.dtor = _nvkm_i2c_port_dtor,
-	.init = _nvkm_i2c_port_init,
-	.fini = _nvkm_i2c_port_fini,
+static const struct nvkm_i2c_pad_func
+anx9805_pad_func = {
+	.bus_new_4 = anx9805_bus_new,
+	.aux_new_6 = anx9805_aux_new,
 };
 
-struct nvkm_oclass
-nvkm_anx9805_sclass[] = {
-	{ .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
-	{ .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
-	{ .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
-	{ .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs },
-	{}
-};
+int
+anx9805_pad_new(struct nvkm_i2c_bus *bus, int id, u8 addr,
+		struct nvkm_i2c_pad **ppad)
+{
+	struct anx9805_pad *pad;
+
+	if (!(pad = kzalloc(sizeof(*pad), GFP_KERNEL)))
+		return -ENOMEM;
+	*ppad = &pad->base;
+
+	nvkm_i2c_pad_ctor(&anx9805_pad_func, bus->pad->i2c, id, &pad->base);
+	pad->bus = bus;
+	pad->addr = addr;
+	return 0;
+}
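anx9805_bus_xfer() above accepts only one transaction shape: an optional single-byte write to 0x30 (E-DDC segment pointer), a single-byte write to 0x50 (EDID word offset), then reads from 0x50; anything else falls through to done and fails. The message list a caller would build for EDID block 0, sketched with the standard struct i2c_msg:

/* Sketch of the only message pattern the xfer above understands:
 * set the EDID offset at 0x50, then read the block back.
 */
static int
example_read_edid(struct i2c_adapter *adap, u8 *edid, u16 len)
{
	u8 off = 0x00;	/* start of EDID block 0 */
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &off },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = edid },
	};
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
}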
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 1c18860f80d1..f0851d57df2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -21,50 +21,17 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
-
-int
-nv_rdaux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-	if (port->func->aux) {
-		int ret = i2c->acquire(port, 0);
-		if (ret == 0) {
-			ret = port->func->aux(port, true, 9, addr, data, size);
-			i2c->release(port);
-		}
-		return ret;
-	}
-	return -ENODEV;
-}
-
-int
-nv_wraux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-	if (port->func->aux) {
-		int ret = i2c->acquire(port, 0);
-		if (ret == 0) {
-			ret = port->func->aux(port, true, 8, addr, data, size);
-			i2c->release(port);
-		}
-		return ret;
-	}
-	return -ENODEV;
-}
+#include "aux.h"
+#include "pad.h"
 
 static int
-aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 {
-	struct nvkm_i2c_port *port = adap->algo_data;
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
+	struct nvkm_i2c_aux *aux = container_of(adap, typeof(*aux), i2c);
 	struct i2c_msg *msg = msgs;
 	int ret, mcnt = num;
 
-	if (!port->func->aux)
-		return -ENODEV;
-
-	ret = i2c->acquire(port, 0);
+	ret = nvkm_i2c_aux_acquire(aux);
 	if (ret)
 		return ret;
 
@@ -84,9 +51,9 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 			if (mcnt || remaining > 16)
 				cmd |= 4; /* MOT */
 
-			ret = port->func->aux(port, true, cmd, msg->addr, ptr, cnt);
+			ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, cnt);
 			if (ret < 0) {
-				i2c->release(port);
+				nvkm_i2c_aux_release(aux);
 				return ret;
 			}
 
@@ -97,17 +64,111 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		msg++;
 	}
 
-	i2c->release(port);
+	nvkm_i2c_aux_release(aux);
 	return num;
 }
 
 static u32
-aux_func(struct i2c_adapter *adap)
+nvkm_i2c_aux_i2c_func(struct i2c_adapter *adap)
 {
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
-const struct i2c_algorithm nvkm_i2c_aux_algo = {
-	.master_xfer = aux_xfer,
-	.functionality = aux_func
+const struct i2c_algorithm
+nvkm_i2c_aux_i2c_algo = {
+	.master_xfer = nvkm_i2c_aux_i2c_xfer,
+	.functionality = nvkm_i2c_aux_i2c_func
 };
+
+void
+nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *aux, bool monitor)
+{
+	struct nvkm_i2c_pad *pad = aux->pad;
+	AUX_TRACE(aux, "monitor: %s", monitor ? "yes" : "no");
+	if (monitor)
+		nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_AUX);
+	else
+		nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_OFF);
+}
+
+void
+nvkm_i2c_aux_release(struct nvkm_i2c_aux *aux)
+{
+	struct nvkm_i2c_pad *pad = aux->pad;
+	AUX_TRACE(aux, "release");
+	nvkm_i2c_pad_release(pad);
+	mutex_unlock(&aux->mutex);
+}
+
+int
+nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
+{
+	struct nvkm_i2c_pad *pad = aux->pad;
+	int ret;
+	AUX_TRACE(aux, "acquire");
+	mutex_lock(&aux->mutex);
+	ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+	if (ret)
+		mutex_unlock(&aux->mutex);
+	return ret;
+}
+
+int
+nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
+		  u32 addr, u8 *data, u8 size)
+{
+	return aux->func->xfer(aux, retry, type, addr, data, size);
+}
+
+int
+nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *aux, int nr, int bw, bool ef)
+{
+	if (aux->func->lnk_ctl)
+		return aux->func->lnk_ctl(aux, nr, bw, ef);
+	return -ENODEV;
+}
+
+void
+nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux)
+{
+	struct nvkm_i2c_aux *aux = *paux;
+	if (aux && !WARN_ON(!aux->func)) {
+		AUX_TRACE(aux, "dtor");
+		list_del(&aux->head);
+		i2c_del_adapter(&aux->i2c);
+		kfree(*paux);
+		*paux = NULL;
+	}
+}
+
+int
+nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_aux *aux)
+{
+	struct nvkm_device *device = pad->i2c->subdev.device;
+
+	aux->func = func;
+	aux->pad = pad;
+	aux->id = id;
+	mutex_init(&aux->mutex);
+	list_add_tail(&aux->head, &pad->i2c->aux);
+	AUX_TRACE(aux, "ctor");
+
+	snprintf(aux->i2c.name, sizeof(aux->i2c.name), "nvkm-%s-aux-%04x",
+		 dev_name(device->dev), id);
+	aux->i2c.owner = THIS_MODULE;
+	aux->i2c.dev.parent = device->dev;
+	aux->i2c.algo = &nvkm_i2c_aux_i2c_algo;
+	return i2c_add_adapter(&aux->i2c);
+}
+
+int
+nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_aux **paux)
+{
+	if (!(*paux = kzalloc(sizeof(**paux), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_i2c_aux_ctor(func, pad, id, *paux);
+}
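The acquire/release pair above serializes AUX users through aux->mutex and routes pad ownership through nvkm_i2c_pad_acquire(); nvkm_i2c_aux_xfer() itself takes no lock, so a raw caller is expected to bracket the transfer itself. Type 9 is the read opcode the removed nv_rdaux() passed; 8 was the write variant:

/* Sketch of a locked native-AUX read against the new API. */
static int
example_rdaux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
{
	int ret = nvkm_i2c_aux_acquire(aux);
	if (ret)
		return ret;
	ret = nvkm_i2c_aux_xfer(aux, true, 9 /* read */, addr, data, size);
	nvkm_i2c_aux_release(aux);
	return ret;
}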
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
new file mode 100644
index 000000000000..35a892e4a4c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -0,0 +1,30 @@
+#ifndef __NVKM_I2C_AUX_H__
+#define __NVKM_I2C_AUX_H__
+#include "pad.h"
+
+struct nvkm_i2c_aux_func {
+	int  (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
+		     u32 addr, u8 *data, u8 size);
+	int  (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
+			bool enhanced_framing);
+};
+
+int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_aux *);
+int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_aux **);
+void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
+int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
+		      u32 addr, u8 *data, u8 size);
+
+int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int gm204_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+
+#define AUX_MSG(b,l,f,a...) do {                                               \
+	struct nvkm_i2c_aux *_aux = (b);                                       \
+	nvkm_##l(&_aux->pad->i2c->subdev, "aux %04x: "f"\n", _aux->id, ##a);   \
+} while(0)
+#define AUX_ERR(b,f,a...) AUX_MSG((b), error, f, ##a)
+#define AUX_DBG(b,f,a...) AUX_MSG((b), debug, f, ##a)
+#define AUX_TRACE(b,f,a...) AUX_MSG((b), trace, f, ##a)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
new file mode 100644
index 000000000000..954f5b76bfcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define g94_i2c_aux(p) container_of((p), struct g94_i2c_aux, base)
+#include "aux.h"
+
+struct g94_i2c_aux {
+	struct nvkm_i2c_aux base;
+	int ch;
+};
+
+static void
+g94_i2c_aux_fini(struct g94_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+g94_i2c_aux_init(struct g94_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 unksel = 1; /* not sure which to use, or if it matters... */
+	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+	const u32 urep = unksel ? 0x01000000 : 0x02000000;
+	u32 ctrl, timeout;
+
+	/* wait up to 1ms for any previous transaction to be done... */
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
+			return -EBUSY;
+		}
+	} while (ctrl & 0x03010000);
+
+	/* set some magic, and wait up to 1ms for it to appear */
+	nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00300000, ureq);
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "magic wait %08x", ctrl);
+			g94_i2c_aux_fini(aux);
+			return -EBUSY;
+		}
+	} while ((ctrl & 0x03000000) != urep);
+
+	return 0;
+}
+
+static int
+g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+		 u8 type, u32 addr, u8 *data, u8 size)
+{
+	struct g94_i2c_aux *aux = g94_i2c_aux(obj);
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 base = aux->ch * 0x50;
+	u32 ctrl, stat, timeout, retries;
+	u32 xbuf[4] = {};
+	int ret, i;
+
+	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+
+	ret = g94_i2c_aux_init(aux);
+	if (ret < 0)
+		goto out;
+
+	stat = nvkm_rd32(device, 0x00e4e8 + base);
+	if (!(stat & 0x10000000)) {
+		AUX_TRACE(&aux->base, "sink not detected");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!(type & 1)) {
+		memcpy(xbuf, data, size);
+		for (i = 0; i < 16; i += 4) {
+			AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
+			nvkm_wr32(device, 0x00e4c0 + base + i, xbuf[i / 4]);
+		}
+	}
+
+	ctrl  = nvkm_rd32(device, 0x00e4e4 + base);
+	ctrl &= ~0x0001f0ff;
+	ctrl |= type << 12;
+	ctrl |= size - 1;
+	nvkm_wr32(device, 0x00e4e0 + base, addr);
+
+	/* (maybe) retry transaction a number of times on failure... */
+	for (retries = 0; !ret && retries < 32; retries++) {
+		/* reset, and delay a while if this is a retry */
+		nvkm_wr32(device, 0x00e4e4 + base, 0x80000000 | ctrl);
+		nvkm_wr32(device, 0x00e4e4 + base, 0x00000000 | ctrl);
+		if (retries)
+			udelay(400);
+
+		/* transaction request, wait up to 1ms for it to complete */
+		nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
+
+		timeout = 1000;
+		do {
+			ctrl = nvkm_rd32(device, 0x00e4e4 + base);
+			udelay(1);
+			if (!timeout--) {
+				AUX_ERR(&aux->base, "timeout %08x", ctrl);
+				ret = -EIO;
+				goto out;
+			}
+		} while (ctrl & 0x00010000);
+		ret = 1;
+
+		/* read status, and check if transaction completed ok */
+		stat = nvkm_mask(device, 0x00e4e8 + base, 0, 0);
+		if ((stat & 0x000f0000) == 0x00080000 ||
+		    (stat & 0x000f0000) == 0x00020000)
+			ret = retry ? 0 : 1;
+		if ((stat & 0x00000100))
+			ret = -ETIMEDOUT;
+		if ((stat & 0x00000e00))
+			ret = -EIO;
+
+		AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
+	}
+
+	if (type & 1) {
+		for (i = 0; i < 16; i += 4) {
+			xbuf[i / 4] = nvkm_rd32(device, 0x00e4d0 + base + i);
+			AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
+		}
+		memcpy(data, xbuf, size);
+	}
+
+out:
+	g94_i2c_aux_fini(aux);
+	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
+
+static const struct nvkm_i2c_aux_func
+g94_i2c_aux_func = {
+	.xfer = g94_i2c_aux_xfer,
+};
+
+int
+g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+		struct nvkm_i2c_aux **paux)
+{
+	struct g94_i2c_aux *aux;
+
+	if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+		return -ENOMEM;
+	*paux = &aux->base;
+
+	nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
+	aux->ch = drive;
+	aux->base.intr = 1 << aux->ch;
+	return 0;
+}
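gm204's implementation below is g94's byte for byte, with the register block moved from 0x00e4xx to 0x00d9xx. Both rely on the same open-coded busy-wait: sample, udelay(1), and give up after 1000 iterations (~1ms), dropping the request bits via _fini() on every error path. The loop distilled:

/* Distilled ~1ms poll used by the g94/gm204 init and xfer paths;
 * reg and the busy mask are taken from the code above.
 */
static int
example_poll_idle(struct nvkm_device *device, u32 reg)
{
	u32 ctrl, timeout = 1000;

	do {
		ctrl = nvkm_rd32(device, reg);
		udelay(1);
		if (!timeout--)
			return -EBUSY;	/* still busy after ~1ms */
	} while (ctrl & 0x03010000);
	return 0;
}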
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
new file mode 100644
index 000000000000..bed231b56dbd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define gm204_i2c_aux(p) container_of((p), struct gm204_i2c_aux, base)
+#include "aux.h"
+
+struct gm204_i2c_aux {
+	struct nvkm_i2c_aux base;
+	int ch;
+};
+
+static void
+gm204_i2c_aux_fini(struct gm204_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+gm204_i2c_aux_init(struct gm204_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 unksel = 1; /* not sure which to use, or if it matters... */
+	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+	const u32 urep = unksel ? 0x01000000 : 0x02000000;
+	u32 ctrl, timeout;
+
+	/* wait up to 1ms for any previous transaction to be done... */
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
+			return -EBUSY;
+		}
+	} while (ctrl & 0x03010000);
+
+	/* set some magic, and wait up to 1ms for it to appear */
+	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "magic wait %08x", ctrl);
+			gm204_i2c_aux_fini(aux);
+			return -EBUSY;
+		}
+	} while ((ctrl & 0x03000000) != urep);
+
+	return 0;
+}
+
+static int
+gm204_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+		   u8 type, u32 addr, u8 *data, u8 size)
+{
+	struct gm204_i2c_aux *aux = gm204_i2c_aux(obj);
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 base = aux->ch * 0x50;
+	u32 ctrl, stat, timeout, retries;
+	u32 xbuf[4] = {};
+	int ret, i;
+
+	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+
+	ret = gm204_i2c_aux_init(aux);
+	if (ret < 0)
+		goto out;
+
+	stat = nvkm_rd32(device, 0x00d958 + base);
+	if (!(stat & 0x10000000)) {
+		AUX_TRACE(&aux->base, "sink not detected");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!(type & 1)) {
+		memcpy(xbuf, data, size);
+		for (i = 0; i < 16; i += 4) {
+			AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
+			nvkm_wr32(device, 0x00d930 + base + i, xbuf[i / 4]);
+		}
+	}
+
+	ctrl  = nvkm_rd32(device, 0x00d954 + base);
+	ctrl &= ~0x0001f0ff;
+	ctrl |= type << 12;
+	ctrl |= size - 1;
+	nvkm_wr32(device, 0x00d950 + base, addr);
+
+	/* (maybe) retry transaction a number of times on failure... */
+	for (retries = 0; !ret && retries < 32; retries++) {
+		/* reset, and delay a while if this is a retry */
+		nvkm_wr32(device, 0x00d954 + base, 0x80000000 | ctrl);
+		nvkm_wr32(device, 0x00d954 + base, 0x00000000 | ctrl);
+		if (retries)
+			udelay(400);
+
+		/* transaction request, wait up to 1ms for it to complete */
+		nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
+
+		timeout = 1000;
+		do {
+			ctrl = nvkm_rd32(device, 0x00d954 + base);
+			udelay(1);
+			if (!timeout--) {
+				AUX_ERR(&aux->base, "timeout %08x", ctrl);
+				ret = -EIO;
+				goto out;
+			}
+		} while (ctrl & 0x00010000);
+		ret = 1;
+
+		/* read status, and check if transaction completed ok */
+		stat = nvkm_mask(device, 0x00d958 + base, 0, 0);
+		if ((stat & 0x000f0000) == 0x00080000 ||
+		    (stat & 0x000f0000) == 0x00020000)
+			ret = retry ? 0 : 1;
+		if ((stat & 0x00000100))
+			ret = -ETIMEDOUT;
+		if ((stat & 0x00000e00))
+			ret = -EIO;
+
+		AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
+	}
+
+	if (type & 1) {
+		for (i = 0; i < 16; i += 4) {
+			xbuf[i / 4] = nvkm_rd32(device, 0x00d940 + base + i);
+			AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
+		}
+		memcpy(data, xbuf, size);
+	}
+
+out:
+	gm204_i2c_aux_fini(aux);
+	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
+
+static const struct nvkm_i2c_aux_func
+gm204_i2c_aux_func = {
+	.xfer = gm204_i2c_aux_xfer,
+};
+
+int
+gm204_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+		struct nvkm_i2c_aux **paux)
+{
+	struct gm204_i2c_aux *aux;
+
+	if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+		return -ENOMEM;
+	*paux = &aux->base;
+
+	nvkm_i2c_aux_ctor(&gm204_i2c_aux_func, pad, index, &aux->base);
+	aux->ch = drive;
+	aux->base.intr = 1 << aux->ch;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 9200f122c02c..243a71ff0a0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -22,328 +22,91 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include "aux.h"
+#include "bus.h"
 #include "pad.h"
 
-#include <core/device.h>
 #include <core/notify.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
+#include <subdev/bios/i2c.h>
 
-/******************************************************************************
- * interface to linux i2c bit-banging algorithm
- *****************************************************************************/
-
-#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
-#define CSTMSEL true
-#else
-#define CSTMSEL false
-#endif
-
-static int
-nvkm_i2c_pre_xfer(struct i2c_adapter *adap)
+static struct nvkm_i2c_pad *
+nvkm_i2c_pad_find(struct nvkm_i2c *i2c, int id)
 {
-	struct i2c_algo_bit_data *bit = adap->algo_data;
-	struct nvkm_i2c_port *port = bit->data;
-	return nvkm_i2c(port)->acquire(port, bit->timeout);
-}
+	struct nvkm_i2c_pad *pad;
 
-static void
-nvkm_i2c_post_xfer(struct i2c_adapter *adap)
-{
-	struct i2c_algo_bit_data *bit = adap->algo_data;
-	struct nvkm_i2c_port *port = bit->data;
-	return nvkm_i2c(port)->release(port);
-}
-
-static void
-nvkm_i2c_setscl(void *data, int state)
-{
-	struct nvkm_i2c_port *port = data;
-	port->func->drive_scl(port, state);
-}
-
-static void
-nvkm_i2c_setsda(void *data, int state)
-{
-	struct nvkm_i2c_port *port = data;
-	port->func->drive_sda(port, state);
-}
-
-static int
-nvkm_i2c_getscl(void *data)
-{
-	struct nvkm_i2c_port *port = data;
-	return port->func->sense_scl(port);
-}
-
-static int
-nvkm_i2c_getsda(void *data)
-{
-	struct nvkm_i2c_port *port = data;
-	return port->func->sense_sda(port);
-}
-
-/******************************************************************************
- * base i2c "port" class implementation
- *****************************************************************************/
-
-int
-_nvkm_i2c_port_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_i2c_port *port = (void *)object;
-	struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
-	nv_ofuncs(pad)->fini(nv_object(pad), suspend);
-	return nvkm_object_fini(&port->base, suspend);
-}
-
-void
-_nvkm_i2c_port_dtor(struct nvkm_object *object)
-{
-	struct nvkm_i2c_port *port = (void *)object;
-	i2c_del_adapter(&port->adapter);
-	nvkm_object_destroy(&port->base);
-}
-
-int
-nvkm_i2c_port_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, u8 index,
-		      const struct i2c_algorithm *algo,
-		      const struct nvkm_i2c_func *func,
-		      int size, void **pobject)
-{
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_i2c_port *port;
-	int ret;
-
-	ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
-	port = *pobject;
-	if (ret)
-		return ret;
-
-	snprintf(port->adapter.name, sizeof(port->adapter.name),
-		 "nvkm-%s-%d", device->name, index);
-	port->adapter.owner = THIS_MODULE;
-	port->adapter.dev.parent = nv_device_base(device);
-	port->index = index;
-	port->aux = -1;
-	port->func = func;
-	mutex_init(&port->mutex);
-
-	if ( algo == &nvkm_i2c_bit_algo &&
-	    !nvkm_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
-		struct i2c_algo_bit_data *bit;
-
-		bit = kzalloc(sizeof(*bit), GFP_KERNEL);
-		if (!bit)
-			return -ENOMEM;
-
-		bit->udelay = 10;
-		bit->timeout = usecs_to_jiffies(2200);
-		bit->data = port;
-		bit->pre_xfer = nvkm_i2c_pre_xfer;
-		bit->post_xfer = nvkm_i2c_post_xfer;
-		bit->setsda = nvkm_i2c_setsda;
-		bit->setscl = nvkm_i2c_setscl;
-		bit->getsda = nvkm_i2c_getsda;
-		bit->getscl = nvkm_i2c_getscl;
-
-		port->adapter.algo_data = bit;
-		ret = i2c_bit_add_bus(&port->adapter);
-	} else {
-		port->adapter.algo_data = port;
-		port->adapter.algo = algo;
-		ret = i2c_add_adapter(&port->adapter);
+	list_for_each_entry(pad, &i2c->pad, head) {
+		if (pad->id == id)
+			return pad;
 	}
 
-	if (ret == 0)
-		list_add_tail(&port->head, &i2c->ports);
-	return ret;
+	return NULL;
 }
 
-/******************************************************************************
- * base i2c subdev class implementation
- *****************************************************************************/
-
-static struct nvkm_i2c_port *
-nvkm_i2c_find(struct nvkm_i2c *i2c, u8 index)
+struct nvkm_i2c_bus *
+nvkm_i2c_bus_find(struct nvkm_i2c *i2c, int id)
 {
-	struct nvkm_bios *bios = nvkm_bios(i2c);
-	struct nvkm_i2c_port *port;
+	struct nvkm_bios *bios = i2c->subdev.device->bios;
+	struct nvkm_i2c_bus *bus;
 
-	if (index == NV_I2C_DEFAULT(0) ||
-	    index == NV_I2C_DEFAULT(1)) {
+	if (id == NVKM_I2C_BUS_PRI || id == NVKM_I2C_BUS_SEC) {
 		u8  ver, hdr, cnt, len;
 		u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
 		if (i2c && ver >= 0x30) {
-			u8 auxidx = nv_ro08(bios, i2c + 4);
-			if (index == NV_I2C_DEFAULT(0))
-				index = (auxidx & 0x0f) >> 0;
+			u8 auxidx = nvbios_rd08(bios, i2c + 4);
+			if (id == NVKM_I2C_BUS_PRI)
+				id = NVKM_I2C_BUS_CCB((auxidx & 0x0f) >> 0);
 			else
-				index = (auxidx & 0xf0) >> 4;
+				id = NVKM_I2C_BUS_CCB((auxidx & 0xf0) >> 4);
 		} else {
-			index = 2;
+			id = NVKM_I2C_BUS_CCB(2);
 		}
 	}
 
-	list_for_each_entry(port, &i2c->ports, head) {
-		if (port->index == index)
-			return port;
+	list_for_each_entry(bus, &i2c->bus, head) {
+		if (bus->id == id)
+			return bus;
 	}
 
 	return NULL;
 }
 
-static struct nvkm_i2c_port *
-nvkm_i2c_find_type(struct nvkm_i2c *i2c, u16 type)
+struct nvkm_i2c_aux *
+nvkm_i2c_aux_find(struct nvkm_i2c *i2c, int id)
 {
-	struct nvkm_i2c_port *port;
+	struct nvkm_i2c_aux *aux;
 
-	list_for_each_entry(port, &i2c->ports, head) {
-		if (nv_hclass(port) == type)
-			return port;
+	list_for_each_entry(aux, &i2c->aux, head) {
+		if (aux->id == id)
+			return aux;
 	}
 
 	return NULL;
 }
 
 static void
-nvkm_i2c_release_pad(struct nvkm_i2c_port *port)
-{
-	struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-
-	if (atomic_dec_and_test(&nv_object(pad)->usecount)) {
-		nv_ofuncs(pad)->fini(nv_object(pad), false);
-		wake_up_all(&i2c->wait);
-	}
-}
-
-static int
-nvkm_i2c_try_acquire_pad(struct nvkm_i2c_port *port)
-{
-	struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
-
-	if (atomic_add_return(1, &nv_object(pad)->usecount) != 1) {
-		struct nvkm_object *owner = (void *)pad->port;
-		do {
-			if (owner == (void *)port)
-				return 0;
-			owner = owner->parent;
-		} while(owner);
-		nvkm_i2c_release_pad(port);
-		return -EBUSY;
-	}
-
-	pad->next = port;
-	nv_ofuncs(pad)->init(nv_object(pad));
-	return 0;
-}
-
-static int
-nvkm_i2c_acquire_pad(struct nvkm_i2c_port *port, unsigned long timeout)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-
-	if (timeout) {
-		if (wait_event_timeout(i2c->wait,
-				       nvkm_i2c_try_acquire_pad(port) == 0,
-				       timeout) == 0)
-			return -EBUSY;
-	} else {
-		wait_event(i2c->wait, nvkm_i2c_try_acquire_pad(port) == 0);
-	}
-
-	return 0;
-}
-
-static void
-nvkm_i2c_release(struct nvkm_i2c_port *port)
-__releases(pad->mutex)
-{
-	nvkm_i2c(port)->release_pad(port);
-	mutex_unlock(&port->mutex);
-}
-
-static int
-nvkm_i2c_acquire(struct nvkm_i2c_port *port, unsigned long timeout)
-__acquires(pad->mutex)
-{
-	int ret;
-	mutex_lock(&port->mutex);
-	if ((ret = nvkm_i2c(port)->acquire_pad(port, timeout)))
-		mutex_unlock(&port->mutex);
-	return ret;
-}
-
-static int
-nvkm_i2c_identify(struct nvkm_i2c *i2c, int index, const char *what,
-		  struct nvkm_i2c_board_info *info,
-		  bool (*match)(struct nvkm_i2c_port *,
-				struct i2c_board_info *, void *), void *data)
-{
-	struct nvkm_i2c_port *port = nvkm_i2c_find(i2c, index);
-	int i;
-
-	if (!port) {
-		nv_debug(i2c, "no bus when probing %s on %d\n", what, index);
-		return -ENODEV;
-	}
-
-	nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
-	for (i = 0; info[i].dev.addr; i++) {
-		u8 orig_udelay = 0;
-
-		if ((port->adapter.algo == &i2c_bit_algo) &&
-		    (info[i].udelay != 0)) {
-			struct i2c_algo_bit_data *algo = port->adapter.algo_data;
-			nv_debug(i2c, "using custom udelay %d instead of %d\n",
-			         info[i].udelay, algo->udelay);
-			orig_udelay = algo->udelay;
-			algo->udelay = info[i].udelay;
-		}
-
-		if (nv_probe_i2c(port, info[i].dev.addr) &&
-		    (!match || match(port, &info[i].dev, data))) {
-			nv_info(i2c, "detected %s: %s\n", what,
-				info[i].dev.type);
-			return i;
-		}
-
-		if (orig_udelay) {
-			struct i2c_algo_bit_data *algo = port->adapter.algo_data;
-			algo->udelay = orig_udelay;
-		}
-	}
-
-	nv_debug(i2c, "no devices found.\n");
-	return -ENODEV;
-}
-
-static void
-nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int index)
+nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int id)
 {
 	struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
-	struct nvkm_i2c_port *port = i2c->find(i2c, index);
-	const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
-	if (port && port->aux >= 0)
-		impl->aux_mask(i2c, type, 1 << port->aux, 0);
+	struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
+	if (aux)
+		i2c->func->aux_mask(i2c, type, aux->intr, 0);
 }
 
 static void
-nvkm_i2c_intr_init(struct nvkm_event *event, int type, int index)
+nvkm_i2c_intr_init(struct nvkm_event *event, int type, int id)
 {
 	struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
-	struct nvkm_i2c_port *port = i2c->find(i2c, index);
-	const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
-	if (port && port->aux >= 0)
-		impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
+	struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
+	if (aux)
+		i2c->func->aux_mask(i2c, type, aux->intr, aux->intr);
 }
 
 static int
 nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size,
-		      struct nvkm_notify *notify)
+		   struct nvkm_notify *notify)
 {
 	struct nvkm_i2c_ntfy_req *req = data;
 	if (!WARN_ON(size != sizeof(*req))) {
@@ -355,38 +118,6 @@ nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size,
 	return -EINVAL;
 }
 
-static void
-nvkm_i2c_intr(struct nvkm_subdev *subdev)
-{
-	struct nvkm_i2c_impl *impl = (void *)nv_oclass(subdev);
-	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
-	struct nvkm_i2c_port *port;
-	u32 hi, lo, rq, tx, e;
-
-	if (impl->aux_stat) {
-		impl->aux_stat(i2c, &hi, &lo, &rq, &tx);
-		if (hi || lo || rq || tx) {
-			list_for_each_entry(port, &i2c->ports, head) {
-				if (e = 0, port->aux < 0)
-					continue;
-
-				if (hi & (1 << port->aux)) e |= NVKM_I2C_PLUG;
-				if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG;
-				if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ;
-				if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE;
-				if (e) {
-					struct nvkm_i2c_ntfy_rep rep = {
-						.mask = e,
-					};
-					nvkm_event_send(&i2c->event, rep.mask,
-							port->index, &rep,
-							sizeof(rep));
-				}
-			}
-		}
-	}
-}
-
 static const struct nvkm_event_func
 nvkm_i2c_intr_func = {
 	.ctor = nvkm_i2c_intr_ctor,
@@ -394,229 +125,272 @@ nvkm_i2c_intr_func = {
 	.fini = nvkm_i2c_intr_fini,
 };
 
-int
-_nvkm_i2c_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_i2c_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_i2c_impl *impl = (void *)nv_oclass(object);
-	struct nvkm_i2c *i2c = (void *)object;
-	struct nvkm_i2c_port *port;
-	u32 mask;
-	int ret;
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+	struct nvkm_i2c_aux *aux;
+	u32 hi, lo, rq, tx;
+
+	if (!i2c->func->aux_stat)
+		return;
+
+	i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx);
+	if (!hi && !lo && !rq && !tx)
+		return;
 
-	list_for_each_entry(port, &i2c->ports, head) {
-		ret = nv_ofuncs(port)->fini(nv_object(port), suspend);
-		if (ret && suspend)
-			goto fail;
+	list_for_each_entry(aux, &i2c->aux, head) {
+		u32 mask = 0;
+		if (hi & aux->intr) mask |= NVKM_I2C_PLUG;
+		if (lo & aux->intr) mask |= NVKM_I2C_UNPLUG;
+		if (rq & aux->intr) mask |= NVKM_I2C_IRQ;
+		if (tx & aux->intr) mask |= NVKM_I2C_DONE;
+		if (mask) {
+			struct nvkm_i2c_ntfy_rep rep = {
+				.mask = mask,
+			};
+			nvkm_event_send(&i2c->event, rep.mask, aux->id,
+					&rep, sizeof(rep));
+		}
 	}
+}
+
+static int
+nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+	struct nvkm_i2c_pad *pad;
+	u32 mask;
 
-	if ((mask = (1 << impl->aux) - 1), impl->aux_stat) {
-		impl->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
-		impl->aux_stat(i2c, &mask, &mask, &mask, &mask);
+	if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) {
+		i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
+		i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask);
 	}
 
-	return nvkm_subdev_fini(&i2c->base, suspend);
-fail:
-	list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
-		nv_ofuncs(port)->init(nv_object(port));
+	list_for_each_entry(pad, &i2c->pad, head) {
+		nvkm_i2c_pad_fini(pad);
 	}
 
-	return ret;
+	return 0;
 }
 
-int
-_nvkm_i2c_init(struct nvkm_object *object)
+static int
+nvkm_i2c_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_i2c *i2c = (void *)object;
-	struct nvkm_i2c_port *port;
-	int ret;
-
-	ret = nvkm_subdev_init(&i2c->base);
-	if (ret == 0) {
-		list_for_each_entry(port, &i2c->ports, head) {
-			ret = nv_ofuncs(port)->init(nv_object(port));
-			if (ret)
-				goto fail;
-		}
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+	struct nvkm_i2c_bus *bus;
+	struct nvkm_i2c_pad *pad;
+
+	list_for_each_entry(pad, &i2c->pad, head) {
+		nvkm_i2c_pad_init(pad);
 	}
 
-	return ret;
-fail:
-	list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
-		nv_ofuncs(port)->fini(nv_object(port), false);
+	list_for_each_entry(bus, &i2c->bus, head) {
+		nvkm_i2c_bus_init(bus);
 	}
 
-	return ret;
+	return 0;
 }
 
-void
-_nvkm_i2c_dtor(struct nvkm_object *object)
+static void *
+nvkm_i2c_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_i2c *i2c = (void *)object;
-	struct nvkm_i2c_port *port, *temp;
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
 
 	nvkm_event_fini(&i2c->event);
 
-	list_for_each_entry_safe(port, temp, &i2c->ports, head) {
-		nvkm_object_ref(NULL, (struct nvkm_object **)&port);
+	while (!list_empty(&i2c->aux)) {
+		struct nvkm_i2c_aux *aux =
+			list_first_entry(&i2c->aux, typeof(*aux), head);
+		nvkm_i2c_aux_del(&aux);
 	}
 
-	nvkm_subdev_destroy(&i2c->base);
-}
-
-static struct nvkm_oclass *
-nvkm_i2c_extdev_sclass[] = {
-	nvkm_anx9805_sclass,
-};
+	while (!list_empty(&i2c->bus)) {
+		struct nvkm_i2c_bus *bus =
+			list_first_entry(&i2c->bus, typeof(*bus), head);
+		nvkm_i2c_bus_del(&bus);
+	}
 
-static void
-nvkm_i2c_create_port(struct nvkm_i2c *i2c, int index, u8 type,
-		     struct dcb_i2c_entry *info)
-{
-	const struct nvkm_i2c_impl *impl = (void *)nv_oclass(i2c);
-	struct nvkm_oclass *oclass;
-	struct nvkm_object *parent;
-	struct nvkm_object *object;
-	int ret, pad;
-
-	if (info->share != DCB_I2C_UNUSED) {
-		pad    = info->share;
-		oclass = impl->pad_s;
-	} else {
-		if (type != DCB_I2C_NVIO_AUX)
-			pad = 0x100 + info->drive;
-		else
-			pad = 0x100 + info->auxch;
-		oclass = impl->pad_x;
+	while (!list_empty(&i2c->pad)) {
+		struct nvkm_i2c_pad *pad =
+			list_first_entry(&i2c->pad, typeof(*pad), head);
+		nvkm_i2c_pad_del(&pad);
 	}
 
-	ret = nvkm_object_ctor(nv_object(i2c), NULL, oclass,
-			       NULL, pad, &parent);
-	if (ret < 0)
-		return;
+	return i2c;
+}
 
-	oclass = impl->sclass;
-	do {
-		ret = -EINVAL;
-		if (oclass->handle == type) {
-			ret = nvkm_object_ctor(parent, NULL, oclass,
-					       info, index, &object);
-		}
-	} while (ret && (++oclass)->handle);
+static const struct nvkm_subdev_func
+nvkm_i2c = {
+	.dtor = nvkm_i2c_dtor,
+	.init = nvkm_i2c_init,
+	.fini = nvkm_i2c_fini,
+	.intr = nvkm_i2c_intr,
+};
 
-	nvkm_object_ref(NULL, &parent);
+static const struct nvkm_i2c_drv {
+	u8 bios;
+	u8 addr;
+	int (*pad_new)(struct nvkm_i2c_bus *, int id, u8 addr,
+		       struct nvkm_i2c_pad **);
 }
+nvkm_i2c_drv[] = {
+	{ 0x0d, 0x39, anx9805_pad_new },
+	{ 0x0e, 0x3b, anx9805_pad_new },
+	{}
+};
 
 int
-nvkm_i2c_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_i2c **pi2c)
 {
-	struct nvkm_bios *bios = nvkm_bios(parent);
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_i2c *i2c;
-	struct nvkm_object *object;
-	struct dcb_i2c_entry info;
-	int ret, i, j, index = -1;
-	struct dcb_output outp;
-	u8  ver, hdr;
-	u32 data;
-
-	ret = nvkm_subdev_create(parent, engine, oclass, 0, "I2C", "i2c", &i2c);
-	*pobject = nv_object(i2c);
-	if (ret)
-		return ret;
-
-	nv_subdev(i2c)->intr = nvkm_i2c_intr;
-	i2c->find = nvkm_i2c_find;
-	i2c->find_type = nvkm_i2c_find_type;
-	i2c->acquire_pad = nvkm_i2c_acquire_pad;
-	i2c->release_pad = nvkm_i2c_release_pad;
-	i2c->acquire = nvkm_i2c_acquire;
-	i2c->release = nvkm_i2c_release;
-	i2c->identify = nvkm_i2c_identify;
-	init_waitqueue_head(&i2c->wait);
-	INIT_LIST_HEAD(&i2c->ports);
-
-	while (!dcb_i2c_parse(bios, ++index, &info)) {
-		switch (info.type) {
-		case DCB_I2C_NV04_BIT:
-		case DCB_I2C_NV4E_BIT:
-		case DCB_I2C_NVIO_BIT:
-			nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
-					     info.type, &info);
-			break;
-		case DCB_I2C_NVIO_AUX:
-			nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
-					     info.type, &info);
-			break;
-		case DCB_I2C_PMGR:
-			if (info.drive != DCB_I2C_UNUSED) {
-				nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
-						     DCB_I2C_NVIO_BIT, &info);
-			}
-			if (info.auxch != DCB_I2C_UNUSED) {
-				nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
-						     DCB_I2C_NVIO_AUX, &info);
-			}
-			break;
-		case DCB_I2C_UNUSED:
-		default:
+	struct dcb_i2c_entry ccbE;
+	struct dcb_output dcbE;
+	u8 ver, hdr;
+	int ret, i;
+
+	if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_i2c, device, index, 0, &i2c->subdev);
+	i2c->func = func;
+	INIT_LIST_HEAD(&i2c->pad);
+	INIT_LIST_HEAD(&i2c->bus);
+	INIT_LIST_HEAD(&i2c->aux);
+
+	i = -1;
+	while (!dcb_i2c_parse(bios, ++i, &ccbE)) {
+		struct nvkm_i2c_pad *pad = NULL;
+		struct nvkm_i2c_bus *bus = NULL;
+		struct nvkm_i2c_aux *aux = NULL;
+
+		nvkm_debug(&i2c->subdev, "ccb %02x: type %02x drive %02x "
+			   "sense %02x share %02x auxch %02x\n", i, ccbE.type,
+			   ccbE.drive, ccbE.sense, ccbE.share, ccbE.auxch);
+
+		if (ccbE.share != DCB_I2C_UNUSED) {
+			const int id = NVKM_I2C_PAD_HYBRID(ccbE.share);
+			if (!(pad = nvkm_i2c_pad_find(i2c, id)))
+				ret = func->pad_s_new(i2c, id, &pad);
+			else
+				ret = 0;
+		} else {
+			ret = func->pad_x_new(i2c, NVKM_I2C_PAD_CCB(i), &pad);
+		}
+
+		if (ret) {
+			nvkm_error(&i2c->subdev, "ccb %02x pad, %d\n", i, ret);
+			nvkm_i2c_pad_del(&pad);
+			continue;
+		}
+
+		if (pad->func->bus_new_0 && ccbE.type == DCB_I2C_NV04_BIT) {
+			ret = pad->func->bus_new_0(pad, NVKM_I2C_BUS_CCB(i),
+						   ccbE.drive,
+						   ccbE.sense, &bus);
+		} else
+		if (pad->func->bus_new_4 &&
+		    ( ccbE.type == DCB_I2C_NV4E_BIT ||
+		      ccbE.type == DCB_I2C_NVIO_BIT ||
+		     (ccbE.type == DCB_I2C_PMGR &&
+		      ccbE.drive != DCB_I2C_UNUSED))) {
+			ret = pad->func->bus_new_4(pad, NVKM_I2C_BUS_CCB(i),
+						   ccbE.drive, &bus);
+		}
+
+		if (ret) {
+			nvkm_error(&i2c->subdev, "ccb %02x bus, %d\n", i, ret);
+			nvkm_i2c_bus_del(&bus);
+		}
+
+		if (pad->func->aux_new_6 &&
+		    ( ccbE.type == DCB_I2C_NVIO_AUX ||
+		     (ccbE.type == DCB_I2C_PMGR &&
+		      ccbE.auxch != DCB_I2C_UNUSED))) {
+			ret = pad->func->aux_new_6(pad, NVKM_I2C_BUS_CCB(i),
+						   ccbE.auxch, &aux);
+		} else {
+			ret = 0;
+		}
+
+		if (ret) {
+			nvkm_error(&i2c->subdev, "ccb %02x aux, %d\n", i, ret);
+			nvkm_i2c_aux_del(&aux);
+		}
+
+		if (ccbE.type != DCB_I2C_UNUSED && !bus && !aux) {
+			nvkm_warn(&i2c->subdev, "ccb %02x was ignored\n", i);
 			continue;
 		}
 	}
 
-	/* in addition to the busses specified in the i2c table, there
-	 * may be ddc/aux channels hiding behind external tmds/dp/etc
-	 * transmitters.
-	 */
-	index = NV_I2C_EXT(0);
 	i = -1;
-	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
-		if (!outp.location || !outp.extdev)
+	while (dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE)) {
+		const struct nvkm_i2c_drv *drv = nvkm_i2c_drv;
+		struct nvkm_i2c_bus *bus;
+		struct nvkm_i2c_pad *pad;
+
+		/* internal outputs handled by native i2c busses (above) */
+		if (!dcbE.location)
 			continue;
 
-		switch (outp.type) {
-		case DCB_OUTPUT_TMDS:
-			info.type = NV_I2C_TYPE_EXTDDC(outp.extdev);
-			break;
-		case DCB_OUTPUT_DP:
-			info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
-			break;
-		default:
+		/* we need an i2c bus to talk to the external encoder */
+		bus = nvkm_i2c_bus_find(i2c, dcbE.i2c_index);
+		if (!bus) {
+			nvkm_debug(&i2c->subdev, "dcb %02x no bus\n", i);
 			continue;
 		}
 
-		ret = -ENODEV;
-		j = -1;
-		while (ret && ++j < ARRAY_SIZE(nvkm_i2c_extdev_sclass)) {
-			parent = nv_object(i2c->find(i2c, outp.i2c_index));
-			oclass = nvkm_i2c_extdev_sclass[j];
-			do {
-				if (oclass->handle != info.type)
-					continue;
-				ret = nvkm_object_ctor(parent, NULL, oclass,
-						       NULL, index++, &object);
-			} while (ret && (++oclass)->handle);
+		/* ... and a driver for it */
+		while (drv->pad_new) {
+			if (drv->bios == dcbE.extdev)
+				break;
+			drv++;
 		}
-	}
 
-	ret = nvkm_event_init(&nvkm_i2c_intr_func, 4, index, &i2c->event);
-	if (ret)
-		return ret;
-
-	return 0;
-}
+		if (!drv->pad_new) {
+			nvkm_debug(&i2c->subdev, "dcb %02x drv %02x unknown\n",
+				   i, dcbE.extdev);
+			continue;
+		}
 
-int
-_nvkm_i2c_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nvkm_i2c *i2c;
-	int ret;
+		/* find/create an instance of the driver */
+		pad = nvkm_i2c_pad_find(i2c, NVKM_I2C_PAD_EXT(dcbE.extdev));
+		if (!pad) {
+			const int id = NVKM_I2C_PAD_EXT(dcbE.extdev);
+			ret = drv->pad_new(bus, id, drv->addr, &pad);
+			if (ret) {
+				nvkm_error(&i2c->subdev, "dcb %02x pad, %d\n",
+					   i, ret);
+				nvkm_i2c_pad_del(&pad);
+				continue;
+			}
+		}
 
-	ret = nvkm_i2c_create(parent, engine, oclass, &i2c);
-	*pobject = nv_object(i2c);
-	if (ret)
-		return ret;
+		/* create any i2c bus / aux channel required by the output */
+		if (pad->func->aux_new_6 && dcbE.type == DCB_OUTPUT_DP) {
+			const int id = NVKM_I2C_AUX_EXT(dcbE.extdev);
+			struct nvkm_i2c_aux *aux = NULL;
+			ret = pad->func->aux_new_6(pad, id, 0, &aux);
+			if (ret) {
+				nvkm_error(&i2c->subdev, "dcb %02x aux, %d\n",
+					   i, ret);
+				nvkm_i2c_aux_del(&aux);
+			}
+		} else
+		if (pad->func->bus_new_4) {
+			const int id = NVKM_I2C_BUS_EXT(dcbE.extdev);
+			struct nvkm_i2c_bus *bus = NULL;
+			ret = pad->func->bus_new_4(pad, id, 0, &bus);
+			if (ret) {
+				nvkm_error(&i2c->subdev, "dcb %02x bus, %d\n",
+					   i, ret);
+				nvkm_i2c_bus_del(&bus);
+			}
+		}
+	}
 
-	return 0;
+	return nvkm_event_init(&nvkm_i2c_intr_func, 4, i, &i2c->event);
 }
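
nvkm_i2c_dtor() above tears each object list down by repeatedly deleting the first entry rather than walking with list_for_each_entry_safe(); that works because each *_del() helper both unlinks and frees the object. A minimal sketch of the same drain-until-empty teardown, with a plain singly-linked list standing in for the kernel's list_head:

    /*
     * A minimal sketch of the drain-until-empty teardown, with a plain
     * singly-linked list standing in for the kernel's list_head.
     */
    #include <stdlib.h>

    struct node {
        struct node *next;
        int id;
    };

    /* like nvkm_i2c_aux_del(): unlink the first entry, then free it */
    static void node_del(struct node **head)
    {
        struct node *n = *head;

        *head = n->next;
        free(n);
    }

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *n = calloc(1, sizeof(*n));

            if (!n)
                return 1;
            n->id = i;
            n->next = head;
            head = n;
        }

        while (head)            /* while (!list_empty(...)) above */
            node_del(&head);
        return 0;
    }
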
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
index 861a453d2a67..cdce11bbabe5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
@@ -9,7 +9,7 @@
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice shall be included in
  * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "bus.h"
 
 #ifdef CONFIG_NOUVEAU_I2C_INTERNAL
 #define T_TIMEOUT  2200000
@@ -29,205 +29,188 @@
 #define T_HOLD     5000
 
 static inline void
-i2c_drive_scl(struct nvkm_i2c_port *port, int state)
+nvkm_i2c_drive_scl(struct nvkm_i2c_bus *bus, int state)
 {
-	port->func->drive_scl(port, state);
+	bus->func->drive_scl(bus, state);
 }
 
 static inline void
-i2c_drive_sda(struct nvkm_i2c_port *port, int state)
+nvkm_i2c_drive_sda(struct nvkm_i2c_bus *bus, int state)
 {
-	port->func->drive_sda(port, state);
+	bus->func->drive_sda(bus, state);
 }
 
 static inline int
-i2c_sense_scl(struct nvkm_i2c_port *port)
+nvkm_i2c_sense_scl(struct nvkm_i2c_bus *bus)
 {
-	return port->func->sense_scl(port);
+	return bus->func->sense_scl(bus);
 }
 
 static inline int
-i2c_sense_sda(struct nvkm_i2c_port *port)
+nvkm_i2c_sense_sda(struct nvkm_i2c_bus *bus)
 {
-	return port->func->sense_sda(port);
+	return bus->func->sense_sda(bus);
 }
 
 static void
-i2c_delay(struct nvkm_i2c_port *port, u32 nsec)
+nvkm_i2c_delay(struct nvkm_i2c_bus *bus, u32 nsec)
 {
 	udelay((nsec + 500) / 1000);
 }
 
 static bool
-i2c_raise_scl(struct nvkm_i2c_port *port)
+nvkm_i2c_raise_scl(struct nvkm_i2c_bus *bus)
 {
 	u32 timeout = T_TIMEOUT / T_RISEFALL;
 
-	i2c_drive_scl(port, 1);
+	nvkm_i2c_drive_scl(bus, 1);
 	do {
-		i2c_delay(port, T_RISEFALL);
-	} while (!i2c_sense_scl(port) && --timeout);
+		nvkm_i2c_delay(bus, T_RISEFALL);
+	} while (!nvkm_i2c_sense_scl(bus) && --timeout);
 
 	return timeout != 0;
 }
 
 static int
-i2c_start(struct nvkm_i2c_port *port)
+i2c_start(struct nvkm_i2c_bus *bus)
 {
 	int ret = 0;
 
-	if (!i2c_sense_scl(port) ||
-	    !i2c_sense_sda(port)) {
-		i2c_drive_scl(port, 0);
-		i2c_drive_sda(port, 1);
-		if (!i2c_raise_scl(port))
+	if (!nvkm_i2c_sense_scl(bus) ||
+	    !nvkm_i2c_sense_sda(bus)) {
+		nvkm_i2c_drive_scl(bus, 0);
+		nvkm_i2c_drive_sda(bus, 1);
+		if (!nvkm_i2c_raise_scl(bus))
 			ret = -EBUSY;
 	}
 
-	i2c_drive_sda(port, 0);
-	i2c_delay(port, T_HOLD);
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_sda(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
 	return ret;
 }
 
 static void
-i2c_stop(struct nvkm_i2c_port *port)
+i2c_stop(struct nvkm_i2c_bus *bus)
 {
-	i2c_drive_scl(port, 0);
-	i2c_drive_sda(port, 0);
-	i2c_delay(port, T_RISEFALL);
-
-	i2c_drive_scl(port, 1);
-	i2c_delay(port, T_HOLD);
-	i2c_drive_sda(port, 1);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_drive_sda(bus, 0);
+	nvkm_i2c_delay(bus, T_RISEFALL);
+
+	nvkm_i2c_drive_scl(bus, 1);
+	nvkm_i2c_delay(bus, T_HOLD);
+	nvkm_i2c_drive_sda(bus, 1);
+	nvkm_i2c_delay(bus, T_HOLD);
 }
 
 static int
-i2c_bitw(struct nvkm_i2c_port *port, int sda)
+i2c_bitw(struct nvkm_i2c_bus *bus, int sda)
 {
-	i2c_drive_sda(port, sda);
-	i2c_delay(port, T_RISEFALL);
+	nvkm_i2c_drive_sda(bus, sda);
+	nvkm_i2c_delay(bus, T_RISEFALL);
 
-	if (!i2c_raise_scl(port))
+	if (!nvkm_i2c_raise_scl(bus))
 		return -ETIMEDOUT;
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_delay(bus, T_HOLD);
 
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
 	return 0;
 }
 
 static int
-i2c_bitr(struct nvkm_i2c_port *port)
+i2c_bitr(struct nvkm_i2c_bus *bus)
 {
 	int sda;
 
-	i2c_drive_sda(port, 1);
-	i2c_delay(port, T_RISEFALL);
+	nvkm_i2c_drive_sda(bus, 1);
+	nvkm_i2c_delay(bus, T_RISEFALL);
 
-	if (!i2c_raise_scl(port))
+	if (!nvkm_i2c_raise_scl(bus))
 		return -ETIMEDOUT;
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_delay(bus, T_HOLD);
 
-	sda = i2c_sense_sda(port);
+	sda = nvkm_i2c_sense_sda(bus);
 
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
 	return sda;
 }
 
 static int
-i2c_get_byte(struct nvkm_i2c_port *port, u8 *byte, bool last)
+nvkm_i2c_get_byte(struct nvkm_i2c_bus *bus, u8 *byte, bool last)
 {
 	int i, bit;
 
 	*byte = 0;
 	for (i = 7; i >= 0; i--) {
-		bit = i2c_bitr(port);
+		bit = i2c_bitr(bus);
 		if (bit < 0)
 			return bit;
 		*byte |= bit << i;
 	}
 
-	return i2c_bitw(port, last ? 1 : 0);
+	return i2c_bitw(bus, last ? 1 : 0);
 }
 
 static int
-i2c_put_byte(struct nvkm_i2c_port *port, u8 byte)
+nvkm_i2c_put_byte(struct nvkm_i2c_bus *bus, u8 byte)
 {
 	int i, ret;
 	for (i = 7; i >= 0; i--) {
-		ret = i2c_bitw(port, !!(byte & (1 << i)));
+		ret = i2c_bitw(bus, !!(byte & (1 << i)));
 		if (ret < 0)
 			return ret;
 	}
 
-	ret = i2c_bitr(port);
+	ret = i2c_bitr(bus);
 	if (ret == 1) /* nack */
 		ret = -EIO;
 	return ret;
 }
 
 static int
-i2c_addr(struct nvkm_i2c_port *port, struct i2c_msg *msg)
+i2c_addr(struct nvkm_i2c_bus *bus, struct i2c_msg *msg)
 {
 	u32 addr = msg->addr << 1;
 	if (msg->flags & I2C_M_RD)
 		addr |= 1;
-	return i2c_put_byte(port, addr);
+	return nvkm_i2c_put_byte(bus, addr);
 }
 
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+int
+nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
 {
-	struct nvkm_i2c_port *port = adap->algo_data;
 	struct i2c_msg *msg = msgs;
 	int ret = 0, mcnt = num;
 
-	ret = nvkm_i2c(port)->acquire(port, nsecs_to_jiffies(T_TIMEOUT));
-	if (ret)
-		return ret;
-
 	while (!ret && mcnt--) {
 		u8 remaining = msg->len;
 		u8 *ptr = msg->buf;
 
-		ret = i2c_start(port);
+		ret = i2c_start(bus);
 		if (ret == 0)
-			ret = i2c_addr(port, msg);
+			ret = i2c_addr(bus, msg);
 
 		if (msg->flags & I2C_M_RD) {
 			while (!ret && remaining--)
-				ret = i2c_get_byte(port, ptr++, !remaining);
+				ret = nvkm_i2c_get_byte(bus, ptr++, !remaining);
 		} else {
 			while (!ret && remaining--)
-				ret = i2c_put_byte(port, *ptr++);
+				ret = nvkm_i2c_put_byte(bus, *ptr++);
 		}
 
 		msg++;
 	}
 
-	i2c_stop(port);
-	nvkm_i2c(port)->release(port);
+	i2c_stop(bus);
 	return (ret < 0) ? ret : num;
 }
 #else
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+int
+nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
 {
 	return -ENODEV;
 }
 #endif
-
-static u32
-i2c_bit_func(struct i2c_adapter *adap)
-{
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-const struct i2c_algorithm nvkm_i2c_bit_algo = {
-	.master_xfer = i2c_bit_xfer,
-	.functionality = i2c_bit_func
-};
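
nvkm_i2c_bit_xfer() above frames each message as a start condition, the address byte, then the data bytes: i2c_addr() shifts the 7-bit slave address left one and ORs in the read bit, and nvkm_i2c_put_byte() clocks the result out MSB-first. A standalone sketch of that framing; 0x50 is only an illustrative address (the usual DDC/EDID one), not anything this patch hardcodes:

    #include <stdio.h>

    int main(void)
    {
        unsigned int addr = 0x50;   /* 7-bit slave address */
        unsigned int rd = 1;        /* msg->flags & I2C_M_RD */
        unsigned int byte = (addr << 1) | rd;

        /* MSB-first, the same loop order as nvkm_i2c_put_byte() */
        for (int i = 7; i >= 0; i--)
            printf("%u", (byte >> i) & 1);
        printf(" = 0x%02x\n", byte);    /* prints 10100001 = 0xa1 */
        return 0;
    }
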
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
new file mode 100644
index 000000000000..807a2b67bd64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "bus.h"
+#include "pad.h"
+
+#include <core/option.h>
+
+/*******************************************************************************
+ * i2c-algo-bit
+ ******************************************************************************/
+static int
+nvkm_i2c_bus_pre_xfer(struct i2c_adapter *adap)
+{
+	struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+	return nvkm_i2c_bus_acquire(bus);
+}
+
+static void
+nvkm_i2c_bus_post_xfer(struct i2c_adapter *adap)
+{
+	struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+	return nvkm_i2c_bus_release(bus);
+}
+
+static void
+nvkm_i2c_bus_setscl(void *data, int state)
+{
+	struct nvkm_i2c_bus *bus = data;
+	bus->func->drive_scl(bus, state);
+}
+
+static void
+nvkm_i2c_bus_setsda(void *data, int state)
+{
+	struct nvkm_i2c_bus *bus = data;
+	bus->func->drive_sda(bus, state);
+}
+
+static int
+nvkm_i2c_bus_getscl(void *data)
+{
+	struct nvkm_i2c_bus *bus = data;
+	return bus->func->sense_scl(bus);
+}
+
+static int
+nvkm_i2c_bus_getsda(void *data)
+{
+	struct nvkm_i2c_bus *bus = data;
+	return bus->func->sense_sda(bus);
+}
+
+/*******************************************************************************
+ * !i2c-algo-bit (off-chip i2c bus / hw i2c / internal bit-banging algo)
+ ******************************************************************************/
+static int
+nvkm_i2c_bus_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+	int ret;
+
+	ret = nvkm_i2c_bus_acquire(bus);
+	if (ret)
+		return ret;
+
+	ret = bus->func->xfer(bus, msgs, num);
+	nvkm_i2c_bus_release(bus);
+	return ret;
+}
+
+static u32
+nvkm_i2c_bus_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm
+nvkm_i2c_bus_algo = {
+	.master_xfer = nvkm_i2c_bus_xfer,
+	.functionality = nvkm_i2c_bus_func,
+};
+
+/*******************************************************************************
+ * nvkm_i2c_bus base
+ ******************************************************************************/
+void
+nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus)
+{
+	BUS_TRACE(bus, "init");
+	if (bus->func->init)
+		bus->func->init(bus);
+}
+
+void
+nvkm_i2c_bus_release(struct nvkm_i2c_bus *bus)
+{
+	struct nvkm_i2c_pad *pad = bus->pad;
+	BUS_TRACE(bus, "release");
+	nvkm_i2c_pad_release(pad);
+	mutex_unlock(&bus->mutex);
+}
+
+int
+nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus)
+{
+	struct nvkm_i2c_pad *pad = bus->pad;
+	int ret;
+	BUS_TRACE(bus, "acquire");
+	mutex_lock(&bus->mutex);
+	ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+	if (ret)
+		mutex_unlock(&bus->mutex);
+	return ret;
+}
+
+int
+nvkm_i2c_bus_probe(struct nvkm_i2c_bus *bus, const char *what,
+		   struct nvkm_i2c_bus_probe *info,
+		   bool (*match)(struct nvkm_i2c_bus *,
+				 struct i2c_board_info *, void *), void *data)
+{
+	int i;
+
+	BUS_DBG(bus, "probing %ss", what);
+	for (i = 0; info[i].dev.addr; i++) {
+		u8 orig_udelay = 0;
+
+		if ((bus->i2c.algo == &i2c_bit_algo) && (info[i].udelay != 0)) {
+			struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
+			BUS_DBG(bus, "%dus delay instead of %dus",
+				     info[i].udelay, algo->udelay);
+			orig_udelay = algo->udelay;
+			algo->udelay = info[i].udelay;
+		}
+
+		if (nvkm_probe_i2c(&bus->i2c, info[i].dev.addr) &&
+		    (!match || match(bus, &info[i].dev, data))) {
+			BUS_DBG(bus, "detected %s: %s",
+				what, info[i].dev.type);
+			return i;
+		}
+
+		if (orig_udelay) {
+			struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
+			algo->udelay = orig_udelay;
+		}
+	}
+
+	BUS_DBG(bus, "no devices found.");
+	return -ENODEV;
+}
+
+void
+nvkm_i2c_bus_del(struct nvkm_i2c_bus **pbus)
+{
+	struct nvkm_i2c_bus *bus = *pbus;
+	if (bus && !WARN_ON(!bus->func)) {
+		BUS_TRACE(bus, "dtor");
+		list_del(&bus->head);
+		i2c_del_adapter(&bus->i2c);
+		kfree(bus->i2c.algo_data);
+		kfree(*pbus);
+		*pbus = NULL;
+	}
+}
+
+int
+nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_bus *bus)
+{
+	struct nvkm_device *device = pad->i2c->subdev.device;
+	struct i2c_algo_bit_data *bit;
+#ifndef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
+	const bool internal = false;
+#else
+	const bool internal = true;
+#endif
+	int ret;
+
+	bus->func = func;
+	bus->pad = pad;
+	bus->id = id;
+	mutex_init(&bus->mutex);
+	list_add_tail(&bus->head, &pad->i2c->bus);
+	BUS_TRACE(bus, "ctor");
+
+	snprintf(bus->i2c.name, sizeof(bus->i2c.name), "nvkm-%s-bus-%04x",
+		 dev_name(device->dev), id);
+	bus->i2c.owner = THIS_MODULE;
+	bus->i2c.dev.parent = device->dev;
+
+	if ( bus->func->drive_scl &&
+	    !nvkm_boolopt(device->cfgopt, "NvI2C", internal)) {
+		if (!(bit = kzalloc(sizeof(*bit), GFP_KERNEL)))
+			return -ENOMEM;
+		bit->udelay = 10;
+		bit->timeout = usecs_to_jiffies(2200);
+		bit->data = bus;
+		bit->pre_xfer = nvkm_i2c_bus_pre_xfer;
+		bit->post_xfer = nvkm_i2c_bus_post_xfer;
+		bit->setscl = nvkm_i2c_bus_setscl;
+		bit->setsda = nvkm_i2c_bus_setsda;
+		bit->getscl = nvkm_i2c_bus_getscl;
+		bit->getsda = nvkm_i2c_bus_getsda;
+		bus->i2c.algo_data = bit;
+		ret = i2c_bit_add_bus(&bus->i2c);
+	} else {
+		bus->i2c.algo = &nvkm_i2c_bus_algo;
+		ret = i2c_add_adapter(&bus->i2c);
+	}
+
+	return ret;
+}
+
+int
+nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_bus **pbus)
+{
+	if (!(*pbus = kzalloc(sizeof(**pbus), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_i2c_bus_ctor(func, pad, id, *pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
new file mode 100644
index 000000000000..e1be14c23e54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -0,0 +1,37 @@
+#ifndef __NVKM_I2C_BUS_H__
+#define __NVKM_I2C_BUS_H__
+#include "pad.h"
+
+struct nvkm_i2c_bus_func {
+	void (*init)(struct nvkm_i2c_bus *);
+	void (*drive_scl)(struct nvkm_i2c_bus *, int state);
+	void (*drive_sda)(struct nvkm_i2c_bus *, int state);
+	int (*sense_scl)(struct nvkm_i2c_bus *);
+	int (*sense_sda)(struct nvkm_i2c_bus *);
+	int (*xfer)(struct nvkm_i2c_bus *, struct i2c_msg *, int num);
+};
+
+int nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_bus *);
+int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_bus **);
+void nvkm_i2c_bus_del(struct nvkm_i2c_bus **);
+void nvkm_i2c_bus_init(struct nvkm_i2c_bus *);
+
+int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int);
+
+int nv04_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, u8,
+		     struct nvkm_i2c_bus **);
+
+int nv4e_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+int nv50_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+int gf119_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+
+#define BUS_MSG(b,l,f,a...) do {                                               \
+	struct nvkm_i2c_bus *_bus = (b);                                       \
+	nvkm_##l(&_bus->pad->i2c->subdev, "bus %04x: "f"\n", _bus->id, ##a);   \
+} while(0)
+#define BUS_ERR(b,f,a...) BUS_MSG((b), error, f, ##a)
+#define BUS_DBG(b,f,a...) BUS_MSG((b), debug, f, ##a)
+#define BUS_TRACE(b,f,a...) BUS_MSG((b), trace, f, ##a)
+#endif
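
BUS_MSG() above is the per-object logging idiom this series adopts: one variadic macro token-pastes the severity into the nvkm_* logging call and prefixes every message with the bus id, so BUS_DBG()/BUS_ERR()/BUS_TRACE() become one-liners. A standalone sketch of the same idiom (GNU C, like the original macro); my_debug()/my_error() are hypothetical printf wrappers standing in for nvkm_debug()/nvkm_error():

    #include <stdio.h>

    #define my_debug(fmt, ...) fprintf(stderr, "dbg: " fmt, ##__VA_ARGS__)
    #define my_error(fmt, ...) fprintf(stderr, "err: " fmt, ##__VA_ARGS__)

    struct bus { int id; };

    /* token-paste the severity, prefix every message with the bus id */
    #define BUS_MSG(b, l, f, a...) do {                                \
        struct bus *_bus = (b);                                        \
        my_##l("bus %04x: " f "\n", _bus->id, ##a);                    \
    } while (0)
    #define BUS_DBG(b, f, a...) BUS_MSG((b), debug, f, ##a)
    #define BUS_ERR(b, f, a...) BUS_MSG((b), error, f, ##a)

    int main(void)
    {
        struct bus bus = { .id = 0x14 };

        BUS_DBG(&bus, "probing %ss", "device");
        BUS_ERR(&bus, "timeout after %d us", 2200);
        return 0;
    }
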
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c
new file mode 100644
index 000000000000..96bbdda0f439
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define gf119_i2c_bus(p) container_of((p), struct gf119_i2c_bus, base)
+#include "bus.h"
+
+struct gf119_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u32 addr;
+};
+
+static void
+gf119_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x00000001, state ? 0x00000001 : 0);
+}
+
+static void
+gf119_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x00000002, state ? 0x00000002 : 0);
+}
+
+static int
+gf119_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000010);
+}
+
+static int
+gf119_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000020);
+}
+
+static void
+gf119_i2c_bus_init(struct nvkm_i2c_bus *base)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_wr32(device, bus->addr, 0x00000007);
+}
+
+static const struct nvkm_i2c_bus_func
+gf119_i2c_bus_func = {
+	.init = gf119_i2c_bus_init,
+	.drive_scl = gf119_i2c_bus_drive_scl,
+	.drive_sda = gf119_i2c_bus_drive_sda,
+	.sense_scl = gf119_i2c_bus_sense_scl,
+	.sense_sda = gf119_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+gf119_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+		 struct nvkm_i2c_bus **pbus)
+{
+	struct gf119_i2c_bus *bus;
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&gf119_i2c_bus_func, pad, id, &bus->base);
+	bus->addr = 0x00d014 + (drive * 0x20);
+	return 0;
+}
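
The drive_scl()/drive_sda() hooks above flip a single bit of the per-bus register through nvkm_mask(), a read-modify-write that leaves the neighbouring bits untouched. A minimal sketch of that read-modify-write; a plain variable stands in for the MMIO register at bus->addr:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reg = 0x00000036;       /* pretend MMIO register */

    static uint32_t mask32(uint32_t *r, uint32_t mask, uint32_t data)
    {
        uint32_t old = *r;                  /* nvkm_rd32() */

        *r = (old & ~mask) | data;          /* nvkm_wr32() */
        return old;
    }

    int main(void)
    {
        mask32(&reg, 0x00000001, 0x00000001);   /* drive SCL high */
        printf("%08x\n", reg);                  /* 00000037 */
        mask32(&reg, 0x00000002, 0x00000000);   /* drive SDA low */
        printf("%08x\n", reg);                  /* 00000035 */
        return 0;
    }
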
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c
new file mode 100644
index 000000000000..a58db159231f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv04_i2c_bus(p) container_of((p), struct nv04_i2c_bus, base)
+#include "bus.h"
+
+#include <subdev/vga.h>
+
+struct nv04_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u8 drive;
+	u8 sense;
+};
+
+static void
+nv04_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	u8 val = nvkm_rdvgac(device, 0, bus->drive);
+	if (state) val |= 0x20;
+	else	   val &= 0xdf;
+	nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
+}
+
+static void
+nv04_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	u8 val = nvkm_rdvgac(device, 0, bus->drive);
+	if (state) val |= 0x10;
+	else	   val &= 0xef;
+	nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
+}
+
+static int
+nv04_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x04);
+}
+
+static int
+nv04_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x08);
+}
+
+static const struct nvkm_i2c_bus_func
+nv04_i2c_bus_func = {
+	.drive_scl = nv04_i2c_bus_drive_scl,
+	.drive_sda = nv04_i2c_bus_drive_sda,
+	.sense_scl = nv04_i2c_bus_sense_scl,
+	.sense_sda = nv04_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv04_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive, u8 sense,
+		 struct nvkm_i2c_bus **pbus)
+{
+	struct nv04_i2c_bus *bus;
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&nv04_i2c_bus_func, pad, id, &bus->base);
+	bus->drive = drive;
+	bus->sense = sense;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c
new file mode 100644
index 000000000000..cdd73dcb1197
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv4e_i2c_bus(p) container_of((p), struct nv4e_i2c_bus, base)
+#include "bus.h"
+
+struct nv4e_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u32 addr;
+};
+
+static void
+nv4e_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x2f, state ? 0x21 : 0x01);
+}
+
+static void
+nv4e_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x1f, state ? 0x11 : 0x01);
+}
+
+static int
+nv4e_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00040000);
+}
+
+static int
+nv4e_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00080000);
+}
+
+static const struct nvkm_i2c_bus_func
+nv4e_i2c_bus_func = {
+	.drive_scl = nv4e_i2c_bus_drive_scl,
+	.drive_sda = nv4e_i2c_bus_drive_sda,
+	.sense_scl = nv4e_i2c_bus_sense_scl,
+	.sense_sda = nv4e_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv4e_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+		 struct nvkm_i2c_bus **pbus)
+{
+	struct nv4e_i2c_bus *bus;
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&nv4e_i2c_bus_func, pad, id, &bus->base);
+	bus->addr = 0x600800 + drive;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c
new file mode 100644
index 000000000000..8db8399381ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv50_i2c_bus(p) container_of((p), struct nv50_i2c_bus, base)
+#include "bus.h"
+
+#include <subdev/vga.h>
+
+struct nv50_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u32 addr;
+	u32 data;
+};
+
+static void
+nv50_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	if (state) bus->data |= 0x01;
+	else	   bus->data &= 0xfe;
+	nvkm_wr32(device, bus->addr, bus->data);
+}
+
+static void
+nv50_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	if (state) bus->data |= 0x02;
+	else	   bus->data &= 0xfd;
+	nvkm_wr32(device, bus->addr, bus->data);
+}
+
+static int
+nv50_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000001);
+}
+
+static int
+nv50_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000002);
+}
+
+static void
+nv50_i2c_bus_init(struct nvkm_i2c_bus *base)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_wr32(device, bus->addr, (bus->data = 0x00000007));
+}
+
+static const struct nvkm_i2c_bus_func
+nv50_i2c_bus_func = {
+	.init = nv50_i2c_bus_init,
+	.drive_scl = nv50_i2c_bus_drive_scl,
+	.drive_sda = nv50_i2c_bus_drive_sda,
+	.sense_scl = nv50_i2c_bus_sense_scl,
+	.sense_sda = nv50_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv50_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+		 struct nvkm_i2c_bus **pbus)
+{
+	static const u32 addr[] = {
+		0x00e138, 0x00e150, 0x00e168, 0x00e180,
+		0x00e254, 0x00e274, 0x00e764, 0x00e780,
+		0x00e79c, 0x00e7b8
+	};
+	struct nv50_i2c_bus *bus;
+
+	if (drive >= ARRAY_SIZE(addr)) {
+		nvkm_warn(&pad->i2c->subdev, "bus %d unknown\n", drive);
+		return -ENODEV;
+	}
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&nv50_i2c_bus_func, pad, id, &bus->base);
+	bus->addr = addr[drive];
+	bus->data = 0x00000007;
+	return 0;
+}
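
Unlike gf119, nv50_i2c_bus keeps a software shadow (bus->data) of the drive register: each drive_scl()/drive_sda() call updates the cached value and writes it back whole, so no register read is needed on the hot path. A minimal sketch of the shadow-register technique; wr32() just prints where the real code calls nvkm_wr32():

    #include <stdint.h>
    #include <stdio.h>

    struct shadow_bus {
        uint32_t data;      /* software copy of the drive register */
    };

    static void wr32(uint32_t val) { printf("wr %08x\n", val); }

    static void drive_scl(struct shadow_bus *bus, int state)
    {
        /* update the cached value, write it back whole */
        if (state)
            bus->data |= 0x01;
        else
            bus->data &= ~0x01u;
        wr32(bus->data);
    }

    int main(void)
    {
        struct shadow_bus bus = { .data = 0x00000007 }; /* init value */

        drive_scl(&bus, 0);     /* wr 00000006 */
        drive_scl(&bus, 1);     /* wr 00000007 */
        return 0;
    }
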
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
index 2a2dd47b9835..bb2a31d88161 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
@@ -21,26 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
 
 void
 g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
 {
-	u32 intr = nv_rd32(i2c, 0x00e06c);
-	u32 stat = nv_rd32(i2c, 0x00e068) & intr, i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x00e06c);
+	u32 stat = nvkm_rd32(device, 0x00e068) & intr, i;
 	for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
 		if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
 		if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
 		if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
 		if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
 	}
-	nv_wr32(i2c, 0x00e06c, intr);
+	nvkm_wr32(device, 0x00e06c, intr);
 }
 
 void
 g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
 {
-	u32 temp = nv_rd32(i2c, 0x00e068), i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 temp = nvkm_rd32(device, 0x00e068), i;
 	for (i = 0; i < 8; i++) {
 		if (mask & (1 << i)) {
 			if (!(data & (1 << i))) {
@@ -50,230 +53,20 @@ g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
 			temp |= type << (i * 4);
 		}
 	}
-	nv_wr32(i2c, 0x00e068, temp);
-}
-
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nvkm_i2c *aux, int ch)
-{
-	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nvkm_i2c *aux, int ch)
-{
-	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
-	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
-	const u32 urep = unksel ? 0x01000000 : 0x02000000;
-	u32 ctrl, timeout;
-
-	/* wait up to 1ms for any previous transaction to be done... */
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
-			return -EBUSY;
-		}
-	} while (ctrl & 0x03010000);
-
-	/* set some magic, and wait up to 1ms for it to appear */
-	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("magic wait 0x%08x\n", ctrl);
-			auxch_fini(aux, ch);
-			return -EBUSY;
-		}
-	} while ((ctrl & 0x03000000) != urep);
-
-	return 0;
-}
-
-int
-g94_aux(struct nvkm_i2c_port *base, bool retry,
-	 u8 type, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *aux = nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	u32 ctrl, stat, timeout, retries;
-	u32 xbuf[4] = {};
-	int ch = port->addr;
-	int ret, i;
-
-	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
-	ret = auxch_init(aux, ch);
-	if (ret)
-		goto out;
-
-	stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
-	if (!(stat & 0x10000000)) {
-		AUX_DBG("sink not detected\n");
-		ret = -ENXIO;
-		goto out;
-	}
-
-	if (!(type & 1)) {
-		memcpy(xbuf, data, size);
-		for (i = 0; i < 16; i += 4) {
-			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
-			nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
-		}
-	}
-
-	ctrl  = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-	ctrl &= ~0x0001f0ff;
-	ctrl |= type << 12;
-	ctrl |= size - 1;
-	nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
-
-	/* (maybe) retry transaction a number of times on failure... */
-	for (retries = 0; !ret && retries < 32; retries++) {
-		/* reset, and delay a while if this is a retry */
-		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
-		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
-		if (retries)
-			udelay(400);
-
-		/* transaction request, wait up to 1ms for it to complete */
-		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
-
-		timeout = 1000;
-		do {
-			ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-			udelay(1);
-			if (!timeout--) {
-				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
-				ret = -EIO;
-				goto out;
-			}
-		} while (ctrl & 0x00010000);
-		ret = 1;
-
-		/* read status, and check if transaction completed ok */
-		stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
-		if ((stat & 0x000f0000) == 0x00080000 ||
-		    (stat & 0x000f0000) == 0x00020000)
-			ret = retry ? 0 : 1;
-		if ((stat & 0x00000100))
-			ret = -ETIMEDOUT;
-		if ((stat & 0x00000e00))
-			ret = -EIO;
-
-		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
-	}
-
-	if (type & 1) {
-		for (i = 0; i < 16; i += 4) {
-			xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
-			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
-		}
-		memcpy(data, xbuf, size);
-	}
-
-out:
-	auxch_fini(aux, ch);
-	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+	nvkm_wr32(device, 0x00e068, temp);
 }
 
 static const struct nvkm_i2c_func
-g94_i2c_func = {
-	.drive_scl = nv50_i2c_drive_scl,
-	.drive_sda = nv50_i2c_drive_sda,
-	.sense_scl = nv50_i2c_sense_scl,
-	.sense_sda = nv50_i2c_sense_sda,
-};
-
-static int
-g94_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 index,
-		  struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &g94_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	if (info->drive >= nv50_i2c_addr_nr)
-		return -EINVAL;
-
-	port->state = 7;
-	port->addr = nv50_i2c_addr[info->drive];
-	return 0;
-}
-
-static const struct nvkm_i2c_func
-g94_aux_func = {
-	.aux       = g94_aux,
+g94_i2c = {
+	.pad_x_new = g94_i2c_pad_x_new,
+	.pad_s_new = g94_i2c_pad_s_new,
+	.aux = 4,
+	.aux_stat = g94_aux_stat,
+	.aux_mask = g94_aux_mask,
 };
 
 int
-g94_aux_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 index,
-		  struct nvkm_object **pobject)
+g94_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_aux_algo, &g94_aux_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->base.aux = info->auxch;
-	port->addr = info->auxch;
-	return 0;
+	return nvkm_i2c_new_(&g94_i2c, device, index, pi2c);
 }
-
-static struct nvkm_oclass
-g94_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = g94_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = g94_aux_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-g94_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = g94_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &g94_i2c_pad_oclass,
-	.aux = 4,
-	.aux_stat = g94_aux_stat,
-	.aux_mask = g94_aux_mask,
-}.base;
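
Note on the pattern above: every per-chipset file in this series shrinks to a
const nvkm_i2c_func table plus a one-line constructor that forwards to
nvkm_i2c_new_(). The common constructor itself is not visible in this diff;
what follows is only a minimal sketch, assuming the i2c->func and i2c->pad
members implied by the priv.h and pad.c hunks later in this series, of the
shape it plausibly takes in base.c:

/* Hedged sketch, NOT part of this patch: a plausible core of
 * nvkm_i2c_new_().  Field names are assumed from other hunks. */
int
nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
	      int index, struct nvkm_i2c **pi2c)
{
	struct nvkm_i2c *i2c;

	if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
		return -ENOMEM;
	i2c->func = func;
	INIT_LIST_HEAD(&i2c->pad);
	/* the real constructor would also register the subdev and walk the
	 * DCB/CCB tables, calling func->pad_x_new()/pad_s_new() per entry */
	return 0;
}
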
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
deleted file mode 100644
index 4d4ac6638140..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-
-static int
-gf110_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000010);
-}
-
-static int
-gf110_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000020);
-}
-
-static const struct nvkm_i2c_func
-gf110_i2c_func = {
-	.drive_scl = nv50_i2c_drive_scl,
-	.drive_sda = nv50_i2c_drive_sda,
-	.sense_scl = gf110_i2c_sense_scl,
-	.sense_sda = gf110_i2c_sense_sda,
-};
-
-int
-gf110_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 index,
-		    struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &gf110_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->state = 0x00000007;
-	port->addr = 0x00d014 + (info->drive * 0x20);
-	return 0;
-}
-
-struct nvkm_oclass
-gf110_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = gf110_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = g94_aux_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-gf110_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gf110_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &g94_i2c_pad_oclass,
-	.aux = 4,
-	.aux_stat = g94_aux_stat,
-	.aux_mask = g94_aux_mask,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
index e290b40f2d13..ae4aad3fcd2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
@@ -21,18 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
 
-struct nvkm_oclass *
-gf117_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0xd7),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gf110_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &nv04_i2c_pad_oclass,
-}.base;
+static const struct nvkm_i2c_func
+gf117_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+};
+
+int
+gf117_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gf117_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
new file mode 100644
index 000000000000..6f2b02af42c8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "pad.h"
+
+static const struct nvkm_i2c_func
+gf119_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+	.pad_s_new = gf119_i2c_pad_s_new,
+	.aux = 4,
+	.aux_stat = g94_aux_stat,
+	.aux_mask = g94_aux_mask,
+};
+
+int
+gf119_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gf119_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
index 1a464903a992..f9f6bf4b66c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
@@ -21,26 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
 
 void
 gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
 {
-	u32 intr = nv_rd32(i2c, 0x00dc60);
-	u32 stat = nv_rd32(i2c, 0x00dc68) & intr, i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x00dc60);
+	u32 stat = nvkm_rd32(device, 0x00dc68) & intr, i;
 	for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
 		if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
 		if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
 		if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
 		if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
 	}
-	nv_wr32(i2c, 0x00dc60, intr);
+	nvkm_wr32(device, 0x00dc60, intr);
 }
 
 void
 gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
 {
-	u32 temp = nv_rd32(i2c, 0x00dc68), i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 temp = nvkm_rd32(device, 0x00dc68), i;
 	for (i = 0; i < 8; i++) {
 		if (mask & (1 << i)) {
 			if (!(data & (1 << i))) {
@@ -50,22 +53,20 @@ gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
 			temp |= type << (i * 4);
 		}
 	}
-	nv_wr32(i2c, 0x00dc68, temp);
+	nvkm_wr32(device, 0x00dc68, temp);
 }
 
-struct nvkm_oclass *
-gk104_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gf110_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &g94_i2c_pad_oclass,
+static const struct nvkm_i2c_func
+gk104_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+	.pad_s_new = gf119_i2c_pad_s_new,
 	.aux = 4,
 	.aux_stat = gk104_aux_stat,
 	.aux_mask = gk104_aux_mask,
-}.base;
+};
+
+int
+gk104_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gk104_i2c, device, index, pi2c);
+}
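
The gk104_aux_stat() hunk above de-interleaves one status nibble per AUX
channel from the 0x00dc68-masked interrupt word. A standalone demonstration
of that unpacking (userspace C; the bit names simply follow the
*hi/*lo/*rq/*tx output pointers):

#include <stdio.h>

int
main(void)
{
	unsigned stat = 0x00008421; /* ch0:hi, ch1:lo, ch2:rq, ch3:tx */
	unsigned hi = 0, lo = 0, rq = 0, tx = 0, i;

	for (i = 0; i < 8; i++) {
		if (stat & (1u << (i * 4))) hi |= 1u << i;
		if (stat & (2u << (i * 4))) lo |= 1u << i;
		if (stat & (4u << (i * 4))) rq |= 1u << i;
		if (stat & (8u << (i * 4))) tx |= 1u << i;
	}
	printf("hi=%02x lo=%02x rq=%02x tx=%02x\n", hi, lo, rq, tx);
	return 0; /* prints: hi=01 lo=02 rq=04 tx=08 */
}
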
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
index ab64237b3842..ff9f7d62f6be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
@@ -21,199 +21,20 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
-
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nvkm_i2c *aux, int ch)
-{
-	nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nvkm_i2c *aux, int ch)
-{
-	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
-	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
-	const u32 urep = unksel ? 0x01000000 : 0x02000000;
-	u32 ctrl, timeout;
-
-	/* wait up to 1ms for any previous transaction to be done... */
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
-			return -EBUSY;
-		}
-	} while (ctrl & 0x03010000);
-
-	/* set some magic, and wait up to 1ms for it to appear */
-	nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00300000, ureq);
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("magic wait 0x%08x\n", ctrl);
-			auxch_fini(aux, ch);
-			return -EBUSY;
-		}
-	} while ((ctrl & 0x03000000) != urep);
-
-	return 0;
-}
-
-int
-gm204_aux(struct nvkm_i2c_port *base, bool retry,
-	 u8 type, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *aux = nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	u32 ctrl, stat, timeout, retries;
-	u32 xbuf[4] = {};
-	int ch = port->addr;
-	int ret, i;
-
-	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
-	ret = auxch_init(aux, ch);
-	if (ret)
-		goto out;
-
-	stat = nv_rd32(aux, 0x00d958 + (ch * 0x50));
-	if (!(stat & 0x10000000)) {
-		AUX_DBG("sink not detected\n");
-		ret = -ENXIO;
-		goto out;
-	}
-
-	if (!(type & 1)) {
-		memcpy(xbuf, data, size);
-		for (i = 0; i < 16; i += 4) {
-			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
-			nv_wr32(aux, 0x00d930 + (ch * 0x50) + i, xbuf[i / 4]);
-		}
-	}
-
-	ctrl  = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-	ctrl &= ~0x0001f0ff;
-	ctrl |= type << 12;
-	ctrl |= size - 1;
-	nv_wr32(aux, 0x00d950 + (ch * 0x50), addr);
-
-	/* (maybe) retry transaction a number of times on failure... */
-	for (retries = 0; !ret && retries < 32; retries++) {
-		/* reset, and delay a while if this is a retry */
-		nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x80000000 | ctrl);
-		nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00000000 | ctrl);
-		if (retries)
-			udelay(400);
-
-		/* transaction request, wait up to 1ms for it to complete */
-		nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00010000 | ctrl);
-
-		timeout = 1000;
-		do {
-			ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-			udelay(1);
-			if (!timeout--) {
-				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
-				ret = -EIO;
-				goto out;
-			}
-		} while (ctrl & 0x00010000);
-		ret = 1;
-
-		/* read status, and check if transaction completed ok */
-		stat = nv_mask(aux, 0x00d958 + (ch * 0x50), 0, 0);
-		if ((stat & 0x000f0000) == 0x00080000 ||
-		    (stat & 0x000f0000) == 0x00020000)
-			ret = retry ? 0 : 1;
-		if ((stat & 0x00000100))
-			ret = -ETIMEDOUT;
-		if ((stat & 0x00000e00))
-			ret = -EIO;
-
-		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
-	}
-
-	if (type & 1) {
-		for (i = 0; i < 16; i += 4) {
-			xbuf[i / 4] = nv_rd32(aux, 0x00d940 + (ch * 0x50) + i);
-			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
-		}
-		memcpy(data, xbuf, size);
-	}
-
-out:
-	auxch_fini(aux, ch);
-	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
-}
+#include "priv.h"
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-gm204_aux_func = {
-	.aux       = gm204_aux,
+gm204_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+	.pad_s_new = gm204_i2c_pad_s_new,
+	.aux = 8,
+	.aux_stat = gk104_aux_stat,
+	.aux_mask = gk104_aux_mask,
 };
 
 int
-gm204_aux_port_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 index,
-		    struct nvkm_object **pobject)
+gm204_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_aux_algo, &gm204_aux_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->base.aux = info->auxch;
-	port->addr = info->auxch;
-	return 0;
+	return nvkm_i2c_new_(&gm204_i2c, device, index, pi2c);
 }
-
-struct nvkm_oclass
-gm204_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = gf110_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = gm204_aux_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-gm204_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gm204_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &gm204_i2c_pad_oclass,
-	.aux = 8,
-	.aux_stat = gk104_aux_stat,
-	.aux_mask = gk104_aux_mask,
-}.base;
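
The gm204_aux() removed above (like its g94 twin earlier in this diff) drove
the whole AUX transaction itself: init the channel, stage up to 16 bytes in
xbuf, kick the 0x00d954 control register, poll it for up to 1ms, and decode
the 0x00d958 status word, retrying up to 32 times. That logic is being
centralised out of the per-chipset files. A condensed sketch of just the
status decode the old loop applied -- the field meanings are inferred from
the removed code, not from documentation:

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch only: reply-field values 0x2/0x8 in bits 16..19 were treated as
 * "retry if the caller allows it", bit 8 as a timeout, and bits 9..11 as
 * hard I/O errors; later checks override earlier ones, as in the old loop. */
static int
aux_decode_stat(u32 stat, bool retry)
{
	int ret = 1; /* transaction completed */

	if ((stat & 0x000f0000) == 0x00080000 ||
	    (stat & 0x000f0000) == 0x00020000)
		ret = retry ? 0 : 1;
	if (stat & 0x00000100)
		ret = -ETIMEDOUT;
	if (stat & 0x00000e00)
		ret = -EIO;
	return ret;
}
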
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
index 4cdf1c489353..18776f49355c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
@@ -22,107 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
-
-#include <subdev/vga.h>
-
-struct nv04_i2c_priv {
-	struct nvkm_i2c base;
-};
-
-struct nv04_i2c_port {
-	struct nvkm_i2c_port base;
-	u8 drive;
-	u8 sense;
-};
-
-static void
-nv04_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	u8 val = nv_rdvgac(priv, 0, port->drive);
-	if (state) val |= 0x20;
-	else	   val &= 0xdf;
-	nv_wrvgac(priv, 0, port->drive, val | 0x01);
-}
-
-static void
-nv04_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	u8 val = nv_rdvgac(priv, 0, port->drive);
-	if (state) val |= 0x10;
-	else	   val &= 0xef;
-	nv_wrvgac(priv, 0, port->drive, val | 0x01);
-}
-
-static int
-nv04_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
-}
-
-static int
-nv04_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
-}
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-nv04_i2c_func = {
-	.drive_scl = nv04_i2c_drive_scl,
-	.drive_sda = nv04_i2c_drive_sda,
-	.sense_scl = nv04_i2c_sense_scl,
-	.sense_sda = nv04_i2c_sense_sda,
+nv04_i2c = {
+	.pad_x_new = nv04_i2c_pad_new,
 };
 
-static int
-nv04_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
+int
+nv04_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct dcb_i2c_entry *info = data;
-	struct nv04_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &nv04_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->drive = info->drive;
-	port->sense = info->sense;
-	return 0;
+	return nvkm_i2c_new_(&nv04_i2c, device, index, pi2c);
 }
-
-static struct nvkm_oclass
-nv04_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = nv04_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-nv04_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = nv04_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
index 046fe5e2ea19..6b762f7cee9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
@@ -22,99 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
-
-#include <subdev/vga.h>
-
-struct nv4e_i2c_priv {
-	struct nvkm_i2c base;
-};
-
-struct nv4e_i2c_port {
-	struct nvkm_i2c_port base;
-	u32 addr;
-};
-
-static void
-nv4e_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
-}
-
-static void
-nv4e_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
-}
-
-static int
-nv4e_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00040000);
-}
-
-static int
-nv4e_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00080000);
-}
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-nv4e_i2c_func = {
-	.drive_scl = nv4e_i2c_drive_scl,
-	.drive_sda = nv4e_i2c_drive_sda,
-	.sense_scl = nv4e_i2c_sense_scl,
-	.sense_sda = nv4e_i2c_sense_sda,
+nv4e_i2c = {
+	.pad_x_new = nv4e_i2c_pad_new,
 };
 
-static int
-nv4e_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
+int
+nv4e_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct dcb_i2c_entry *info = data;
-	struct nv4e_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &nv4e_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->addr = 0x600800 + info->drive;
-	return 0;
+	return nvkm_i2c_new_(&nv4e_i2c, device, index, pi2c);
 }
-
-static struct nvkm_oclass
-nv4e_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = nv4e_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-nv4e_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x4e),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = nv4e_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
index fba5b26a5682..75640ab97d6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
@@ -21,113 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
-
-void
-nv50_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	if (state) port->state |= 0x01;
-	else	   port->state &= 0xfe;
-	nv_wr32(priv, port->addr, port->state);
-}
-
-void
-nv50_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	if (state) port->state |= 0x02;
-	else	   port->state &= 0xfd;
-	nv_wr32(priv, port->addr, port->state);
-}
-
-int
-nv50_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000001);
-}
-
-int
-nv50_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000002);
-}
+#include "priv.h"
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-nv50_i2c_func = {
-	.drive_scl = nv50_i2c_drive_scl,
-	.drive_sda = nv50_i2c_drive_sda,
-	.sense_scl = nv50_i2c_sense_scl,
-	.sense_sda = nv50_i2c_sense_sda,
-};
-
-const u32 nv50_i2c_addr[] = {
-	0x00e138, 0x00e150, 0x00e168, 0x00e180,
-	0x00e254, 0x00e274, 0x00e764, 0x00e780,
-	0x00e79c, 0x00e7b8
+nv50_i2c = {
+	.pad_x_new = nv50_i2c_pad_new,
 };
-const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
-
-static int
-nv50_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &nv50_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	if (info->drive >= nv50_i2c_addr_nr)
-		return -EINVAL;
-
-	port->state = 0x00000007;
-	port->addr = nv50_i2c_addr[info->drive];
-	return 0;
-}
 
 int
-nv50_i2c_port_init(struct nvkm_object *object)
+nv50_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(object);
-	struct nv50_i2c_port *port = (void *)object;
-	nv_wr32(priv, port->addr, port->state);
-	return nvkm_i2c_port_init(&port->base);
+	return nvkm_i2c_new_(&nv50_i2c, device, index, pi2c);
 }
-
-static struct nvkm_oclass
-nv50_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = nv50_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-nv50_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = nv50_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-}.base;
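
The nv50 bit-bang helpers removed above map SCL to bit 0 and SDA to bit 1 of
a single per-port register, writing a cached port->state and sensing the same
bits back. A toy userspace model of that behaviour (the register is stood in
for by a plain variable):

#include <stdio.h>

static unsigned reg;       /* stands in for nv_wr32/nv_rd32 at port->addr */
static unsigned state = 7; /* initial port->state, as in the old ctor */

static void drive_scl(int v) { state = v ? (state | 1) : (state & ~1u); reg = state; }
static void drive_sda(int v) { state = v ? (state | 2) : (state & ~2u); reg = state; }
static int  sense_scl(void)  { return !!(reg & 1); }
static int  sense_sda(void)  { return !!(reg & 2); }

int
main(void)
{
	drive_scl(0);
	drive_sda(1);
	printf("scl=%d sda=%d\n", sense_scl(), sense_sda()); /* scl=0 sda=1 */
	return 0;
}
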
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
deleted file mode 100644
index b3139e721b02..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __NV50_I2C_H__
-#define __NV50_I2C_H__
-#include "priv.h"
-
-struct nv50_i2c_priv {
-	struct nvkm_i2c base;
-};
-
-struct nv50_i2c_port {
-	struct nvkm_i2c_port base;
-	u32 addr;
-	u32 state;
-};
-
-extern const u32 nv50_i2c_addr[];
-extern const int nv50_i2c_addr_nr;
-int  nv50_i2c_port_init(struct nvkm_object *);
-int  nv50_i2c_sense_scl(struct nvkm_i2c_port *);
-int  nv50_i2c_sense_sda(struct nvkm_i2c_port *);
-void nv50_i2c_drive_scl(struct nvkm_i2c_port *, int state);
-void nv50_i2c_drive_sda(struct nvkm_i2c_port *, int state);
-
-int  g94_aux_port_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-void g94_i2c_acquire(struct nvkm_i2c_port *);
-void g94_i2c_release(struct nvkm_i2c_port *);
-
-int  gf110_i2c_port_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
index a242eeb67829..2c5fcb9c504b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Red Hat Inc.
+ * Copyright 2015 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,65 +19,98 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "pad.h"
 
-int
-_nvkm_i2c_pad_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_i2c_pad_mode_locked(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c_pad *pad = (void *)object;
-	DBG("-> NULL\n");
-	pad->port = NULL;
-	return nvkm_object_fini(&pad->base, suspend);
+	PAD_TRACE(pad, "-> %s", (mode == NVKM_I2C_PAD_AUX) ? "aux" :
+			      (mode == NVKM_I2C_PAD_I2C) ? "i2c" : "off");
+	if (pad->func->mode)
+		pad->func->mode(pad, mode);
 }
 
-int
-_nvkm_i2c_pad_init(struct nvkm_object *object)
+void
+nvkm_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c_pad *pad = (void *)object;
-	DBG("-> PORT:%02x\n", pad->next->index);
-	pad->port = pad->next;
-	return nvkm_object_init(&pad->base);
+	PAD_TRACE(pad, "mode %d", mode);
+	mutex_lock(&pad->mutex);
+	nvkm_i2c_pad_mode_locked(pad, mode);
+	pad->mode = mode;
+	mutex_unlock(&pad->mutex);
 }
 
-int
-nvkm_i2c_pad_create_(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int index,
-		     int size, void **pobject)
+void
+nvkm_i2c_pad_release(struct nvkm_i2c_pad *pad)
 {
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_i2c_port *port;
-	struct nvkm_i2c_pad *pad;
-	int ret;
+	PAD_TRACE(pad, "release");
+	if (pad->mode == NVKM_I2C_PAD_OFF)
+		nvkm_i2c_pad_mode_locked(pad, pad->mode);
+	mutex_unlock(&pad->mutex);
+}
 
-	list_for_each_entry(port, &i2c->ports, head) {
-		pad = nvkm_i2c_pad(port);
-		if (pad->index == index) {
-			atomic_inc(&nv_object(pad)->refcount);
-			*pobject = pad;
-			return 1;
+int
+nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
+{
+	PAD_TRACE(pad, "acquire");
+	mutex_lock(&pad->mutex);
+	if (pad->mode != mode) {
+		if (pad->mode != NVKM_I2C_PAD_OFF) {
+			mutex_unlock(&pad->mutex);
+			return -EBUSY;
 		}
+		nvkm_i2c_pad_mode_locked(pad, mode);
 	}
+	return 0;
+}
+
+void
+nvkm_i2c_pad_fini(struct nvkm_i2c_pad *pad)
+{
+	PAD_TRACE(pad, "fini");
+	nvkm_i2c_pad_mode_locked(pad, NVKM_I2C_PAD_OFF);
+}
 
-	ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
-	pad = *pobject;
-	if (ret)
-		return ret;
+void
+nvkm_i2c_pad_init(struct nvkm_i2c_pad *pad)
+{
+	PAD_TRACE(pad, "init");
+	nvkm_i2c_pad_mode_locked(pad, pad->mode);
+}
 
-	pad->index = index;
-	return 0;
+void
+nvkm_i2c_pad_del(struct nvkm_i2c_pad **ppad)
+{
+	struct nvkm_i2c_pad *pad = *ppad;
+	if (pad) {
+		PAD_TRACE(pad, "dtor");
+		list_del(&pad->head);
+		kfree(pad);
+		pad = NULL;
+	}
+}
+
+void
+nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
+		  int id, struct nvkm_i2c_pad *pad)
+{
+	pad->func = func;
+	pad->i2c = i2c;
+	pad->id = id;
+	pad->mode = NVKM_I2C_PAD_OFF;
+	mutex_init(&pad->mutex);
+	list_add_tail(&pad->head, &i2c->pad);
+	PAD_TRACE(pad, "ctor");
 }
 
 int
-_nvkm_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
+nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
+		  int id, struct nvkm_i2c_pad **ppad)
 {
-	struct nvkm_i2c_pad *pad;
-	int ret;
-	ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
-	*pobject = nv_object(pad);
-	return ret;
+	if (!(*ppad = kzalloc(sizeof(**ppad), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_i2c_pad_ctor(func, i2c, id, *ppad);
+	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index f3422cc6f8db..9eeb992944c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,56 +1,67 @@
 #ifndef __NVKM_I2C_PAD_H__
 #define __NVKM_I2C_PAD_H__
-#include "priv.h"
+#include <subdev/i2c.h>
 
 struct nvkm_i2c_pad {
-	struct nvkm_object base;
-	int index;
-	struct nvkm_i2c_port *port;
-	struct nvkm_i2c_port *next;
+	const struct nvkm_i2c_pad_func *func;
+	struct nvkm_i2c *i2c;
+#define NVKM_I2C_PAD_HYBRID(n) /* 'n' is hw pad index */                     (n)
+#define NVKM_I2C_PAD_CCB(n) /* 'n' is ccb index */                 ((n) + 0x100)
+#define NVKM_I2C_PAD_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x200)
+	int id;
+
+	enum nvkm_i2c_pad_mode {
+		NVKM_I2C_PAD_OFF,
+		NVKM_I2C_PAD_I2C,
+		NVKM_I2C_PAD_AUX,
+	} mode;
+	struct mutex mutex;
+	struct list_head head;
+};
+
+struct nvkm_i2c_pad_func {
+	int (*bus_new_0)(struct nvkm_i2c_pad *, int id, u8 drive, u8 sense,
+			 struct nvkm_i2c_bus **);
+	int (*bus_new_4)(struct nvkm_i2c_pad *, int id, u8 drive,
+			 struct nvkm_i2c_bus **);
+
+	int (*aux_new_6)(struct nvkm_i2c_pad *, int id, u8 drive,
+			 struct nvkm_i2c_aux **);
+
+	void (*mode)(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
 };
 
-static inline struct nvkm_i2c_pad *
-nvkm_i2c_pad(struct nvkm_i2c_port *port)
-{
-	struct nvkm_object *pad = nv_object(port);
-	while (!nv_iclass(pad->parent, NV_SUBDEV_CLASS))
-		pad = pad->parent;
-	return (void *)pad;
-}
-
-#define nvkm_i2c_pad_create(p,e,o,i,d)                                         \
-	nvkm_i2c_pad_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
-#define nvkm_i2c_pad_destroy(p) ({                                             \
-	struct nvkm_i2c_pad *_p = (p);                                         \
-	_nvkm_i2c_pad_dtor(nv_object(_p));                                     \
-})
-#define nvkm_i2c_pad_init(p) ({                                                \
-	struct nvkm_i2c_pad *_p = (p);                                         \
-	_nvkm_i2c_pad_init(nv_object(_p));                                     \
-})
-#define nvkm_i2c_pad_fini(p,s) ({                                              \
-	struct nvkm_i2c_pad *_p = (p);                                         \
-	_nvkm_i2c_pad_fini(nv_object(_p), (s));                                \
-})
-
-int nvkm_i2c_pad_create_(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, int index, int, void **);
-
-int _nvkm_i2c_pad_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-#define _nvkm_i2c_pad_dtor nvkm_object_destroy
-int _nvkm_i2c_pad_init(struct nvkm_object *);
-int _nvkm_i2c_pad_fini(struct nvkm_object *, bool);
-
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_i2c_pad *_pad = (void *)pad;                               \
-	nv_##l(_pad, "PAD:%c:%02x: "f,                                         \
-	       _pad->index >= 0x100 ? 'X' : 'S',                               \
-	       _pad->index >= 0x100 ? _pad->index - 0x100 : _pad->index, ##a); \
+void nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *, struct nvkm_i2c *,
+		       int id, struct nvkm_i2c_pad *);
+int nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *, struct nvkm_i2c *,
+		      int id, struct nvkm_i2c_pad **);
+void nvkm_i2c_pad_del(struct nvkm_i2c_pad **);
+void nvkm_i2c_pad_init(struct nvkm_i2c_pad *);
+void nvkm_i2c_pad_fini(struct nvkm_i2c_pad *);
+void nvkm_i2c_pad_mode(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+int nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+void nvkm_i2c_pad_release(struct nvkm_i2c_pad *);
+
+void g94_i2c_pad_mode(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+
+int nv04_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int nv4e_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int nv50_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int g94_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gf119_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm204_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+
+int g94_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gf119_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm204_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+
+int anx9805_pad_new(struct nvkm_i2c_bus *, int, u8, struct nvkm_i2c_pad **);
+
+#define PAD_MSG(p,l,f,a...) do {                                               \
+	struct nvkm_i2c_pad *_pad = (p);                                       \
+	nvkm_##l(&_pad->i2c->subdev, "pad %04x: "f"\n", _pad->id, ##a);        \
 } while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define PAD_ERR(p,f,a...) PAD_MSG((p), error, f, ##a)
+#define PAD_DBG(p,f,a...) PAD_MSG((p), debug, f, ##a)
+#define PAD_TRACE(p,f,a...) PAD_MSG((p), trace, f, ##a)
 #endif
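
The three NVKM_I2C_PAD_*() macros above give hardware pads, CCB-indexed pads
and DCB external-encoder pads disjoint id ranges, so a single list (and the
PAD_MSG "%04x" format) can cover all of them; it is also why the g94/gm204
mode functions can recover a register stride from
pad->id - NVKM_I2C_PAD_HYBRID(0). A trivial standalone check:

#include <stdio.h>

#define NVKM_I2C_PAD_HYBRID(n) (n)
#define NVKM_I2C_PAD_CCB(n)    ((n) + 0x100)
#define NVKM_I2C_PAD_EXT(n)    ((n) + 0x200)

int
main(void)
{
	printf("hybrid 3 -> %#x\n", NVKM_I2C_PAD_HYBRID(3)); /* 0x3   */
	printf("ccb    3 -> %#x\n", NVKM_I2C_PAD_CCB(3));    /* 0x103 */
	printf("ext    3 -> %#x\n", NVKM_I2C_PAD_EXT(3));    /* 0x203 */
	return 0;
}
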
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
index e9832f7a7e38..5904bc5f2d2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
@@ -22,64 +22,55 @@
  * Authors: Ben Skeggs
  */
 #include "pad.h"
+#include "aux.h"
+#include "bus.h"
 
-struct g94_i2c_pad {
-	struct nvkm_i2c_pad base;
-	int addr;
-};
-
-static int
-g94_i2c_pad_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct g94_i2c_pad *pad = (void *)object;
-	nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000001);
-	return nvkm_i2c_pad_fini(&pad->base, suspend);
-}
-
-static int
-g94_i2c_pad_init(struct nvkm_object *object)
+void
+g94_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct g94_i2c_pad *pad = (void *)object;
+	struct nvkm_subdev *subdev = &pad->i2c->subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
 
-	switch (nv_oclass(pad->base.next)->handle) {
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
-		nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x00000002);
+	switch (mode) {
+	case NVKM_I2C_PAD_OFF:
+		nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000001);
+		break;
+	case NVKM_I2C_PAD_I2C:
+		nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x0000c001);
+		nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
+		break;
+	case NVKM_I2C_PAD_AUX:
+		nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x00000002);
+		nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
 		break;
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
 	default:
-		nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x0000c001);
+		WARN_ON(1);
 		break;
 	}
-
-	nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000000);
-	return nvkm_i2c_pad_init(&pad->base);
 }
 
-static int
-g94_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 index,
-		 struct nvkm_object **pobject)
-{
-	struct g94_i2c_pad *pad;
-	int ret;
-
-	ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
-	*pobject = nv_object(pad);
-	if (ret)
-		return ret;
+static const struct nvkm_i2c_pad_func
+g94_i2c_pad_s_func = {
+	.bus_new_4 = nv50_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+	.mode = g94_i2c_pad_mode,
+};
 
-	pad->addr = index * 0x50;;
-	return 0;
+int
+g94_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&g94_i2c_pad_s_func, i2c, id, ppad);
 }
 
-struct nvkm_oclass
-g94_i2c_pad_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g94_i2c_pad_ctor,
-		.dtor = _nvkm_i2c_pad_dtor,
-		.init = g94_i2c_pad_init,
-		.fini = g94_i2c_pad_fini,
-	},
+static const struct nvkm_i2c_pad_func
+g94_i2c_pad_x_func = {
+	.bus_new_4 = nv50_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
 };
+
+int
+g94_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&g94_i2c_pad_x_func, i2c, id, ppad);
+}
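
Worked example of the addressing in g94_i2c_pad_mode() above: hybrid pad N
owns a 0x50-byte register stride, so its routing register sits at
0x00e500 + N*0x50 and its power-down bit at 0x00e50c + N*0x50 (gm204 repeats
the same layout at 0x00d970/0x00d97c later in this diff):

#include <stdio.h>

int
main(void)
{
	unsigned id = 2; /* NVKM_I2C_PAD_HYBRID(2) */
	unsigned base = id * 0x50;

	printf("route reg: 0x%06x\n", 0x00e500 + base); /* 0x00e5a0 */
	printf("power reg: 0x%06x\n", 0x00e50c + base); /* 0x00e5ac */
	return 0;
}
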
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
new file mode 100644
index 000000000000..d53212f1aa52
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "aux.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+gf119_i2c_pad_s_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+	.mode = g94_i2c_pad_mode,
+};
+
+int
+gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gf119_i2c_pad_s_func, i2c, id, ppad);
+}
+
+static const struct nvkm_i2c_pad_func
+gf119_i2c_pad_x_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+};
+
+int
+gf119_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gf119_i2c_pad_x_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
index be590405444d..24a4d760c67b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
@@ -22,64 +22,55 @@
  * Authors: Ben Skeggs
  */
 #include "pad.h"
+#include "aux.h"
+#include "bus.h"
 
-struct gm204_i2c_pad {
-	struct nvkm_i2c_pad base;
-	int addr;
-};
-
-static int
-gm204_i2c_pad_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct gm204_i2c_pad *pad = (void *)object;
-	nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000001);
-	return nvkm_i2c_pad_fini(&pad->base, suspend);
-}
-
-static int
-gm204_i2c_pad_init(struct nvkm_object *object)
+static void
+gm204_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct gm204_i2c_pad *pad = (void *)object;
+	struct nvkm_subdev *subdev = &pad->i2c->subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
 
-	switch (nv_oclass(pad->base.next)->handle) {
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
-		nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x00000002);
+	switch (mode) {
+	case NVKM_I2C_PAD_OFF:
+		nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000001);
+		break;
+	case NVKM_I2C_PAD_I2C:
+		nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x0000c001);
+		nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
+		break;
+	case NVKM_I2C_PAD_AUX:
+		nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x00000002);
+		nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
 		break;
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
 	default:
-		nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x0000c001);
+		WARN_ON(1);
 		break;
 	}
-
-	nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000000);
-	return nvkm_i2c_pad_init(&pad->base);
 }
 
-static int
-gm204_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
-{
-	struct gm204_i2c_pad *pad;
-	int ret;
-
-	ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
-	*pobject = nv_object(pad);
-	if (ret)
-		return ret;
+static const struct nvkm_i2c_pad_func
+gm204_i2c_pad_s_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = gm204_i2c_aux_new,
+	.mode = gm204_i2c_pad_mode,
+};
 
-	pad->addr = index * 0x50;;
-	return 0;
+int
+gm204_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gm204_i2c_pad_s_func, i2c, id, ppad);
 }
 
-struct nvkm_oclass
-gm204_i2c_pad_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_i2c_pad_ctor,
-		.dtor = _nvkm_i2c_pad_dtor,
-		.init = gm204_i2c_pad_init,
-		.fini = gm204_i2c_pad_fini,
-	},
+static const struct nvkm_i2c_pad_func
+gm204_i2c_pad_x_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = gm204_i2c_aux_new,
 };
+
+int
+gm204_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gm204_i2c_pad_x_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
index 22c7daaad3a0..310046ad9c61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
@@ -22,13 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "pad.h"
+#include "bus.h"
 
-struct nvkm_oclass
-nv04_i2c_pad_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_pad_ctor,
-		.dtor = _nvkm_i2c_pad_dtor,
-		.init = _nvkm_i2c_pad_init,
-		.fini = _nvkm_i2c_pad_fini,
-	},
+static const struct nvkm_i2c_pad_func
+nv04_i2c_pad_func = {
+	.bus_new_0 = nv04_i2c_bus_new,
 };
+
+int
+nv04_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&nv04_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c
new file mode 100644
index 000000000000..dda6fc0b089d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+nv4e_i2c_pad_func = {
+	.bus_new_4 = nv4e_i2c_bus_new,
+};
+
+int
+nv4e_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&nv4e_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c
new file mode 100644
index 000000000000..a03f25b1914f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+nv50_i2c_pad_func = {
+	.bus_new_4 = nv50_i2c_bus_new,
+};
+
+int
+nv50_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&nv50_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
deleted file mode 100644
index 586f53dad813..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __NVKM_I2C_PORT_H__
-#define __NVKM_I2C_PORT_H__
-#include "priv.h"
-
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_i2c_port *_port = (void *)port;                         \
-	nv_##l(_port, "PORT:%02x: "f, _port->index, ##a);                      \
-} while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index 6586e1567fcf..bf655a66ef40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -1,69 +1,14 @@
 #ifndef __NVKM_I2C_PRIV_H__
 #define __NVKM_I2C_PRIV_H__
+#define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
 #include <subdev/i2c.h>
 
-extern struct nvkm_oclass nv04_i2c_pad_oclass;
-extern struct nvkm_oclass g94_i2c_pad_oclass;
-extern struct nvkm_oclass gm204_i2c_pad_oclass;
+int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *,
+		  int index, struct nvkm_i2c **);
 
-#define nvkm_i2c_port_create(p,e,o,i,a,f,d)                                 \
-	nvkm_i2c_port_create_((p), (e), (o), (i), (a), (f),                 \
-				 sizeof(**d), (void **)d)
-#define nvkm_i2c_port_destroy(p) ({                                         \
-	struct nvkm_i2c_port *port = (p);                                   \
-	_nvkm_i2c_port_dtor(nv_object(i2c));                                \
-})
-#define nvkm_i2c_port_init(p)                                               \
-	nvkm_object_init(&(p)->base)
-#define nvkm_i2c_port_fini(p,s)                                             \
-	nvkm_object_fini(&(p)->base, (s))
-
-int nvkm_i2c_port_create_(struct nvkm_object *, struct nvkm_object *,
-			     struct nvkm_oclass *, u8,
-			     const struct i2c_algorithm *,
-			     const struct nvkm_i2c_func *,
-			     int, void **);
-void _nvkm_i2c_port_dtor(struct nvkm_object *);
-#define _nvkm_i2c_port_init nvkm_object_init
-int  _nvkm_i2c_port_fini(struct nvkm_object *, bool);
-
-#define nvkm_i2c_create(p,e,o,d)                                            \
-	nvkm_i2c_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_i2c_destroy(p) ({                                              \
-	struct nvkm_i2c *i2c = (p);                                         \
-	_nvkm_i2c_dtor(nv_object(i2c));                                     \
-})
-#define nvkm_i2c_init(p) ({                                                 \
-	struct nvkm_i2c *i2c = (p);                                         \
-	_nvkm_i2c_init(nv_object(i2c));                                     \
-})
-#define nvkm_i2c_fini(p,s) ({                                               \
-	struct nvkm_i2c *i2c = (p);                                         \
-	_nvkm_i2c_fini(nv_object(i2c), (s));                                \
-})
-
-int nvkm_i2c_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
-int  _nvkm_i2c_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-void _nvkm_i2c_dtor(struct nvkm_object *);
-int  _nvkm_i2c_init(struct nvkm_object *);
-int  _nvkm_i2c_fini(struct nvkm_object *, bool);
-
-extern struct nvkm_oclass nvkm_anx9805_sclass[];
-extern struct nvkm_oclass gf110_i2c_sclass[];
-
-extern const struct i2c_algorithm nvkm_i2c_bit_algo;
-extern const struct i2c_algorithm nvkm_i2c_aux_algo;
-
-struct nvkm_i2c_impl {
-	struct nvkm_oclass base;
-
-	/* supported i2c port classes */
-	struct nvkm_oclass *sclass;
-	struct nvkm_oclass *pad_x;
-	struct nvkm_oclass *pad_s;
+struct nvkm_i2c_func {
+	int (*pad_x_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
+	int (*pad_s_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
 
 	/* number of native dp aux channels present */
 	int aux;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 8e578f802f66..37a0496f7ed1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -23,55 +23,54 @@
  */
 #include <subdev/ibus.h>
 
-struct gf100_ibus_priv {
-	struct nvkm_ibus base;
-};
-
 static void
-gf100_ibus_intr_hub(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
-	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
-	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400));
-	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
+	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
+	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
+	nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
-gf100_ibus_intr_rop(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
-	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
-	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400));
-	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
+	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
+	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
+	nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
-gf100_ibus_intr_gpc(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
-	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
-	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400));
-	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
+	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
+	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
+	nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
-gf100_ibus_intr(struct nvkm_subdev *subdev)
+gf100_ibus_intr(struct nvkm_subdev *ibus)
 {
-	struct gf100_ibus_priv *priv = (void *)subdev;
-	u32 intr0 = nv_rd32(priv, 0x121c58);
-	u32 intr1 = nv_rd32(priv, 0x121c5c);
-	u32 hubnr = nv_rd32(priv, 0x121c70);
-	u32 ropnr = nv_rd32(priv, 0x121c74);
-	u32 gpcnr = nv_rd32(priv, 0x121c78);
+	struct nvkm_device *device = ibus->device;
+	u32 intr0 = nvkm_rd32(device, 0x121c58);
+	u32 intr1 = nvkm_rd32(device, 0x121c5c);
+	u32 hubnr = nvkm_rd32(device, 0x121c70);
+	u32 ropnr = nvkm_rd32(device, 0x121c74);
+	u32 gpcnr = nvkm_rd32(device, 0x121c78);
 	u32 i;
 
 	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
 		u32 stat = 0x00000100 << i;
 		if (intr0 & stat) {
-			gf100_ibus_intr_hub(priv, i);
+			gf100_ibus_intr_hub(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -79,7 +78,7 @@ gf100_ibus_intr(struct nvkm_subdev *subdev)
 	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
 		u32 stat = 0x00010000 << i;
 		if (intr0 & stat) {
-			gf100_ibus_intr_rop(priv, i);
+			gf100_ibus_intr_rop(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -87,36 +86,24 @@ gf100_ibus_intr(struct nvkm_subdev *subdev)
 	for (i = 0; intr1 && i < gpcnr; i++) {
 		u32 stat = 0x00000001 << i;
 		if (intr1 & stat) {
-			gf100_ibus_intr_gpc(priv, i);
+			gf100_ibus_intr_gpc(ibus, i);
 			intr1 &= ~stat;
 		}
 	}
 }
 
-static int
-gf100_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gf100_ibus_priv *priv;
-	int ret;
-
-	ret = nvkm_ibus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_subdev_func
+gf100_ibus = {
+	.intr = gf100_ibus_intr,
+};
 
-	nv_subdev(priv)->intr = gf100_ibus_intr;
+int
+gf100_ibus_new(struct nvkm_device *device, int index,
+	       struct nvkm_subdev **pibus)
+{
+	struct nvkm_subdev *ibus;
+	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&gf100_ibus, device, index, 0, ibus);
 	return 0;
 }
-
-struct nvkm_oclass
-gf100_ibus_oclass = {
-	.handle = NV_SUBDEV(IBUS, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ibus_ctor,
-		.dtor = _nvkm_ibus_dtor,
-		.init = _nvkm_ibus_init,
-		.fini = _nvkm_ibus_fini,
-	},
-};
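
Dispatch shape of gf100_ibus_intr() above: HUB faults occupy intr0 bits
8..15, ROP faults bits 16..31, and GPC faults intr1 bits 0..gpcnr-1, with
each handler clearing its bit once serviced. A standalone demonstration of
that walk (fault values invented for the demo):

#include <stdio.h>

int
main(void)
{
	unsigned intr0 = 0x00030200, intr1 = 0x00000005;
	unsigned hubnr = 2, ropnr = 16, gpcnr = 4, i;

	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++)
		if (intr0 & (0x00000100u << i)) {
			printf("HUB%u fault\n", i);   /* HUB1 */
			intr0 &= ~(0x00000100u << i);
		}
	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++)
		if (intr0 & (0x00010000u << i)) {
			printf("ROP%u fault\n", i);   /* ROP0, ROP1 */
			intr0 &= ~(0x00010000u << i);
		}
	for (i = 0; intr1 && i < gpcnr; i++)
		if (intr1 & (1u << i)) {
			printf("GPC%u fault\n", i);   /* GPC0, GPC2 */
			intr1 &= ~(1u << i);
		}
	return 0;
}
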
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index 7b6e9a6cd7b2..ba33609f643c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -23,55 +23,54 @@
  */
 #include <subdev/ibus.h>
 
-struct gk104_ibus_priv {
-	struct nvkm_ibus base;
-};
-
 static void
-gk104_ibus_intr_hub(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800));
-	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800));
-	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0800));
-	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
+	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
+	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
+	nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
-gk104_ibus_intr_rop(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800));
-	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800));
-	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0800));
-	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
+	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
+	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
+	nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
-gk104_ibus_intr_gpc(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800));
-	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800));
-	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0800));
-	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
+	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
+	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
+	nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
-gk104_ibus_intr(struct nvkm_subdev *subdev)
+gk104_ibus_intr(struct nvkm_subdev *ibus)
 {
-	struct gk104_ibus_priv *priv = (void *)subdev;
-	u32 intr0 = nv_rd32(priv, 0x120058);
-	u32 intr1 = nv_rd32(priv, 0x12005c);
-	u32 hubnr = nv_rd32(priv, 0x120070);
-	u32 ropnr = nv_rd32(priv, 0x120074);
-	u32 gpcnr = nv_rd32(priv, 0x120078);
+	struct nvkm_device *device = ibus->device;
+	u32 intr0 = nvkm_rd32(device, 0x120058);
+	u32 intr1 = nvkm_rd32(device, 0x12005c);
+	u32 hubnr = nvkm_rd32(device, 0x120070);
+	u32 ropnr = nvkm_rd32(device, 0x120074);
+	u32 gpcnr = nvkm_rd32(device, 0x120078);
 	u32 i;
 
 	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
 		u32 stat = 0x00000100 << i;
 		if (intr0 & stat) {
-			gk104_ibus_intr_hub(priv, i);
+			gk104_ibus_intr_hub(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -79,7 +78,7 @@ gk104_ibus_intr(struct nvkm_subdev *subdev)
 	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
 		u32 stat = 0x00010000 << i;
 		if (intr0 & stat) {
-			gk104_ibus_intr_rop(priv, i);
+			gk104_ibus_intr_rop(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -87,53 +86,40 @@ gk104_ibus_intr(struct nvkm_subdev *subdev)
 	for (i = 0; intr1 && i < gpcnr; i++) {
 		u32 stat = 0x00000001 << i;
 		if (intr1 & stat) {
-			gk104_ibus_intr_gpc(priv, i);
+			gk104_ibus_intr_gpc(ibus, i);
 			intr1 &= ~stat;
 		}
 	}
 }
 
 static int
-gk104_ibus_init(struct nvkm_object *object)
+gk104_ibus_init(struct nvkm_subdev *ibus)
 {
-	struct gk104_ibus_priv *priv = (void *)object;
-	int ret = nvkm_ibus_init(&priv->base);
-	if (ret == 0) {
-		nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000);
-		nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200);
-		nv_mask(priv, 0x122310, 0x0003ffff, 0x00000800);
-		nv_mask(priv, 0x122348, 0x0003ffff, 0x00000100);
-		nv_mask(priv, 0x1223b0, 0x0003ffff, 0x00000fff);
-		nv_mask(priv, 0x122348, 0x0003ffff, 0x00000200);
-		nv_mask(priv, 0x122358, 0x0003ffff, 0x00002880);
-	}
-	return ret;
+	struct nvkm_device *device = ibus->device;
+	nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000);
+	nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200);
+	nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
+	nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
+	nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
+	nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000200);
+	nvkm_mask(device, 0x122358, 0x0003ffff, 0x00002880);
+	return 0;
 }
 
-static int
-gk104_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gk104_ibus_priv *priv;
-	int ret;
-
-	ret = nvkm_ibus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_subdev_func
+gk104_ibus = {
+	.preinit = gk104_ibus_init,
+	.init = gk104_ibus_init,
+	.intr = gk104_ibus_intr,
+};
 
-	nv_subdev(priv)->intr = gk104_ibus_intr;
+int
+gk104_ibus_new(struct nvkm_device *device, int index,
+	       struct nvkm_subdev **pibus)
+{
+	struct nvkm_subdev *ibus;
+	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&gk104_ibus, device, index, 0, ibus);
 	return 0;
 }
-
-struct nvkm_oclass
-gk104_ibus_oclass = {
-	.handle = NV_SUBDEV(IBUS, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ibus_ctor,
-		.dtor = _nvkm_ibus_dtor,
-		.init = gk104_ibus_init,
-		.fini = _nvkm_ibus_fini,
-	},
-};
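
gf100 and gk104 share this interrupt decode, differing only in the register base (0x121c58 vs 0x120058) and the per-unit stride (0x0400 vs 0x0800). The bit layout implied by the dispatch loops above:

/* intr0[15:8]  -> HUB units  (stat = 0x00000100 << i)
 * intr0[31:16] -> ROP units  (stat = 0x00010000 << i)
 * intr1[31:0]  -> GPC units  (stat = 0x00000001 << i)
 *
 * Each per-unit handler logs the faulting addr/data/stat triple and
 * acks the fault by clearing bit 9 (0x00000200) of that unit's
 * status register.
 */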
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index 24dcdfb58a8d..3484079e885a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -22,89 +22,68 @@
 #include <subdev/ibus.h>
 #include <subdev/timer.h>
 
-struct gk20a_ibus_priv {
-	struct nvkm_ibus base;
-};
-
 static void
-gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
+gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
 {
-	nv_mask(priv, 0x137250, 0x3f, 0);
+	struct nvkm_device *device = ibus->device;
+	nvkm_mask(device, 0x137250, 0x3f, 0);
 
-	nv_mask(priv, 0x000200, 0x20, 0);
+	nvkm_mask(device, 0x000200, 0x20, 0);
 	usleep_range(20, 30);
-	nv_mask(priv, 0x000200, 0x20, 0x20);
+	nvkm_mask(device, 0x000200, 0x20, 0x20);
 
-	nv_wr32(priv, 0x12004c, 0x4);
-	nv_wr32(priv, 0x122204, 0x2);
-	nv_rd32(priv, 0x122204);
+	nvkm_wr32(device, 0x12004c, 0x4);
+	nvkm_wr32(device, 0x122204, 0x2);
+	nvkm_rd32(device, 0x122204);
 
 	/*
 	 * Bug: increase clock timeout to avoid operation failure at high
 	 * gpcclk rate.
 	 */
-	nv_wr32(priv, 0x122354, 0x800);
-	nv_wr32(priv, 0x128328, 0x800);
-	nv_wr32(priv, 0x124320, 0x800);
+	nvkm_wr32(device, 0x122354, 0x800);
+	nvkm_wr32(device, 0x128328, 0x800);
+	nvkm_wr32(device, 0x124320, 0x800);
 }
 
 static void
-gk20a_ibus_intr(struct nvkm_subdev *subdev)
+gk20a_ibus_intr(struct nvkm_subdev *ibus)
 {
-	struct gk20a_ibus_priv *priv = (void *)subdev;
-	u32 status0 = nv_rd32(priv, 0x120058);
+	struct nvkm_device *device = ibus->device;
+	u32 status0 = nvkm_rd32(device, 0x120058);
 
 	if (status0 & 0x7) {
-		nv_debug(priv, "resetting priv ring\n");
-		gk20a_ibus_init_priv_ring(priv);
+		nvkm_debug(ibus, "resetting ibus ring\n");
+		gk20a_ibus_init_ibus_ring(ibus);
 	}
 
 	/* Acknowledge interrupt */
-	nv_mask(priv, 0x12004c, 0x2, 0x2);
-
-	if (!nv_wait(subdev, 0x12004c, 0x3f, 0x00))
-		nv_warn(priv, "timeout waiting for ringmaster ack\n");
+	nvkm_mask(device, 0x12004c, 0x2, 0x2);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+			break;
+	);
 }
 
 static int
-gk20a_ibus_init(struct nvkm_object *object)
+gk20a_ibus_init(struct nvkm_subdev *ibus)
 {
-	struct gk20a_ibus_priv *priv = (void *)object;
-	int ret;
-
-	ret = _nvkm_ibus_init(object);
-	if (ret)
-		return ret;
-
-	gk20a_ibus_init_priv_ring(priv);
-
+	gk20a_ibus_init_ibus_ring(ibus);
 	return 0;
 }
 
-static int
-gk20a_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gk20a_ibus_priv *priv;
-	int ret;
-
-	ret = nvkm_ibus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_subdev_func
+gk20a_ibus = {
+	.init = gk20a_ibus_init,
+	.intr = gk20a_ibus_intr,
+};
 
-	nv_subdev(priv)->intr = gk20a_ibus_intr;
+int
+gk20a_ibus_new(struct nvkm_device *device, int index,
+	       struct nvkm_subdev **pibus)
+{
+	struct nvkm_subdev *ibus;
+	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&gk20a_ibus, device, index, 0, ibus);
 	return 0;
 }
-
-struct nvkm_oclass
-gk20a_ibus_oclass = {
-	.handle = NV_SUBDEV(IBUS, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_ibus_ctor,
-		.dtor = _nvkm_ibus_dtor,
-		.init = gk20a_ibus_init,
-		.fini = _nvkm_ibus_fini,
-	},
-};
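
One behavioural detail in the gk20a conversion above: the old nv_wait() call warned on timeout, while the replacement nvkm_msec() poll times out silently. nvkm_msec(device, ms, body) re-evaluates body until it executes break or the deadline passes, and the expression yields a negative value on timeout, so the warning could be kept as sketched below (the nvkm_warn() line is an illustration, not part of the patch):

/* Poll up to 2ms for the ringmaster to ack; warn if it never does. */
if (nvkm_msec(device, 2000,
	if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
		break;
) < 0)
	nvkm_warn(ibus, "timeout waiting for ringmaster ack\n");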
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index d16358cc6cbb..895ba74057d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -23,124 +23,291 @@
  */
 #include "priv.h"
 
-#include <core/engine.h>
+#include <core/memory.h>
+#include <subdev/bar.h>
 
 /******************************************************************************
  * instmem object base implementation
  *****************************************************************************/
+#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
 
-void
-_nvkm_instobj_dtor(struct nvkm_object *object)
+struct nvkm_instobj {
+	struct nvkm_memory memory;
+	struct nvkm_memory *parent;
+	struct nvkm_instmem *imem;
+	struct list_head head;
+	u32 *suspend;
+	void __iomem *map;
+};
+
+static enum nvkm_memory_target
+nvkm_instobj_target(struct nvkm_memory *memory)
+{
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_target(memory);
+}
+
+static u64
+nvkm_instobj_addr(struct nvkm_memory *memory)
+{
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_addr(memory);
+}
+
+static u64
+nvkm_instobj_size(struct nvkm_memory *memory)
+{
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_size(memory);
+}
+
+static void
+nvkm_instobj_release(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	nvkm_bar_flush(iobj->imem->subdev.device->bar);
+}
+
+static void __iomem *
+nvkm_instobj_acquire(struct nvkm_memory *memory)
+{
+	return nvkm_instobj(memory)->map;
+}
+
+static u32
+nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+	return ioread32_native(nvkm_instobj(memory)->map + offset);
+}
+
+static void
+nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	struct nvkm_instobj *iobj = (void *)object;
+	iowrite32_native(data, nvkm_instobj(memory)->map + offset);
+}
+
+static void
+nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+	memory = nvkm_instobj(memory)->parent;
+	nvkm_memory_map(memory, vma, offset);
+}
 
-	mutex_lock(&nv_subdev(imem)->mutex);
+static void *
+nvkm_instobj_dtor(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
 	list_del(&iobj->head);
-	mutex_unlock(&nv_subdev(imem)->mutex);
+	nvkm_memory_del(&iobj->parent);
+	return iobj;
+}
+
+const struct nvkm_memory_func
+nvkm_instobj_func = {
+	.dtor = nvkm_instobj_dtor,
+	.target = nvkm_instobj_target,
+	.addr = nvkm_instobj_addr,
+	.size = nvkm_instobj_size,
+	.acquire = nvkm_instobj_acquire,
+	.release = nvkm_instobj_release,
+	.rd32 = nvkm_instobj_rd32,
+	.wr32 = nvkm_instobj_wr32,
+	.map = nvkm_instobj_map,
+};
+
+static void
+nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+{
+	memory = nvkm_instobj(memory)->parent;
+	nvkm_memory_boot(memory, vm);
+}
+
+static void
+nvkm_instobj_release_slow(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	nvkm_instobj_release(memory);
+	nvkm_done(iobj->parent);
+}
+
+static void __iomem *
+nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	iobj->map = nvkm_kmap(iobj->parent);
+	if (iobj->map)
+		memory->func = &nvkm_instobj_func;
+	return iobj->map;
+}
+
+static u32
+nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	return nvkm_ro32(iobj->parent, offset);
+}
 
-	return nvkm_object_destroy(&iobj->base);
+static void
+nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	nvkm_wo32(iobj->parent, offset, data);
 }
 
+const struct nvkm_memory_func
+nvkm_instobj_func_slow = {
+	.dtor = nvkm_instobj_dtor,
+	.target = nvkm_instobj_target,
+	.addr = nvkm_instobj_addr,
+	.size = nvkm_instobj_size,
+	.boot = nvkm_instobj_boot,
+	.acquire = nvkm_instobj_acquire_slow,
+	.release = nvkm_instobj_release_slow,
+	.rd32 = nvkm_instobj_rd32_slow,
+	.wr32 = nvkm_instobj_wr32_slow,
+	.map = nvkm_instobj_map,
+};
+
 int
-nvkm_instobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(parent);
+	struct nvkm_memory *memory = NULL;
 	struct nvkm_instobj *iobj;
+	u32 offset;
 	int ret;
 
-	ret = nvkm_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
-				  length, pobject);
-	iobj = *pobject;
+	ret = imem->func->memory_new(imem, size, align, zero, &memory);
 	if (ret)
-		return ret;
+		goto done;
 
-	mutex_lock(&imem->base.mutex);
-	list_add(&iobj->head, &imem->list);
-	mutex_unlock(&imem->base.mutex);
-	return 0;
+	if (!imem->func->persistent) {
+		if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
+		iobj->parent = memory;
+		iobj->imem = imem;
+		list_add_tail(&iobj->head, &imem->list);
+		memory = &iobj->memory;
+	}
+
+	if (!imem->func->zero && zero) {
+		void __iomem *map = nvkm_kmap(memory);
+		if (unlikely(!map)) {
+			for (offset = 0; offset < size; offset += 4)
+				nvkm_wo32(memory, offset, 0x00000000);
+		} else {
+			memset_io(map, 0x00, size);
+		}
+		nvkm_done(memory);
+	}
+
+done:
+	if (ret)
+		nvkm_memory_del(&memory);
+	*pmemory = memory;
+	return ret;
 }
 
 /******************************************************************************
  * instmem subdev base implementation
  *****************************************************************************/
 
-static int
-nvkm_instmem_alloc(struct nvkm_instmem *imem, struct nvkm_object *parent,
-		   u32 size, u32 align, struct nvkm_object **pobject)
+u32
+nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
 {
-	struct nvkm_instmem_impl *impl = (void *)imem->base.object.oclass;
-	struct nvkm_instobj_args args = { .size = size, .align = align };
-	return nvkm_object_ctor(parent, &parent->engine->subdev.object,
-				impl->instobj, &args, sizeof(args), pobject);
+	return imem->func->rd32(imem, addr);
 }
 
-int
-_nvkm_instmem_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
+{
+	imem->func->wr32(imem, addr, data);
+}
+
+static int
+nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_instmem *imem = (void *)object;
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
 	struct nvkm_instobj *iobj;
-	int i, ret = 0;
+	int i;
+
+	if (imem->func->fini)
+		imem->func->fini(imem);
 
 	if (suspend) {
-		mutex_lock(&imem->base.mutex);
 		list_for_each_entry(iobj, &imem->list, head) {
-			iobj->suspend = vmalloc(iobj->size);
-			if (!iobj->suspend) {
-				ret = -ENOMEM;
-				break;
-			}
-
-			for (i = 0; i < iobj->size; i += 4)
-				iobj->suspend[i / 4] = nv_ro32(iobj, i);
+			struct nvkm_memory *memory = iobj->parent;
+			u64 size = nvkm_memory_size(memory);
+
+			iobj->suspend = vmalloc(size);
+			if (!iobj->suspend)
+				return -ENOMEM;
+
+			for (i = 0; i < size; i += 4)
+				iobj->suspend[i / 4] = nvkm_ro32(memory, i);
 		}
-		mutex_unlock(&imem->base.mutex);
-		if (ret)
-			return ret;
 	}
 
-	return nvkm_subdev_fini(&imem->base, suspend);
+	return 0;
 }
 
-int
-_nvkm_instmem_init(struct nvkm_object *object)
+static int
+nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
 {
-	struct nvkm_instmem *imem = (void *)object;
-	struct nvkm_instobj *iobj;
-	int ret, i;
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	if (imem->func->oneinit)
+		return imem->func->oneinit(imem);
+	return 0;
+}
 
-	ret = nvkm_subdev_init(&imem->base);
-	if (ret)
-		return ret;
+static int
+nvkm_instmem_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	struct nvkm_instobj *iobj;
+	int i;
 
-	mutex_lock(&imem->base.mutex);
 	list_for_each_entry(iobj, &imem->list, head) {
 		if (iobj->suspend) {
-			for (i = 0; i < iobj->size; i += 4)
-				nv_wo32(iobj, i, iobj->suspend[i / 4]);
+			struct nvkm_memory *memory = iobj->parent;
+			u64 size = nvkm_memory_size(memory);
+			for (i = 0; i < size; i += 4)
+				nvkm_wo32(memory, i, iobj->suspend[i / 4]);
 			vfree(iobj->suspend);
 			iobj->suspend = NULL;
 		}
 	}
-	mutex_unlock(&imem->base.mutex);
+
 	return 0;
 }
 
-int
-nvkm_instmem_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int length, void **pobject)
+static void *
+nvkm_instmem_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_instmem *imem;
-	int ret;
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	if (imem->func->dtor)
+		return imem->func->dtor(imem);
+	return imem;
+}
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "INSTMEM",
-				  "instmem", length, pobject);
-	imem = *pobject;
-	if (ret)
-		return ret;
+static const struct nvkm_subdev_func
+nvkm_instmem = {
+	.dtor = nvkm_instmem_dtor,
+	.oneinit = nvkm_instmem_oneinit,
+	.init = nvkm_instmem_init,
+	.fini = nvkm_instmem_fini,
+};
 
+void
+nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_instmem *imem)
+{
+	nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
+	imem->func = func;
 	INIT_LIST_HEAD(&imem->list);
-	imem->alloc = nvkm_instmem_alloc;
-	return 0;
 }
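
The heart of the new base implementation is the nvkm_instobj wrapper, which proxies a backend nvkm_memory object. Accesses start on the _slow function table; the first nvkm_kmap() of the parent that succeeds caches the mapping in iobj->map and swaps memory->func over to the direct ioread32/iowrite32 table, so later accessors bypass the indirection entirely. A hedged usage sketch for the new allocator, assuming a valid device->imem and the nvkm_kmap()/nvkm_done() accessor macros from core/memory.h:

/* Allocate a zeroed, 256-byte-aligned 4KiB instance object, write one
 * word through the acquire/release pair, then free it. */
struct nvkm_memory *memory;
int ret = nvkm_instobj_new(device->imem, 0x1000, 0x100, true, &memory);
if (ret == 0) {
	nvkm_kmap(memory);
	nvkm_wo32(memory, 0x0, 0xcafe0001);
	nvkm_done(memory);
	nvkm_memory_del(&memory);
}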
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index dd0994d9ebfc..cd7feb1b25f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -37,32 +37,27 @@
  * to use more "relaxed" allocation parameters when using the DMA API, since we
  * never need a kernel mapping.
  */
+#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
+#include "priv.h"
 
-#include <subdev/fb.h>
+#include <core/memory.h>
 #include <core/mm.h>
-#include <core/device.h>
+#include <core/tegra.h>
+#include <subdev/fb.h>
 
-#ifdef __KERNEL__
-#include <linux/dma-attrs.h>
-#include <linux/iommu.h>
-#include <nouveau_platform.h>
-#endif
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
 
-#include "priv.h"
-
-struct gk20a_instobj_priv {
-	struct nvkm_instobj base;
-	/* Must be second member here - see nouveau_gpuobj_map_vm() */
-	struct nvkm_mem *mem;
-	/* Pointed by mem */
-	struct nvkm_mem _mem;
+struct gk20a_instobj {
+	struct nvkm_memory memory;
+	struct gk20a_instmem *imem;
+	struct nvkm_mem mem;
 };
 
 /*
  * Used for objects allocated using the DMA API
  */
 struct gk20a_instobj_dma {
-	struct gk20a_instobj_priv base;
+	struct gk20a_instobj base;
 
 	void *cpuaddr;
 	dma_addr_t handle;
@@ -73,14 +68,15 @@ struct gk20a_instobj_dma {
  * Used for objects flattened using the IOMMU API
  */
 struct gk20a_instobj_iommu {
-	struct gk20a_instobj_priv base;
+	struct gk20a_instobj base;
 
 	/* array of base.mem->size pages */
 	struct page *pages[];
 };
 
-struct gk20a_instmem_priv {
+struct gk20a_instmem {
 	struct nvkm_instmem base;
+	unsigned long lock_flags;
 	spinlock_t lock;
 	u64 addr;
 
@@ -94,6 +90,42 @@ struct gk20a_instmem_priv {
 	struct dma_attrs attrs;
 };
 
+static enum nvkm_memory_target
+gk20a_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_HOST;
+}
+
+static u64
+gk20a_instobj_addr(struct nvkm_memory *memory)
+{
+	return gk20a_instobj(memory)->mem.offset;
+}
+
+static u64
+gk20a_instobj_size(struct nvkm_memory *memory)
+{
+	return (u64)gk20a_instobj(memory)->mem.size << 12;
+}
+
+static void __iomem *
+gk20a_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+	unsigned long flags;
+	spin_lock_irqsave(&imem->lock, flags);
+	imem->lock_flags = flags;
+	return NULL;
+}
+
+static void
+gk20a_instobj_release(struct nvkm_memory *memory)
+{
+	struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+}
+
 /*
  * Use PRAMIN to read/write data and avoid coherency issues.
  * PRAMIN uses the GPU path and ensures data will always be coherent.
@@ -104,160 +136,170 @@ struct gk20a_instmem_priv {
  */
 
 static u32
-gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
+gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct gk20a_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
+	u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
 	u32 data;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	data = nv_rd32(priv, 0x700000 + addr);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	data = nvkm_rd32(device, 0x700000 + addr);
 	return data;
 }
 
 static void
-gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct gk20a_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
+	u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
+
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	nv_wr32(priv, 0x700000 + addr, data);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	nvkm_wr32(device, 0x700000 + addr, data);
+}
+
+static void
+gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	nvkm_vm_map_at(vma, offset, &node->mem);
 }
 
 static void
-gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
 {
 	struct gk20a_instobj_dma *node = (void *)_node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
-	struct device *dev = nv_device_base(nv_device(priv));
+	struct gk20a_instmem *imem = _node->imem;
+	struct device *dev = imem->base.subdev.device->dev;
 
 	if (unlikely(!node->cpuaddr))
 		return;
 
-	dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
-		       node->handle, &priv->attrs);
+	dma_free_attrs(dev, _node->mem.size << PAGE_SHIFT, node->cpuaddr,
+		       node->handle, &imem->attrs);
 }
 
 static void
-gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
 {
 	struct gk20a_instobj_iommu *node = (void *)_node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+	struct gk20a_instmem *imem = _node->imem;
 	struct nvkm_mm_node *r;
 	int i;
 
-	if (unlikely(list_empty(&_node->mem->regions)))
+	if (unlikely(list_empty(&_node->mem.regions)))
 		return;
 
-	r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
+	r = list_first_entry(&_node->mem.regions, struct nvkm_mm_node,
 			     rl_entry);
 
 	/* clear bit 34 to unmap pages */
-	r->offset &= ~BIT(34 - priv->iommu_pgshift);
+	r->offset &= ~BIT(34 - imem->iommu_pgshift);
 
 	/* Unmap pages from GPU address space and free them */
-	for (i = 0; i < _node->mem->size; i++) {
-		iommu_unmap(priv->domain,
-			    (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
+	for (i = 0; i < _node->mem.size; i++) {
+		iommu_unmap(imem->domain,
+			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
 		__free_page(node->pages[i]);
 	}
 
 	/* Release area from GPU address space */
-	mutex_lock(priv->mm_mutex);
-	nvkm_mm_free(priv->mm, &r);
-	mutex_unlock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
+	nvkm_mm_free(imem->mm, &r);
+	mutex_unlock(imem->mm_mutex);
 }
 
-static void
-gk20a_instobj_dtor(struct nvkm_object *object)
+static void *
+gk20a_instobj_dtor(struct nvkm_memory *memory)
 {
-	struct gk20a_instobj_priv *node = (void *)object;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
 
-	if (priv->domain)
+	if (imem->domain)
 		gk20a_instobj_dtor_iommu(node);
 	else
 		gk20a_instobj_dtor_dma(node);
 
-	nvkm_instobj_destroy(&node->base);
+	return node;
 }
 
+static const struct nvkm_memory_func
+gk20a_instobj_func = {
+	.dtor = gk20a_instobj_dtor,
+	.target = gk20a_instobj_target,
+	.addr = gk20a_instobj_addr,
+	.size = gk20a_instobj_size,
+	.acquire = gk20a_instobj_acquire,
+	.release = gk20a_instobj_release,
+	.rd32 = gk20a_instobj_rd32,
+	.wr32 = gk20a_instobj_wr32,
+	.map = gk20a_instobj_map,
+};
+
 static int
-gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, u32 npages, u32 align,
-		       struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
+		       struct gk20a_instobj **_node)
 {
 	struct gk20a_instobj_dma *node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct device *dev = nv_device_base(nv_device(parent));
-	int ret;
+	struct nvkm_subdev *subdev = &imem->base.subdev;
+	struct device *dev = subdev->device->dev;
 
-	ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node),
-				   (void **)&node);
+	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
+		return -ENOMEM;
 	*_node = &node->base;
-	if (ret)
-		return ret;
 
 	node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
 					&node->handle, GFP_KERNEL,
-					&priv->attrs);
+					&imem->attrs);
 	if (!node->cpuaddr) {
-		nv_error(priv, "cannot allocate DMA memory\n");
+		nvkm_error(subdev, "cannot allocate DMA memory\n");
 		return -ENOMEM;
 	}
 
 	/* alignment check */
 	if (unlikely(node->handle & (align - 1)))
-		nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
-			&node->handle, align);
+		nvkm_warn(subdev,
+			  "memory not aligned as requested: %pad (0x%x)\n",
+			  &node->handle, align);
 
 	/* present memory for being mapped using small pages */
 	node->r.type = 12;
 	node->r.offset = node->handle >> 12;
 	node->r.length = (npages << PAGE_SHIFT) >> 12;
 
-	node->base._mem.offset = node->handle;
+	node->base.mem.offset = node->handle;
 
-	INIT_LIST_HEAD(&node->base._mem.regions);
-	list_add_tail(&node->r.rl_entry, &node->base._mem.regions);
+	INIT_LIST_HEAD(&node->base.mem.regions);
+	list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
 
 	return 0;
 }
 
 static int
-gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
-			 struct nvkm_oclass *oclass, u32 npages, u32 align,
-			 struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
+			 struct gk20a_instobj **_node)
 {
 	struct gk20a_instobj_iommu *node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+	struct nvkm_subdev *subdev = &imem->base.subdev;
 	struct nvkm_mm_node *r;
 	int ret;
 	int i;
 
-	ret = nvkm_instobj_create_(parent, engine, oclass,
-				sizeof(*node) + sizeof(node->pages[0]) * npages,
-				(void **)&node);
+	if (!(node = kzalloc(sizeof(*node) +
+			     sizeof(node->pages[0]) * npages, GFP_KERNEL)))
+		return -ENOMEM;
 	*_node = &node->base;
-	if (ret)
-		return ret;
 
 	/* Allocate backing memory */
 	for (i = 0; i < npages; i++) {
@@ -270,48 +312,48 @@ gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
 		node->pages[i] = p;
 	}
 
-	mutex_lock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
 	/* Reserve area from GPU address space */
-	ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
-			   align >> priv->iommu_pgshift, &r);
-	mutex_unlock(priv->mm_mutex);
+	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
+			   align >> imem->iommu_pgshift, &r);
+	mutex_unlock(imem->mm_mutex);
 	if (ret) {
-		nv_error(priv, "virtual space is full!\n");
+		nvkm_error(subdev, "virtual space is full!\n");
 		goto free_pages;
 	}
 
 	/* Map into GPU address space */
 	for (i = 0; i < npages; i++) {
 		struct page *p = node->pages[i];
-		u32 offset = (r->offset + i) << priv->iommu_pgshift;
+		u32 offset = (r->offset + i) << imem->iommu_pgshift;
 
-		ret = iommu_map(priv->domain, offset, page_to_phys(p),
+		ret = iommu_map(imem->domain, offset, page_to_phys(p),
 				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 		if (ret < 0) {
-			nv_error(priv, "IOMMU mapping failure: %d\n", ret);
+			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
 
 			while (i-- > 0) {
 				offset -= PAGE_SIZE;
-				iommu_unmap(priv->domain, offset, PAGE_SIZE);
+				iommu_unmap(imem->domain, offset, PAGE_SIZE);
 			}
 			goto release_area;
 		}
 	}
 
 	/* Bit 34 tells that an address is to be resolved through the IOMMU */
-	r->offset |= BIT(34 - priv->iommu_pgshift);
+	r->offset |= BIT(34 - imem->iommu_pgshift);
 
-	node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;
+	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
 
-	INIT_LIST_HEAD(&node->base._mem.regions);
-	list_add_tail(&r->rl_entry, &node->base._mem.regions);
+	INIT_LIST_HEAD(&node->base.mem.regions);
+	list_add_tail(&r->rl_entry, &node->base.mem.regions);
 
 	return 0;
 
 release_area:
-	mutex_lock(priv->mm_mutex);
-	nvkm_mm_free(priv->mm, &r);
-	mutex_unlock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
+	nvkm_mm_free(imem->mm, &r);
+	mutex_unlock(imem->mm_mutex);
 
 free_pages:
 	for (i = 0; i < npages && node->pages[i] != NULL; i++)
@@ -321,120 +363,92 @@ free_pages:
 }
 
 static int
-gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 _size,
-		   struct nvkm_object **pobject)
+gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		  struct nvkm_memory **pmemory)
 {
-	struct nvkm_instobj_args *args = data;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct gk20a_instobj_priv *node;
-	u32 size, align;
+	struct gk20a_instmem *imem = gk20a_instmem(base);
+	struct gk20a_instobj *node = NULL;
+	struct nvkm_subdev *subdev = &imem->base.subdev;
 	int ret;
 
-	nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
-		 priv->domain ? "IOMMU" : "DMA", args->size, args->align);
+	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
+		   imem->domain ? "IOMMU" : "DMA", size, align);
 
 	/* Round size and align to page bounds */
-	size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
-	align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);
+	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
+	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);
 
-	if (priv->domain)
-		ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
-					      size >> PAGE_SHIFT, align, &node);
+	if (imem->domain)
+		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
+					       align, &node);
 	else
-		ret = gk20a_instobj_ctor_dma(parent, engine, oclass,
-					     size >> PAGE_SHIFT, align, &node);
-	*pobject = nv_object(node);
+		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
+					     align, &node);
+	*pmemory = node ? &node->memory : NULL;
 	if (ret)
 		return ret;
 
-	node->mem = &node->_mem;
+	nvkm_memory_ctor(&gk20a_instobj_func, &node->memory);
+	node->imem = imem;
 
 	/* present memory for being mapped using small pages */
-	node->mem->size = size >> 12;
-	node->mem->memtype = 0;
-	node->mem->page_shift = 12;
-
-	node->base.addr = node->mem->offset;
-	node->base.size = size;
+	node->mem.size = size >> 12;
+	node->mem.memtype = 0;
+	node->mem.page_shift = 12;
 
-	nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
-		 size, align, node->mem->offset);
+	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
+		   size, align, node->mem.offset);
 
 	return 0;
 }
 
-static struct nvkm_instobj_impl
-gk20a_instobj_oclass = {
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_instobj_ctor,
-		.dtor = gk20a_instobj_dtor,
-		.init = _nvkm_instobj_init,
-		.fini = _nvkm_instobj_fini,
-		.rd32 = gk20a_instobj_rd32,
-		.wr32 = gk20a_instobj_wr32,
-	},
-};
-
-
-
-static int
-gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
+static void
+gk20a_instmem_fini(struct nvkm_instmem *base)
 {
-	struct gk20a_instmem_priv *priv = (void *)object;
-	priv->addr = ~0ULL;
-	return nvkm_instmem_fini(&priv->base, suspend);
+	gk20a_instmem(base)->addr = ~0ULL;
 }
 
-static int
-gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
-{
-	struct gk20a_instmem_priv *priv;
-	struct nouveau_platform_device *plat;
-	int ret;
+static const struct nvkm_instmem_func
+gk20a_instmem = {
+	.fini = gk20a_instmem_fini,
+	.memory_new = gk20a_instobj_new,
+	.persistent = true,
+	.zero = false,
+};
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+int
+gk20a_instmem_new(struct nvkm_device *device, int index,
+		  struct nvkm_instmem **pimem)
+{
+	struct nvkm_device_tegra *tdev = device->func->tegra(device);
+	struct gk20a_instmem *imem;
 
-	spin_lock_init(&priv->lock);
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
+	spin_lock_init(&imem->lock);
+	*pimem = &imem->base;
 
-	plat = nv_device_to_platform(nv_device(parent));
-	if (plat->gpu->iommu.domain) {
-		priv->domain = plat->gpu->iommu.domain;
-		priv->mm = plat->gpu->iommu.mm;
-		priv->iommu_pgshift = plat->gpu->iommu.pgshift;
-		priv->mm_mutex = &plat->gpu->iommu.mutex;
+	if (tdev->iommu.domain) {
+		imem->domain = tdev->iommu.domain;
+		imem->mm = &tdev->iommu.mm;
+		imem->iommu_pgshift = tdev->iommu.pgshift;
+		imem->mm_mutex = &tdev->iommu.mutex;
 
-		nv_info(priv, "using IOMMU\n");
+		nvkm_info(&imem->base.subdev, "using IOMMU\n");
 	} else {
-		init_dma_attrs(&priv->attrs);
+		init_dma_attrs(&imem->attrs);
 		/*
 		 * We will access instmem through PRAMIN and thus do not need a
 		 * consistent CPU pointer or kernel mapping
 		 */
-		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
-		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
-		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
-		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);
+		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
+		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
+		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
 
-		nv_info(priv, "using DMA API\n");
+		nvkm_info(&imem->base.subdev, "using DMA API\n");
 	}
 
 	return 0;
 }
-
-struct nvkm_oclass *
-gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_instmem_ctor,
-		.dtor = _nvkm_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = gk20a_instmem_fini,
-	},
-	.instobj = &gk20a_instobj_oclass.base,
-}.base;
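
The gk20a rd32/wr32 paths above deliberately go through PRAMIN to stay coherent with the GPU: BAR0 offset 0x700000 exposes a 1MiB window into instance memory, and register 0x001700 selects the 1MiB-aligned base that window points at. imem->addr caches the current base so the window register is only rewritten when an access crosses into a different 1MiB region, and the acquire/release hooks hold imem->lock around the whole sequence because the window is a global resource. The address split, as used by both accessors above:

/* Split an instance address into window base + offset; retarget the
 * PRAMIN window only when the base actually changes. */
u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;

if (unlikely(imem->addr != base)) {
	nvkm_wr32(device, 0x001700, base >> 16);
	imem->addr = base;
}
data = nvkm_rd32(device, 0x700000 + addr);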
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 282143f49d72..6133c8bb2d42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -21,173 +21,207 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
+#include "priv.h"
 
+#include <core/memory.h>
 #include <core/ramht.h>
 
+struct nv04_instmem {
+	struct nvkm_instmem base;
+	struct nvkm_mm heap;
+};
+
 /******************************************************************************
  * instmem object implementation
  *****************************************************************************/
+#define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)
 
-static u32
-nv04_instobj_rd32(struct nvkm_object *object, u64 addr)
+struct nv04_instobj {
+	struct nvkm_memory memory;
+	struct nv04_instmem *imem;
+	struct nvkm_mm_node *node;
+};
+
+static enum nvkm_memory_target
+nv04_instobj_target(struct nvkm_memory *memory)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	return nv_ro32(priv, node->mem->offset + addr);
+	return NVKM_MEM_TARGET_INST;
 }
 
-static void
-nv04_instobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
+static u64
+nv04_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv04_instobj(memory)->node->offset;
+}
+
+static u64
+nv04_instobj_size(struct nvkm_memory *memory)
+{
+	return nv04_instobj(memory)->node->length;
+}
+
+static void __iomem *
+nv04_instobj_acquire(struct nvkm_memory *memory)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	nv_wo32(priv, node->mem->offset + addr, data);
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	return device->pri + 0x700000 + iobj->node->offset;
 }
 
 static void
-nv04_instobj_dtor(struct nvkm_object *object)
+nv04_instobj_release(struct nvkm_memory *memory)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	struct nvkm_subdev *subdev = (void *)priv;
+}
 
-	mutex_lock(&subdev->mutex);
-	nvkm_mm_free(&priv->heap, &node->mem);
-	mutex_unlock(&subdev->mutex);
+static u32
+nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
+}
 
-	nvkm_instobj_destroy(&node->base);
+static void
+nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
 }
 
+static void *
+nv04_instobj_dtor(struct nvkm_memory *memory)
+{
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	mutex_lock(&iobj->imem->base.subdev.mutex);
+	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
+	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	return iobj;
+}
+
+static const struct nvkm_memory_func
+nv04_instobj_func = {
+	.dtor = nv04_instobj_dtor,
+	.target = nv04_instobj_target,
+	.size = nv04_instobj_size,
+	.addr = nv04_instobj_addr,
+	.acquire = nv04_instobj_acquire,
+	.release = nv04_instobj_release,
+	.rd32 = nv04_instobj_rd32,
+	.wr32 = nv04_instobj_wr32,
+};
+
 static int
-nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct nv04_instobj_priv *node;
-	struct nvkm_instobj_args *args = data;
-	struct nvkm_subdev *subdev = (void *)priv;
+	struct nv04_instmem *imem = nv04_instmem(base);
+	struct nv04_instobj *iobj;
 	int ret;
 
-	if (!args->align)
-		args->align = 1;
+	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmemory = &iobj->memory;
 
-	ret = nvkm_instobj_create(parent, engine, oclass, &node);
-	*pobject = nv_object(node);
-	if (ret)
-		return ret;
-
-	mutex_lock(&subdev->mutex);
-	ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
-			   args->align, &node->mem);
-	mutex_unlock(&subdev->mutex);
-	if (ret)
-		return ret;
+	nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
+	iobj->imem = imem;
 
-	node->base.addr = node->mem->offset;
-	node->base.size = node->mem->length;
-	return 0;
+	mutex_lock(&imem->base.subdev.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
+			   align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.subdev.mutex);
+	return ret;
 }
 
-struct nvkm_instobj_impl
-nv04_instobj_oclass = {
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_instobj_ctor,
-		.dtor = nv04_instobj_dtor,
-		.init = _nvkm_instobj_init,
-		.fini = _nvkm_instobj_fini,
-		.rd32 = nv04_instobj_rd32,
-		.wr32 = nv04_instobj_wr32,
-	},
-};
-
 /******************************************************************************
  * instmem subdev implementation
  *****************************************************************************/
 
 static u32
-nv04_instmem_rd32(struct nvkm_object *object, u64 addr)
+nv04_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
 {
-	return nv_rd32(object, 0x700000 + addr);
+	return nvkm_rd32(imem->subdev.device, 0x700000 + addr);
 }
 
 static void
-nv04_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	return nv_wr32(object, 0x700000 + addr, data);
-}
-
-void
-nv04_instmem_dtor(struct nvkm_object *object)
+nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->ramfc);
-	nvkm_gpuobj_ref(NULL, &priv->ramro);
-	nvkm_ramht_ref(NULL, &priv->ramht);
-	nvkm_gpuobj_ref(NULL, &priv->vbios);
-	nvkm_mm_fini(&priv->heap);
-	if (priv->iomem)
-		iounmap(priv->iomem);
-	nvkm_instmem_destroy(&priv->base);
+	nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
 }
 
 static int
-nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv04_instmem_oneinit(struct nvkm_instmem *base)
 {
-	struct nv04_instmem_priv *priv;
+	struct nv04_instmem *imem = nv04_instmem(base);
+	struct nvkm_device *device = imem->base.subdev.device;
 	int ret;
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
 	/* PRAMIN aperture maps over the end of VRAM, reserve it */
-	priv->base.reserved = 512 * 1024;
+	imem->base.reserved = 512 * 1024;
 
-	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
-			      &priv->vbios);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
+			      &imem->base.vbios);
 	if (ret)
 		return ret;
 
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+	ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
 	if (ret)
 		return ret;
 
 	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00800, 0, true,
+			      &imem->base.ramfc);
 	if (ret)
 		return ret;
 
 	/* 0x18800-0x18a00: reserve for RAMRO */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
-			      &priv->ramro);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00200, 0, false,
+			      &imem->base.ramro);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
-struct nvkm_oclass *
-nv04_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_instmem_ctor,
-		.dtor = nv04_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = _nvkm_instmem_fini,
-		.rd32 = nv04_instmem_rd32,
-		.wr32 = nv04_instmem_wr32,
-	},
-	.instobj = &nv04_instobj_oclass.base,
-}.base;
+static void *
+nv04_instmem_dtor(struct nvkm_instmem *base)
+{
+	struct nv04_instmem *imem = nv04_instmem(base);
+	nvkm_memory_del(&imem->base.ramfc);
+	nvkm_memory_del(&imem->base.ramro);
+	nvkm_ramht_del(&imem->base.ramht);
+	nvkm_memory_del(&imem->base.vbios);
+	nvkm_mm_fini(&imem->heap);
+	return imem;
+}
+
+static const struct nvkm_instmem_func
+nv04_instmem = {
+	.dtor = nv04_instmem_dtor,
+	.oneinit = nv04_instmem_oneinit,
+	.rd32 = nv04_instmem_rd32,
+	.wr32 = nv04_instmem_wr32,
+	.memory_new = nv04_instobj_new,
+	.persistent = false,
+	.zero = false,
+};
+
+int
+nv04_instmem_new(struct nvkm_device *device, int index,
+		 struct nvkm_instmem **pimem)
+{
+	struct nv04_instmem *imem;
+
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&nv04_instmem, device, index, &imem->base);
+	*pimem = &imem->base;
+	return 0;
+}
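
nv04 (and nv40 below) carve instance objects straight out of the reserved PRAMIN heap with the nvkm_mm range allocator, serialised on the subdev mutex; the destructor hands the node back via a symmetric nvkm_mm_free(). The allocation call, annotated with the argument roles as I read them from nvkm's core/mm.h at the time of this series:

/* nvkm_mm_head(mm, heap, type, size_max, size_min, align, pnode):
 * heap 0 and type 1 with size_max == size_min request an exact-size
 * block; a zero align is normalised to byte alignment. */
mutex_lock(&imem->base.subdev.mutex);
ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
		   align ? align : 1, &iobj->node);
mutex_unlock(&imem->base.subdev.mutex);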
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
deleted file mode 100644
index 42b6c928047c..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __NV04_INSTMEM_H__
-#define __NV04_INSTMEM_H__
-#include "priv.h"
-
-#include <core/mm.h>
-
-extern struct nvkm_instobj_impl nv04_instobj_oclass;
-
-struct nv04_instmem_priv {
-	struct nvkm_instmem base;
-
-	void __iomem *iomem;
-	struct nvkm_mm heap;
-
-	struct nvkm_gpuobj *vbios;
-	struct nvkm_ramht  *ramht;
-	struct nvkm_gpuobj *ramro;
-	struct nvkm_gpuobj *ramfc;
-};
-
-static inline struct nv04_instmem_priv *
-nv04_instmem(void *obj)
-{
-	return (void *)nvkm_instmem(obj);
-}
-
-struct nv04_instobj_priv {
-	struct nvkm_instobj base;
-	struct nvkm_mm_node *mem;
-};
-
-void nv04_instmem_dtor(struct nvkm_object *);
-
-int nv04_instmem_alloc(struct nvkm_instmem *, struct nvkm_object *,
-		       u32 size, u32 align, struct nvkm_object **pobject);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index b42b8588fc0e..c0543875e490 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -21,116 +21,239 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
+#include "priv.h"
 
+#include <core/memory.h>
 #include <core/ramht.h>
 #include <engine/gr/nv40.h>
 
+struct nv40_instmem {
+	struct nvkm_instmem base;
+	struct nvkm_mm heap;
+	void __iomem *iomem;
+};
+
 /******************************************************************************
- * instmem subdev implementation
+ * instmem object implementation
  *****************************************************************************/
+#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
+
+struct nv40_instobj {
+	struct nvkm_memory memory;
+	struct nv40_instmem *imem;
+	struct nvkm_mm_node *node;
+};
+
+static enum nvkm_memory_target
+nv40_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_INST;
+}
+
+static u64
+nv40_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv40_instobj(memory)->node->offset;
+}
+
+static u64
+nv40_instobj_size(struct nvkm_memory *memory)
+{
+	return nv40_instobj(memory)->node->length;
+}
+
+static void __iomem *
+nv40_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return iobj->imem->iomem + iobj->node->offset;
+}
+
+static void
+nv40_instobj_release(struct nvkm_memory *memory)
+{
+}
 
 static u32
-nv40_instmem_rd32(struct nvkm_object *object, u64 addr)
+nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	return ioread32_native(priv->iomem + addr);
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
 }
 
 static void
-nv40_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
+}
+
+static void *
+nv40_instobj_dtor(struct nvkm_memory *memory)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	iowrite32_native(data, priv->iomem + addr);
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	mutex_lock(&iobj->imem->base.subdev.mutex);
+	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
+	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	return iobj;
 }
 
+static const struct nvkm_memory_func
+nv40_instobj_func = {
+	.dtor = nv40_instobj_dtor,
+	.target = nv40_instobj_target,
+	.size = nv40_instobj_size,
+	.addr = nv40_instobj_addr,
+	.acquire = nv40_instobj_acquire,
+	.release = nv40_instobj_release,
+	.rd32 = nv40_instobj_rd32,
+	.wr32 = nv40_instobj_wr32,
+};
+
 static int
-nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_instmem_priv *priv;
-	int ret, bar, vs;
+	struct nv40_instmem *imem = nv40_instmem(base);
+	struct nv40_instobj *iobj;
+	int ret;
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmemory = &iobj->memory;
 
-	/* map bar */
-	if (nv_device_resource_len(device, 2))
-		bar = 2;
-	else
-		bar = 3;
+	nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
+	iobj->imem = imem;
 
-	priv->iomem = ioremap(nv_device_resource_start(device, bar),
-			      nv_device_resource_len(device, bar));
-	if (!priv->iomem) {
-		nv_error(priv, "unable to map PRAMIN BAR\n");
-		return -EFAULT;
-	}
+	mutex_lock(&imem->base.subdev.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
+			   align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.subdev.mutex);
+	return ret;
+}
+
+/******************************************************************************
+ * instmem subdev implementation
+ *****************************************************************************/
+
+static u32
+nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
+{
+	return ioread32_native(nv40_instmem(base)->iomem + addr);
+}
+
+static void
+nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
+{
+	iowrite32_native(data, nv40_instmem(base)->iomem + addr);
+}
+
+static int
+nv40_instmem_oneinit(struct nvkm_instmem *base)
+{
+	struct nv40_instmem *imem = nv40_instmem(base);
+	struct nvkm_device *device = imem->base.subdev.device;
+	int ret, vs;
 
 	/* PRAMIN aperture maps over the end of vram, reserve enough space
 	 * to fit graphics contexts for every channel, the magics come
 	 * from engine/gr/nv40.c
 	 */
-	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
-	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
-	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
-	else if (nv44_gr_class(priv))  priv->base.reserved = 0x4980 * vs;
-	else				  priv->base.reserved = 0x4a40 * vs;
-	priv->base.reserved += 16 * 1024;
-	priv->base.reserved *= 32;		/* per-channel */
-	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
-	priv->base.reserved += 512 * 1024;	/* object storage */
-
-	priv->base.reserved = round_up(priv->base.reserved, 4096);
-
-	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
+	if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
+	else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
+	else if (nv44_gr_class(device))   imem->base.reserved = 0x4980 * vs;
+	else				  imem->base.reserved = 0x4a40 * vs;
+	imem->base.reserved += 16 * 1024;
+	imem->base.reserved *= 32;		/* per-channel */
+	imem->base.reserved += 512 * 1024;	/* pci(e)gart table */
+	imem->base.reserved += 512 * 1024;	/* object storage */
+	imem->base.reserved = round_up(imem->base.reserved, 4096);
+
+	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
-			      &priv->vbios);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
+			      &imem->base.vbios);
 	if (ret)
 		return ret;
 
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+	ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
 	if (ret)
 		return ret;
 
 	/* 0x18000-0x18200: reserve for RAMRO
 	 * 0x18200-0x20000: padding
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
-			      &priv->ramro);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
+			      &imem->base.ramro);
 	if (ret)
 		return ret;
 
 	/* 0x20000-0x21000: reserve for RAMFC
 	 * 0x21000-0x40000: padding and some unknown crap
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
+			      &imem->base.ramfc);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
-struct nvkm_oclass *
-nv40_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0x40),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_instmem_ctor,
-		.dtor = nv04_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = _nvkm_instmem_fini,
-		.rd32 = nv40_instmem_rd32,
-		.wr32 = nv40_instmem_wr32,
-	},
-	.instobj = &nv04_instobj_oclass.base,
-}.base;
+static void *
+nv40_instmem_dtor(struct nvkm_instmem *base)
+{
+	struct nv40_instmem *imem = nv40_instmem(base);
+	nvkm_memory_del(&imem->base.ramfc);
+	nvkm_memory_del(&imem->base.ramro);
+	nvkm_ramht_del(&imem->base.ramht);
+	nvkm_memory_del(&imem->base.vbios);
+	nvkm_mm_fini(&imem->heap);
+	if (imem->iomem)
+		iounmap(imem->iomem);
+	return imem;
+}
+
+static const struct nvkm_instmem_func
+nv40_instmem = {
+	.dtor = nv40_instmem_dtor,
+	.oneinit = nv40_instmem_oneinit,
+	.rd32 = nv40_instmem_rd32,
+	.wr32 = nv40_instmem_wr32,
+	.memory_new = nv40_instobj_new,
+	.persistent = false,
+	.zero = false,
+};
+
+int
+nv40_instmem_new(struct nvkm_device *device, int index,
+		 struct nvkm_instmem **pimem)
+{
+	struct nv40_instmem *imem;
+	int bar;
+
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
+	*pimem = &imem->base;
+
+	/* map bar */
+	if (device->func->resource_size(device, 2))
+		bar = 2;
+	else
+		bar = 3;
+
+	imem->iomem = ioremap(device->func->resource_addr(device, bar),
+			      device->func->resource_size(device, bar));
+	if (!imem->iomem) {
+		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
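
The constructor above captures the pattern this series applies everywhere: kzalloc() the chipset-specific struct, run the base ctor with a const func table, publish the object through *pimem before anything can fail, and only then claim hardware (here the PRAMIN BAR). Publishing early lets the caller unwind through ->dtor even on partial failure. A minimal sketch of that ordering; all "my_*" names are hypothetical, only the calls come from the patch:

struct my_obj {
	struct nvkm_instmem base;
	void __iomem *iomem;
};

static int
my_obj_new(const struct nvkm_instmem_func *func, struct nvkm_device *device,
	   int index, struct nvkm_instmem **pimem)
{
	struct my_obj *obj;

	if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(func, device, index, &obj->base);
	*pimem = &obj->base;	/* publish first: teardown can now use ->dtor */

	/* claim hardware last; on failure the caller unwinds via the dtor */
	obj->iomem = ioremap(device->func->resource_addr(device, 2),
			     device->func->resource_size(device, 2));
	return obj->iomem ? 0 : -EFAULT;
}
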
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 8404143f93ee..6d512c062ae3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -21,149 +21,229 @@
  *
  * Authors: Ben Skeggs
  */
+#define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
 #include "priv.h"
 
+#include <core/memory.h>
+#include <subdev/bar.h>
 #include <subdev/fb.h>
+#include <subdev/mmu.h>
 
-struct nv50_instmem_priv {
+struct nv50_instmem {
 	struct nvkm_instmem base;
+	unsigned long lock_flags;
 	spinlock_t lock;
 	u64 addr;
 };
 
-struct nv50_instobj_priv {
-	struct nvkm_instobj base;
-	struct nvkm_mem *mem;
-};
-
 /******************************************************************************
  * instmem object implementation
  *****************************************************************************/
+#define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)
 
-static u32
-nv50_instobj_rd32(struct nvkm_object *object, u64 offset)
+struct nv50_instobj {
+	struct nvkm_memory memory;
+	struct nv50_instmem *imem;
+	struct nvkm_mem *mem;
+	struct nvkm_vma bar;
+	void *map;
+};
+
+static enum nvkm_memory_target
+nv50_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_VRAM;
+}
+
+static u64
+nv50_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv50_instobj(memory)->mem->offset;
+}
+
+static u64
+nv50_instobj_size(struct nvkm_memory *memory)
+{
+	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
+}
+
+static void
+nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u64 size = nvkm_memory_size(memory);
+	void __iomem *map;
+	int ret;
+
+	iobj->map = ERR_PTR(-ENOMEM);
+
+	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
+	if (ret == 0) {
+		map = ioremap(device->func->resource_addr(device, 3) +
+			      (u32)iobj->bar.offset, size);
+		if (map) {
+			nvkm_memory_map(memory, &iobj->bar, 0);
+			iobj->map = map;
+		} else {
+			nvkm_warn(subdev, "PRAMIN ioremap failed\n");
+			nvkm_vm_put(&iobj->bar);
+		}
+	} else {
+		nvkm_warn(subdev, "PRAMIN exhausted\n");
+	}
+}
+
+static void
+nv50_instobj_release(struct nvkm_memory *memory)
 {
-	struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv50_instobj_priv *node = (void *)object;
+	struct nv50_instmem *imem = nv50_instobj(memory)->imem;
+	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+}
+
+static void __iomem *
+nv50_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_bar *bar = imem->base.subdev.device->bar;
+	struct nvkm_vm *vm;
 	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+
+	if (!iobj->map && (vm = nvkm_bar_kmap(bar)))
+		nvkm_memory_boot(memory, vm);
+	if (!IS_ERR_OR_NULL(iobj->map))
+		return iobj->map;
+
+	spin_lock_irqsave(&imem->lock, flags);
+	imem->lock_flags = flags;
+	return NULL;
+}
+
+static u32
+nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
 	u32 data;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	data = nv_rd32(priv, 0x700000 + addr);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	data = nvkm_rd32(device, 0x700000 + addr);
 	return data;
 }
 
 static void
-nv50_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv50_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
+
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	nv_wr32(priv, 0x700000 + addr, data);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	nvkm_wr32(device, 0x700000 + addr, data);
 }
 
 static void
-nv50_instobj_dtor(struct nvkm_object *object)
+nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
 {
-	struct nv50_instobj_priv *node = (void *)object;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	pfb->ram->put(pfb, &node->mem);
-	nvkm_instobj_destroy(&node->base);
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	nvkm_vm_map_at(vma, offset, iobj->mem);
 }
 
+static void *
+nv50_instobj_dtor(struct nvkm_memory *memory)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nvkm_ram *ram = iobj->imem->base.subdev.device->fb->ram;
+	if (!IS_ERR_OR_NULL(iobj->map)) {
+		nvkm_vm_put(&iobj->bar);
+		iounmap(iobj->map);
+	}
+	ram->func->put(ram, &iobj->mem);
+	return iobj;
+}
+
+static const struct nvkm_memory_func
+nv50_instobj_func = {
+	.dtor = nv50_instobj_dtor,
+	.target = nv50_instobj_target,
+	.size = nv50_instobj_size,
+	.addr = nv50_instobj_addr,
+	.boot = nv50_instobj_boot,
+	.acquire = nv50_instobj_acquire,
+	.release = nv50_instobj_release,
+	.rd32 = nv50_instobj_rd32,
+	.wr32 = nv50_instobj_wr32,
+	.map = nv50_instobj_map,
+};
+
 static int
-nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_instobj_args *args = data;
-	struct nv50_instobj_priv *node;
+	struct nv50_instmem *imem = nv50_instmem(base);
+	struct nv50_instobj *iobj;
+	struct nvkm_ram *ram = imem->base.subdev.device->fb->ram;
 	int ret;
 
-	args->size  = max((args->size  + 4095) & ~4095, (u32)4096);
-	args->align = max((args->align + 4095) & ~4095, (u32)4096);
+	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmemory = &iobj->memory;
 
-	ret = nvkm_instobj_create(parent, engine, oclass, &node);
-	*pobject = nv_object(node);
-	if (ret)
-		return ret;
+	nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
+	iobj->imem = imem;
+
+	size  = max((size  + 4095) & ~4095, (u32)4096);
+	align = max((align + 4095) & ~4095, (u32)4096);
 
-	ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem);
+	ret = ram->func->get(ram, size, align, 0, 0x800, &iobj->mem);
 	if (ret)
 		return ret;
 
-	node->base.addr = node->mem->offset;
-	node->base.size = node->mem->size << 12;
-	node->mem->page_shift = 12;
+	iobj->mem->page_shift = 12;
 	return 0;
 }
 
-static struct nvkm_instobj_impl
-nv50_instobj_oclass = {
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_instobj_ctor,
-		.dtor = nv50_instobj_dtor,
-		.init = _nvkm_instobj_init,
-		.fini = _nvkm_instobj_fini,
-		.rd32 = nv50_instobj_rd32,
-		.wr32 = nv50_instobj_wr32,
-	},
-};
-
 /******************************************************************************
  * instmem subdev implementation
  *****************************************************************************/
 
-static int
-nv50_instmem_fini(struct nvkm_object *object, bool suspend)
+static void
+nv50_instmem_fini(struct nvkm_instmem *base)
 {
-	struct nv50_instmem_priv *priv = (void *)object;
-	priv->addr = ~0ULL;
-	return nvkm_instmem_fini(&priv->base, suspend);
+	nv50_instmem(base)->addr = ~0ULL;
 }
 
-static int
-nv50_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nv50_instmem_priv *priv;
-	int ret;
+static const struct nvkm_instmem_func
+nv50_instmem = {
+	.fini = nv50_instmem_fini,
+	.memory_new = nv50_instobj_new,
+	.persistent = false,
+	.zero = false,
+};
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+int
+nv50_instmem_new(struct nvkm_device *device, int index,
+		 struct nvkm_instmem **pimem)
+{
+	struct nv50_instmem *imem;
 
-	spin_lock_init(&priv->lock);
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
+	spin_lock_init(&imem->lock);
+	*pimem = &imem->base;
 	return 0;
 }
-
-struct nvkm_oclass *
-nv50_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_instmem_ctor,
-		.dtor = _nvkm_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = nv50_instmem_fini,
-	},
-	.instobj = &nv50_instobj_oclass.base,
-}.base;
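
nv50_instobj_rd32()/wr32() above reach VRAM through a sliding 1 MiB PRAMIN window: register 0x001700 selects which megabyte the aperture at 0x700000 exposes, and imem->addr caches the current base so consecutive accesses within the same megabyte skip the re-point. Because that window is device-global, acquire() first tries to boot a direct BAR mapping via nvkm_bar_kmap(), and otherwise takes imem->lock (stashing the IRQ flags in the instmem) before returning NULL; release() drops the lock. A hedged caller sketch, with an invented helper name; only the func-pointer shapes are taken from the code above:

static u32
pramin_read(struct nvkm_memory *memory, u64 offset)
{
	void __iomem *map = memory->func->acquire(memory);
	u32 data;

	if (map) {
		/* fast path: direct BAR mapping, no lock was taken */
		data = ioread32(map + offset);
	} else {
		/* slow path: acquire() took imem->lock; rd32() re-points
		 * the 0x001700 window, release() drops the lock */
		data = memory->func->rd32(memory, offset);
		memory->func->release(memory);
	}
	return data;
}
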
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index b10e292e5607..ace4471864a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -1,54 +1,20 @@
 #ifndef __NVKM_INSTMEM_PRIV_H__
 #define __NVKM_INSTMEM_PRIV_H__
+#define nvkm_instmem(p) container_of((p), struct nvkm_instmem, subdev)
 #include <subdev/instmem.h>
 
-struct nvkm_instobj_impl {
-	struct nvkm_oclass base;
+struct nvkm_instmem_func {
+	void *(*dtor)(struct nvkm_instmem *);
+	int (*oneinit)(struct nvkm_instmem *);
+	void (*fini)(struct nvkm_instmem *);
+	u32  (*rd32)(struct nvkm_instmem *, u32 addr);
+	void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
+	int (*memory_new)(struct nvkm_instmem *, u32 size, u32 align,
+			  bool zero, struct nvkm_memory **);
+	bool persistent;
+	bool zero;
 };
 
-struct nvkm_instobj_args {
-	u32 size;
-	u32 align;
-};
-
-#define nvkm_instobj_create(p,e,o,d)                                        \
-	nvkm_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_instobj_destroy(p) ({                                          \
-	struct nvkm_instobj *iobj = (p);                                    \
-	_nvkm_instobj_dtor(nv_object(iobj));                                \
-})
-#define nvkm_instobj_init(p)                                                \
-	nvkm_object_init(&(p)->base)
-#define nvkm_instobj_fini(p,s)                                              \
-	nvkm_object_fini(&(p)->base, (s))
-
-int  nvkm_instobj_create_(struct nvkm_object *, struct nvkm_object *,
-			     struct nvkm_oclass *, int, void **);
-void _nvkm_instobj_dtor(struct nvkm_object *);
-#define _nvkm_instobj_init nvkm_object_init
-#define _nvkm_instobj_fini nvkm_object_fini
-
-struct nvkm_instmem_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *instobj;
-};
-
-#define nvkm_instmem_create(p,e,o,d)                                        \
-	nvkm_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_instmem_destroy(p)                                             \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_instmem_init(p) ({                                             \
-	struct nvkm_instmem *imem = (p);                                    \
-	_nvkm_instmem_init(nv_object(imem));                                \
-})
-#define nvkm_instmem_fini(p,s) ({                                           \
-	struct nvkm_instmem *imem = (p);                                    \
-	_nvkm_instmem_fini(nv_object(imem), (s));                           \
-})
-
-int nvkm_instmem_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, int, void **);
-#define _nvkm_instmem_dtor _nvkm_subdev_dtor
-int _nvkm_instmem_init(struct nvkm_object *);
-int _nvkm_instmem_fini(struct nvkm_object *, bool);
+void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
+		       int index, struct nvkm_instmem *);
 #endif
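
The header rewrite above is the template for the whole series: the create/destroy/init/fini macro stack gives way to one const nvkm_instmem_func table plus a container_of() downcast (the nvkm_instmem() macro), so chipset code recovers its private struct from the embedded base in a type-checked way. A self-contained illustration of the idiom, with entirely hypothetical names:

#include <stddef.h>

/* container_of as used by the downcast macros above: recover the
 * enclosing struct from a pointer to one of its members */
#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int index; };
struct derived { int priv; struct base base; };

#define to_derived(p) my_container_of((p), struct derived, base)

static int demo(struct base *b)
{
	return to_derived(b)->priv;	/* valid wherever 'base' is embedded */
}
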
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 2fb87fbfd11c..930d25b6e63c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -23,102 +23,110 @@
  */
 #include "priv.h"
 
-static int
+#include <subdev/fb.h>
+
+int
 nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode)
 {
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	int ret;
-
-	ret = nvkm_mm_head(&priv->tags, 0, 1, n, n, 1, pnode);
+	int ret = nvkm_mm_head(&ltc->tags, 0, 1, n, n, 1, pnode);
 	if (ret)
 		*pnode = NULL;
-
 	return ret;
 }
 
-static void
+void
 nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode)
 {
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	nvkm_mm_free(&priv->tags, pnode);
+	nvkm_mm_free(&ltc->tags, pnode);
 }
 
-static void
+void
 nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
 {
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
-	struct nvkm_ltc_priv *priv = (void *)ltc;
 	const u32 limit = first + count - 1;
 
-	BUG_ON((first > limit) || (limit >= priv->num_tags));
+	BUG_ON((first > limit) || (limit >= ltc->num_tags));
 
-	impl->cbc_clear(priv, first, limit);
-	impl->cbc_wait(priv);
+	ltc->func->cbc_clear(ltc, first, limit);
+	ltc->func->cbc_wait(ltc);
 }
 
-static int
+int
 nvkm_ltc_zbc_color_get(struct nvkm_ltc *ltc, int index, const u32 color[4])
 {
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	memcpy(priv->zbc_color[index], color, sizeof(priv->zbc_color[index]));
-	impl->zbc_clear_color(priv, index, color);
+	memcpy(ltc->zbc_color[index], color, sizeof(ltc->zbc_color[index]));
+	ltc->func->zbc_clear_color(ltc, index, color);
 	return index;
 }
 
-static int
+int
 nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
 {
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	priv->zbc_depth[index] = depth;
-	impl->zbc_clear_depth(priv, index, depth);
+	ltc->zbc_depth[index] = depth;
+	ltc->func->zbc_clear_depth(ltc, index, depth);
 	return index;
 }
 
-int
-_nvkm_ltc_init(struct nvkm_object *object)
+static void
+nvkm_ltc_intr(struct nvkm_subdev *subdev)
 {
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object);
-	struct nvkm_ltc_priv *priv = (void *)object;
-	int ret, i;
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	ltc->func->intr(ltc);
+}
 
-	ret = nvkm_subdev_init(&priv->base.base);
-	if (ret)
-		return ret;
+static int
+nvkm_ltc_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	return ltc->func->oneinit(ltc);
+}
+
+static int
+nvkm_ltc_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	int i;
 
-	for (i = priv->base.zbc_min; i <= priv->base.zbc_max; i++) {
-		impl->zbc_clear_color(priv, i, priv->zbc_color[i]);
-		impl->zbc_clear_depth(priv, i, priv->zbc_depth[i]);
+	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+		ltc->func->zbc_clear_color(ltc, i, ltc->zbc_color[i]);
+		ltc->func->zbc_clear_depth(ltc, i, ltc->zbc_depth[i]);
 	}
 
+	ltc->func->init(ltc);
 	return 0;
 }
 
+static void *
+nvkm_ltc_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
+	nvkm_mm_fini(&ltc->tags);
+	if (ram)
+		nvkm_mm_free(&ram->vram, &ltc->tag_ram);
+	return ltc;
+}
+
+static const struct nvkm_subdev_func
+nvkm_ltc = {
+	.dtor = nvkm_ltc_dtor,
+	.oneinit = nvkm_ltc_oneinit,
+	.init = nvkm_ltc_init,
+	.intr = nvkm_ltc_intr,
+};
+
 int
-nvkm_ltc_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_ltc **pltc)
 {
-	const struct nvkm_ltc_impl *impl = (void *)oclass;
-	struct nvkm_ltc_priv *priv;
-	int ret;
+	struct nvkm_ltc *ltc;
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PLTCG",
-				  "l2c", length, pobject);
-	priv = *pobject;
-	if (ret)
-		return ret;
-
-	memset(priv->zbc_color, 0x00, sizeof(priv->zbc_color));
-	memset(priv->zbc_depth, 0x00, sizeof(priv->zbc_depth));
-
-	priv->base.base.intr = impl->intr;
-	priv->base.tags_alloc = nvkm_ltc_tags_alloc;
-	priv->base.tags_free = nvkm_ltc_tags_free;
-	priv->base.tags_clear = nvkm_ltc_tags_clear;
-	priv->base.zbc_min = 1; /* reserve 0 for disabled */
-	priv->base.zbc_max = min(impl->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
-	priv->base.zbc_color_get = nvkm_ltc_zbc_color_get;
-	priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get;
+	if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_ltc, device, index, 0, &ltc->subdev);
+	ltc->func = func;
+	ltc->zbc_min = 1; /* reserve 0 for disabled */
+	ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
 	return 0;
 }
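
With dispatch moved into base.c above, the ZBC bookkeeping becomes base-class state: nvkm_ltc_new_() reserves slot 0 as the "disabled" entry (zbc_min = 1) and clamps zbc_max to min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1, i.e. 15 for the 16-entry gf100-family tables below. A hedged usage sketch of the new entry point; the helper is invented:

static int
set_black(struct nvkm_ltc *ltc, int slot)
{
	const u32 black[4] = { 0, 0, 0, 0 };

	if (slot < ltc->zbc_min || slot > ltc->zbc_max)
		return -EINVAL;	/* slot 0 stays "disabled" */
	return nvkm_ltc_zbc_color_get(ltc, slot, black);
}
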
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 7fb5ea0314cb..45ac765b753e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -28,38 +28,47 @@
 #include <subdev/timer.h>
 
 void
-gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+gf100_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
 {
-	nv_wr32(priv, 0x17e8cc, start);
-	nv_wr32(priv, 0x17e8d0, limit);
-	nv_wr32(priv, 0x17e8c8, 0x00000004);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_wr32(device, 0x17e8cc, start);
+	nvkm_wr32(device, 0x17e8d0, limit);
+	nvkm_wr32(device, 0x17e8c8, 0x00000004);
 }
 
 void
-gf100_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+gf100_ltc_cbc_wait(struct nvkm_ltc *ltc)
 {
+	struct nvkm_device *device = ltc->subdev.device;
 	int c, s;
-	for (c = 0; c < priv->ltc_nr; c++) {
-		for (s = 0; s < priv->lts_nr; s++)
-			nv_wait(priv, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0);
+	for (c = 0; c < ltc->ltc_nr; c++) {
+		for (s = 0; s < ltc->lts_nr; s++) {
+			const u32 addr = 0x1410c8 + (c * 0x2000) + (s * 0x400);
+			nvkm_msec(device, 2000,
+				if (!nvkm_rd32(device, addr))
+					break;
+			);
+		}
 	}
 }
 
 void
-gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+gf100_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
 {
-	nv_mask(priv, 0x17ea44, 0x0000000f, i);
-	nv_wr32(priv, 0x17ea48, color[0]);
-	nv_wr32(priv, 0x17ea4c, color[1]);
-	nv_wr32(priv, 0x17ea50, color[2]);
-	nv_wr32(priv, 0x17ea54, color[3]);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
+	nvkm_wr32(device, 0x17ea48, color[0]);
+	nvkm_wr32(device, 0x17ea4c, color[1]);
+	nvkm_wr32(device, 0x17ea50, color[2]);
+	nvkm_wr32(device, 0x17ea54, color[3]);
 }
 
 void
-gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
 {
-	nv_mask(priv, 0x17ea44, 0x0000000f, i);
-	nv_wr32(priv, 0x17ea58, depth);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
+	nvkm_wr32(device, 0x17ea58, depth);
 }
 
 static const struct nvkm_bitfield
@@ -81,88 +90,60 @@ gf100_ltc_lts_intr_name[] = {
 };
 
 static void
-gf100_ltc_lts_intr(struct nvkm_ltc_priv *priv, int ltc, int lts)
+gf100_ltc_lts_intr(struct nvkm_ltc *ltc, int c, int s)
 {
-	u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
-	u32 intr = nv_rd32(priv, base + 0x020);
+	struct nvkm_subdev *subdev = &ltc->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 base = 0x141000 + (c * 0x2000) + (s * 0x400);
+	u32 intr = nvkm_rd32(device, base + 0x020);
 	u32 stat = intr & 0x0000ffff;
+	char msg[128];
 
 	if (stat) {
-		nv_info(priv, "LTC%d_LTS%d:", ltc, lts);
-		nvkm_bitfield_print(gf100_ltc_lts_intr_name, stat);
-		pr_cont("\n");
+		nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
+		nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, stat, msg);
 	}
 
-	nv_wr32(priv, base + 0x020, intr);
+	nvkm_wr32(device, base + 0x020, intr);
 }
 
 void
-gf100_ltc_intr(struct nvkm_subdev *subdev)
+gf100_ltc_intr(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)subdev;
+	struct nvkm_device *device = ltc->subdev.device;
 	u32 mask;
 
-	mask = nv_rd32(priv, 0x00017c);
+	mask = nvkm_rd32(device, 0x00017c);
 	while (mask) {
-		u32 lts, ltc = __ffs(mask);
-		for (lts = 0; lts < priv->lts_nr; lts++)
-			gf100_ltc_lts_intr(priv, ltc, lts);
-		mask &= ~(1 << ltc);
+		u32 s, c = __ffs(mask);
+		for (s = 0; s < ltc->lts_nr; s++)
+			gf100_ltc_lts_intr(ltc, c, s);
+		mask &= ~(1 << c);
 	}
 }
 
-static int
-gf100_ltc_init(struct nvkm_object *object)
-{
-	struct nvkm_ltc_priv *priv = (void *)object;
-	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
-	int ret;
-
-	ret = nvkm_ltc_init(priv);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-	nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
-	nv_wr32(priv, 0x17e8d4, priv->tag_base);
-	nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
-	return 0;
-}
-
-void
-gf100_ltc_dtor(struct nvkm_object *object)
-{
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct nvkm_ltc_priv *priv = (void *)object;
-
-	nvkm_mm_fini(&priv->tags);
-	if (pfb->ram)
-		nvkm_mm_free(&pfb->vram, &priv->tag_ram);
-
-	nvkm_ltc_destroy(priv);
-}
-
 /* TODO: Figure out tag memory details and drop the over-cautious allocation.
  */
 int
-gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
+gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 {
+	struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
 	u32 tag_size, tag_margin, tag_align;
 	int ret;
 
 	/* No VRAM, no tags for now. */
-	if (!pfb->ram) {
-		priv->num_tags = 0;
+	if (!ram) {
+		ltc->num_tags = 0;
 		goto mm_init;
 	}
 
 	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
-	priv->num_tags = (pfb->ram->size >> 17) / 4;
-	if (priv->num_tags > (1 << 17))
-		priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
-	priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
+	ltc->num_tags = (ram->size >> 17) / 4;
+	if (ltc->num_tags > (1 << 17))
+		ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
+	ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */
 
-	tag_align = priv->ltc_nr * 0x800;
+	tag_align = ltc->ltc_nr * 0x800;
 	tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
 
 	/* 4 part 4 sub: 0x2000 bytes for 56 tags */
@@ -173,72 +154,71 @@ gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
 	 *
 	 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
 	 */
-	tag_size  = (priv->num_tags / 64) * 0x6000 + tag_margin;
+	tag_size  = (ltc->num_tags / 64) * 0x6000 + tag_margin;
 	tag_size += tag_align;
 	tag_size  = (tag_size + 0xfff) >> 12; /* round up */
 
-	ret = nvkm_mm_tail(&pfb->vram, 1, 1, tag_size, tag_size, 1,
-			   &priv->tag_ram);
+	ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
+			   &ltc->tag_ram);
 	if (ret) {
-		priv->num_tags = 0;
+		ltc->num_tags = 0;
 	} else {
-		u64 tag_base = ((u64)priv->tag_ram->offset << 12) + tag_margin;
+		u64 tag_base = ((u64)ltc->tag_ram->offset << 12) + tag_margin;
 
 		tag_base += tag_align - 1;
-		ret = do_div(tag_base, tag_align);
+		do_div(tag_base, tag_align);
 
-		priv->tag_base = tag_base;
+		ltc->tag_base = tag_base;
 	}
 
 mm_init:
-	ret = nvkm_mm_init(&priv->tags, 0, priv->num_tags, 1);
-	return ret;
+	return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
 }
 
 int
-gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+gf100_ltc_oneinit(struct nvkm_ltc *ltc)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ltc_priv *priv;
-	u32 parts, mask;
-	int ret, i;
-
-	ret = nvkm_ltc_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	parts = nv_rd32(priv, 0x022438);
-	mask = nv_rd32(priv, 0x022554);
+	struct nvkm_device *device = ltc->subdev.device;
+	const u32 parts = nvkm_rd32(device, 0x022438);
+	const u32  mask = nvkm_rd32(device, 0x022554);
+	const u32 slice = nvkm_rd32(device, 0x17e8dc) >> 28;
+	int i;
+
 	for (i = 0; i < parts; i++) {
 		if (!(mask & (1 << i)))
-			priv->ltc_nr++;
+			ltc->ltc_nr++;
 	}
-	priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28;
+	ltc->lts_nr = slice;
+
+	return gf100_ltc_oneinit_tag_ram(ltc);
+}
 
-	ret = gf100_ltc_init_tag_ram(pfb, priv);
-	if (ret)
-		return ret;
+static void
+gf100_ltc_init(struct nvkm_ltc *ltc)
+{
+	struct nvkm_device *device = ltc->subdev.device;
+	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
 
-	nv_subdev(priv)->intr = gf100_ltc_intr;
-	return 0;
+	nvkm_mask(device, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+	/* the mask above clears INTR_EN bit 20 (0x00100000), despite the
+	 * historical "~0x10" wording carried over from the old code */
+	nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
+	nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
 }
 
-struct nvkm_oclass *
-gf100_ltc_oclass = &(struct nvkm_ltc_impl) {
-	.base.handle = NV_SUBDEV(LTC, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ltc_ctor,
-		.dtor = gf100_ltc_dtor,
-		.init = gf100_ltc_init,
-		.fini = _nvkm_ltc_fini,
-	},
+static const struct nvkm_ltc_func
+gf100_ltc = {
+	.oneinit = gf100_ltc_oneinit,
+	.init = gf100_ltc_init,
 	.intr = gf100_ltc_intr,
 	.cbc_clear = gf100_ltc_cbc_clear,
 	.cbc_wait = gf100_ltc_cbc_wait,
 	.zbc = 16,
 	.zbc_clear_color = gf100_ltc_zbc_clear_color,
 	.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gf100_ltc, device, index, pltc);
+}
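
The cbc_wait conversion above swaps the old nv_wait(..., ~0, 0) helper for the nvkm_msec() polling macro: the body runs repeatedly until it executes break or the 2000 ms budget expires, and the expression evaluates negative on timeout (a result gf100_ltc_cbc_wait() deliberately ignores). The same idiom on a made-up status register, wrapped so the timeout is surfaced:

static int
wait_idle(struct nvkm_device *device, u32 addr)
{
	s64 taken = nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, addr) & 0x00000001))
			break;	/* hypothetical busy bit cleared */
	);
	return taken < 0 ? -ETIMEDOUT : 0;
}
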
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
index d53959b5ec67..839e6b4c597b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
@@ -23,37 +23,32 @@
  */
 #include "priv.h"
 
-static int
-gk104_ltc_init(struct nvkm_object *object)
+static void
+gk104_ltc_init(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)object;
-	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
-	int ret;
+	struct nvkm_device *device = ltc->subdev.device;
+	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
 
-	ret = nvkm_ltc_init(priv);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
-	nv_wr32(priv, 0x17e000, priv->ltc_nr);
-	nv_wr32(priv, 0x17e8d4, priv->tag_base);
-	nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
-	return 0;
+	nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
+	nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
 }
 
-struct nvkm_oclass *
-gk104_ltc_oclass = &(struct nvkm_ltc_impl) {
-	.base.handle = NV_SUBDEV(LTC, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ltc_ctor,
-		.dtor = gf100_ltc_dtor,
-		.init = gk104_ltc_init,
-		.fini = _nvkm_ltc_fini,
-	},
+static const struct nvkm_ltc_func
+gk104_ltc = {
+	.oneinit = gf100_ltc_oneinit,
+	.init = gk104_ltc_init,
 	.intr = gf100_ltc_intr,
 	.cbc_clear = gf100_ltc_cbc_clear,
 	.cbc_wait = gf100_ltc_cbc_wait,
 	.zbc = 16,
 	.zbc_clear_color = gf100_ltc_zbc_clear_color,
 	.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gk104_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gk104_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 6b3f6f4ce107..389331bb63ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -27,127 +27,121 @@
 #include <subdev/timer.h>
 
 static void
-gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
 {
-	nv_wr32(priv, 0x17e270, start);
-	nv_wr32(priv, 0x17e274, limit);
-	nv_wr32(priv, 0x17e26c, 0x00000004);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_wr32(device, 0x17e270, start);
+	nvkm_wr32(device, 0x17e274, limit);
+	nvkm_wr32(device, 0x17e26c, 0x00000004);
 }
 
 static void
-gm107_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+gm107_ltc_cbc_wait(struct nvkm_ltc *ltc)
 {
+	struct nvkm_device *device = ltc->subdev.device;
 	int c, s;
-	for (c = 0; c < priv->ltc_nr; c++) {
-		for (s = 0; s < priv->lts_nr; s++)
-			nv_wait(priv, 0x14046c + c * 0x2000 + s * 0x200, ~0, 0);
+	for (c = 0; c < ltc->ltc_nr; c++) {
+		for (s = 0; s < ltc->lts_nr; s++) {
+			const u32 addr = 0x14046c + (c * 0x2000) + (s * 0x200);
+			nvkm_msec(device, 2000,
+				if (!nvkm_rd32(device, addr))
+					break;
+			);
+		}
 	}
 }
 
 static void
-gm107_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+gm107_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
 {
-	nv_mask(priv, 0x17e338, 0x0000000f, i);
-	nv_wr32(priv, 0x17e33c, color[0]);
-	nv_wr32(priv, 0x17e340, color[1]);
-	nv_wr32(priv, 0x17e344, color[2]);
-	nv_wr32(priv, 0x17e348, color[3]);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17e338, 0x0000000f, i);
+	nvkm_wr32(device, 0x17e33c, color[0]);
+	nvkm_wr32(device, 0x17e340, color[1]);
+	nvkm_wr32(device, 0x17e344, color[2]);
+	nvkm_wr32(device, 0x17e348, color[3]);
 }
 
 static void
-gm107_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
 {
-	nv_mask(priv, 0x17e338, 0x0000000f, i);
-	nv_wr32(priv, 0x17e34c, depth);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17e338, 0x0000000f, i);
+	nvkm_wr32(device, 0x17e34c, depth);
 }
 
 static void
-gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
+gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
 {
-	u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400);
-	u32 stat = nv_rd32(priv, base + 0x00c);
+	struct nvkm_subdev *subdev = &ltc->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 base = 0x140000 + (c * 0x2000) + (s * 0x400);
+	u32 stat = nvkm_rd32(device, base + 0x00c);
 
 	if (stat) {
-		nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat);
-		nv_wr32(priv, base + 0x00c, stat);
+		nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
+		nvkm_wr32(device, base + 0x00c, stat);
 	}
 }
 
 static void
-gm107_ltc_intr(struct nvkm_subdev *subdev)
+gm107_ltc_intr(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)subdev;
+	struct nvkm_device *device = ltc->subdev.device;
 	u32 mask;
 
-	mask = nv_rd32(priv, 0x00017c);
+	mask = nvkm_rd32(device, 0x00017c);
 	while (mask) {
-		u32 lts, ltc = __ffs(mask);
-		for (lts = 0; lts < priv->lts_nr; lts++)
-			gm107_ltc_lts_isr(priv, ltc, lts);
-		mask &= ~(1 << ltc);
+		u32 s, c = __ffs(mask);
+		for (s = 0; s < ltc->lts_nr; s++)
+			gm107_ltc_lts_isr(ltc, c, s);
+		mask &= ~(1 << c);
 	}
 }
 
 static int
-gm107_ltc_init(struct nvkm_object *object)
+gm107_ltc_oneinit(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)object;
-	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
-	int ret;
-
-	ret = nvkm_ltc_init(priv);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x17e27c, priv->ltc_nr);
-	nv_wr32(priv, 0x17e278, priv->tag_base);
-	nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
-	return 0;
-}
+	struct nvkm_device *device = ltc->subdev.device;
+	const u32 parts = nvkm_rd32(device, 0x022438);
+	const u32  mask = nvkm_rd32(device, 0x021c14);
+	const u32 slice = nvkm_rd32(device, 0x17e280) >> 28;
+	int i;
 
-static int
-gm107_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ltc_priv *priv;
-	u32 parts, mask;
-	int ret, i;
-
-	ret = nvkm_ltc_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	parts = nv_rd32(priv, 0x022438);
-	mask = nv_rd32(priv, 0x021c14);
 	for (i = 0; i < parts; i++) {
 		if (!(mask & (1 << i)))
-			priv->ltc_nr++;
+			ltc->ltc_nr++;
 	}
-	priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28;
+	ltc->lts_nr = slice;
+
+	return gf100_ltc_oneinit_tag_ram(ltc);
+}
 
-	ret = gf100_ltc_init_tag_ram(pfb, priv);
-	if (ret)
-		return ret;
+static void
+gm107_ltc_init(struct nvkm_ltc *ltc)
+{
+	struct nvkm_device *device = ltc->subdev.device;
+	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
 
-	return 0;
+	nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e278, ltc->tag_base);
+	nvkm_mask(device, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
 }
 
-struct nvkm_oclass *
-gm107_ltc_oclass = &(struct nvkm_ltc_impl) {
-	.base.handle = NV_SUBDEV(LTC, 0xff),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_ltc_ctor,
-		.dtor = gf100_ltc_dtor,
-		.init = gm107_ltc_init,
-		.fini = _nvkm_ltc_fini,
-	},
+static const struct nvkm_ltc_func
+gm107_ltc = {
+	.oneinit = gm107_ltc_oneinit,
+	.init = gm107_ltc_init,
 	.intr = gm107_ltc_intr,
 	.cbc_clear = gm107_ltc_cbc_clear,
 	.cbc_wait = gm107_ltc_cbc_wait,
 	.zbc = 16,
 	.zbc_clear_color = gm107_ltc_zbc_clear_color,
 	.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gm107_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gm107_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 09537d7b6783..4e05037cc99f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -1,69 +1,29 @@
 #ifndef __NVKM_LTC_PRIV_H__
 #define __NVKM_LTC_PRIV_H__
+#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
 #include <subdev/ltc.h>
 
-#include <core/mm.h>
-struct nvkm_fb;
+int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
+		  int index, struct nvkm_ltc **);
 
-struct nvkm_ltc_priv {
-	struct nvkm_ltc base;
-	u32 ltc_nr;
-	u32 lts_nr;
+struct nvkm_ltc_func {
+	int  (*oneinit)(struct nvkm_ltc *);
+	void (*init)(struct nvkm_ltc *);
+	void (*intr)(struct nvkm_ltc *);
 
-	u32 num_tags;
-	u32 tag_base;
-	struct nvkm_mm tags;
-	struct nvkm_mm_node *tag_ram;
-
-	u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
-	u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
-};
-
-#define nvkm_ltc_create(p,e,o,d)                                               \
-	nvkm_ltc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_ltc_destroy(p) ({                                                 \
-	struct nvkm_ltc_priv *_priv = (p);                                     \
-	_nvkm_ltc_dtor(nv_object(_priv));                                      \
-})
-#define nvkm_ltc_init(p) ({                                                    \
-	struct nvkm_ltc_priv *_priv = (p);                                     \
-	_nvkm_ltc_init(nv_object(_priv));                                      \
-})
-#define nvkm_ltc_fini(p,s) ({                                                  \
-	struct nvkm_ltc_priv *_priv = (p);                                     \
-	_nvkm_ltc_fini(nv_object(_priv), (s));                                 \
-})
-
-int  nvkm_ltc_create_(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, int, void **);
-
-#define _nvkm_ltc_dtor _nvkm_subdev_dtor
-int _nvkm_ltc_init(struct nvkm_object *);
-#define _nvkm_ltc_fini _nvkm_subdev_fini
-
-int  gf100_ltc_ctor(struct nvkm_object *, struct nvkm_object *,
-		    struct nvkm_oclass *, void *, u32,
-		    struct nvkm_object **);
-void gf100_ltc_dtor(struct nvkm_object *);
-int  gf100_ltc_init_tag_ram(struct nvkm_fb *, struct nvkm_ltc_priv *);
-int  gf100_ltc_tags_alloc(struct nvkm_ltc *, u32, struct nvkm_mm_node **);
-void gf100_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
-
-struct nvkm_ltc_impl {
-	struct nvkm_oclass base;
-	void (*intr)(struct nvkm_subdev *);
-
-	void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit);
-	void (*cbc_wait)(struct nvkm_ltc_priv *);
+	void (*cbc_clear)(struct nvkm_ltc *, u32 start, u32 limit);
+	void (*cbc_wait)(struct nvkm_ltc *);
 
 	int zbc;
-	void (*zbc_clear_color)(struct nvkm_ltc_priv *, int, const u32[4]);
-	void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32);
+	void (*zbc_clear_color)(struct nvkm_ltc *, int, const u32[4]);
+	void (*zbc_clear_depth)(struct nvkm_ltc *, int, const u32);
 };
 
-void gf100_ltc_intr(struct nvkm_subdev *);
-void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32);
-void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *);
-void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]);
-void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32);
+int gf100_ltc_oneinit(struct nvkm_ltc *);
+int gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *);
+void gf100_ltc_intr(struct nvkm_ltc *);
+void gf100_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
+void gf100_ltc_cbc_wait(struct nvkm_ltc *);
+void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
+void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
 #endif
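
The pruned header above now exports the gf100 helpers precisely so later chipsets can assemble their func tables from shared parts, as the gk104 and gm107 tables earlier in this patch do. A sketch of how a further, entirely hypothetical chipset ("gxyz") would compose one, overriding only its init routine:

static void
gxyz_ltc_init(struct nvkm_ltc *ltc)
{
	/* chip-specific register setup would go here */
}

static const struct nvkm_ltc_func
gxyz_ltc = {
	.oneinit = gf100_ltc_oneinit,	/* shared partition/slice probe */
	.init = gxyz_ltc_init,		/* per-chip register layout */
	.intr = gf100_ltc_intr,
	.cbc_clear = gf100_ltc_cbc_clear,
	.cbc_wait = gf100_ltc_cbc_wait,
	.zbc = 16,
	.zbc_clear_color = gf100_ltc_zbc_clear_color,
	.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
};
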
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 721643f04bb5..bef325dcb4d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -1,11 +1,7 @@
 nvkm-y += nvkm/subdev/mc/base.o
 nvkm-y += nvkm/subdev/mc/nv04.o
-nvkm-y += nvkm/subdev/mc/nv40.o
 nvkm-y += nvkm/subdev/mc/nv44.o
-nvkm-y += nvkm/subdev/mc/nv4c.o
 nvkm-y += nvkm/subdev/mc/nv50.o
-nvkm-y += nvkm/subdev/mc/g94.o
 nvkm-y += nvkm/subdev/mc/g98.o
 nvkm-y += nvkm/subdev/mc/gf100.o
-nvkm-y += nvkm/subdev/mc/gf106.o
 nvkm-y += nvkm/subdev/mc/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 5b051a26653e..954fbbe56c4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -23,147 +23,101 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 
-static inline void
-nvkm_mc_unk260(struct nvkm_mc *pmc, u32 data)
+void
+nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
 {
-	const struct nvkm_mc_oclass *impl = (void *)nv_oclass(pmc);
-	if (impl->unk260)
-		impl->unk260(pmc, data);
+	if (mc->func->unk260)
+		mc->func->unk260(mc, data);
 }
 
-static inline u32
-nvkm_mc_intr_mask(struct nvkm_mc *pmc)
+void
+nvkm_mc_intr_unarm(struct nvkm_mc *mc)
 {
-	u32 intr = nv_rd32(pmc, 0x000100);
-	if (intr == 0xffffffff) /* likely fallen off the bus */
-		intr = 0x00000000;
-	return intr;
+	return mc->func->intr_unarm(mc);
 }
 
-static irqreturn_t
-nvkm_mc_intr(int irq, void *arg)
+void
+nvkm_mc_intr_rearm(struct nvkm_mc *mc)
 {
-	struct nvkm_mc *pmc = arg;
-	const struct nvkm_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
-	const struct nvkm_mc_intr *map = oclass->intr;
-	struct nvkm_subdev *unit;
-	u32 intr;
+	return mc->func->intr_rearm(mc);
+}
 
-	nv_wr32(pmc, 0x000140, 0x00000000);
-	nv_rd32(pmc, 0x000140);
-	intr = nvkm_mc_intr_mask(pmc);
-	if (pmc->use_msi)
-		oclass->msi_rearm(pmc);
+static u32
+nvkm_mc_intr_mask(struct nvkm_mc *mc)
+{
+	u32 intr = mc->func->intr_mask(mc);
+	if (WARN_ON_ONCE(intr == 0xffffffff))
+		intr = 0; /* likely fallen off the bus */
+	return intr;
+}
 
-	if (intr) {
-		u32 stat = intr = nvkm_mc_intr_mask(pmc);
-		while (map->stat) {
-			if (intr & map->stat) {
-				unit = nvkm_subdev(pmc, map->unit);
-				if (unit && unit->intr)
-					unit->intr(unit);
-				stat &= ~map->stat;
-			}
-			map++;
+void
+nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	struct nvkm_subdev *subdev;
+	const struct nvkm_mc_intr *map = mc->func->intr;
+	u32 stat, intr;
+
+	stat = intr = nvkm_mc_intr_mask(mc);
+	while (map->stat) {
+		if (intr & map->stat) {
+			subdev = nvkm_device_subdev(device, map->unit);
+			if (subdev)
+				nvkm_subdev_intr(subdev);
+			stat &= ~map->stat;
 		}
-
-		if (stat)
-			nv_error(pmc, "unknown intr 0x%08x\n", stat);
+		map++;
 	}
 
-	nv_wr32(pmc, 0x000140, 0x00000001);
-	return intr ? IRQ_HANDLED : IRQ_NONE;
+	if (stat)
+		nvkm_error(&mc->subdev, "intr %08x\n", stat);
+	*handled = intr != 0;
 }
 
-int
-_nvkm_mc_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_mc *pmc = (void *)object;
-	nv_wr32(pmc, 0x000140, 0x00000000);
-	return nvkm_subdev_fini(&pmc->base, suspend);
+	struct nvkm_mc *mc = nvkm_mc(subdev);
+	nvkm_mc_intr_unarm(mc);
+	return 0;
 }
 
-int
-_nvkm_mc_init(struct nvkm_object *object)
+static int
+nvkm_mc_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_mc *pmc = (void *)object;
-	int ret = nvkm_subdev_init(&pmc->base);
-	if (ret)
-		return ret;
-	nv_wr32(pmc, 0x000140, 0x00000001);
+	struct nvkm_mc *mc = nvkm_mc(subdev);
+	if (mc->func->init)
+		mc->func->init(mc);
+	nvkm_mc_intr_rearm(mc);
 	return 0;
 }
 
-void
-_nvkm_mc_dtor(struct nvkm_object *object)
+static void *
+nvkm_mc_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_mc *pmc = (void *)object;
-	free_irq(pmc->irq, pmc);
-	if (pmc->use_msi)
-		pci_disable_msi(device->pdev);
-	nvkm_subdev_destroy(&pmc->base);
+	return nvkm_mc(subdev);
 }
 
+static const struct nvkm_subdev_func
+nvkm_mc = {
+	.dtor = nvkm_mc_dtor,
+	.init = nvkm_mc_init,
+	.fini = nvkm_mc_fini,
+};
+
 int
-nvkm_mc_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *bclass, int length, void **pobject)
+nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_mc **pmc)
 {
-	const struct nvkm_mc_oclass *oclass = (void *)bclass;
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_mc *pmc;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, bclass, 0, "PMC",
-				  "master", length, pobject);
-	pmc = *pobject;
-	if (ret)
-		return ret;
-
-	pmc->unk260 = nvkm_mc_unk260;
-
-	if (nv_device_is_pci(device)) {
-		switch (device->pdev->device & 0x0ff0) {
-		case 0x00f0:
-		case 0x02e0:
-			/* BR02? NFI how these would be handled yet exactly */
-			break;
-		default:
-			switch (device->chipset) {
-			case 0xaa:
-				/* reported broken, nv also disable it */
-				break;
-			default:
-				pmc->use_msi = true;
-				break;
-			}
-		}
-
-		pmc->use_msi = nvkm_boolopt(device->cfgopt, "NvMSI",
-					    pmc->use_msi);
-
-		if (pmc->use_msi && oclass->msi_rearm) {
-			pmc->use_msi = pci_enable_msi(device->pdev) == 0;
-			if (pmc->use_msi) {
-				nv_info(pmc, "MSI interrupts enabled\n");
-				oclass->msi_rearm(pmc);
-			}
-		} else {
-			pmc->use_msi = false;
-		}
-	}
-
-	ret = nv_device_get_irq(device, true);
-	if (ret < 0)
-		return ret;
-	pmc->irq = ret;
+	struct nvkm_mc *mc;
 
-	ret = request_irq(pmc->irq, nvkm_mc_intr, IRQF_SHARED, "nvkm", pmc);
-	if (ret < 0)
-		return ret;
+	if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+		return -ENOMEM;
 
+	nvkm_subdev_ctor(&nvkm_mc, device, index, 0, &mc->subdev);
+	mc->func = func;
 	return 0;
 }
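
Note what disappeared from base.c above: the subdev no longer requests the IRQ line or manages MSI; it only exposes intr_unarm/intr_rearm/intr_mask hooks plus a map-walking nvkm_mc_intr() that reports through *handled. A hedged sketch of the top half that presumably now lives with the device code; only the three nvkm_mc_* calls are taken from the patch, the surrounding IRQ plumbing is an assumption:

static irqreturn_t
top_half(int irq, void *arg)
{
	struct nvkm_mc *mc = arg;
	bool handled;

	nvkm_mc_intr_unarm(mc);		/* mask + flush before dispatch */
	nvkm_mc_intr(mc, &handled);	/* walk the mc->func->intr map */
	nvkm_mc_intr_rearm(mc);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
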
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 8ab7f1272a14..7344ad659105 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -21,38 +21,40 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 static const struct nvkm_mc_intr
 g98_mc_intr[] = {
-	{ 0x04000000, NVDEV_ENGINE_DISP },  /* DISP first, so pageflip timestamps work */
-	{ 0x00000001, NVDEV_ENGINE_MSPPP },
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00004000, NVDEV_ENGINE_SEC },	/* NV84:NVA3 */
-	{ 0x00008000, NVDEV_ENGINE_MSVLD },
-	{ 0x00020000, NVDEV_ENGINE_MSPDEC },
-	{ 0x00040000, NVDEV_SUBDEV_PMU },	/* NVA3:NVC0 */
-	{ 0x00080000, NVDEV_SUBDEV_THERM },	/* NVA3:NVC0 */
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x00200000, NVDEV_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVDEV_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
-	{ 0x00400000, NVDEV_ENGINE_CE0 },	/* NVA3-     */
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
-	{ 0x0042d101, NVDEV_SUBDEV_FB },
+	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP first, so pageflip timestamps work */
+	{ 0x00000001, NVKM_ENGINE_MSPPP },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00004000, NVKM_ENGINE_SEC },	/* NV84:NVA3 */
+	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{ 0x00020000, NVKM_ENGINE_MSPDEC },
+	{ 0x00040000, NVKM_SUBDEV_PMU },	/* NVA3:NVC0 */
+	{ 0x00080000, NVKM_SUBDEV_THERM },	/* NVA3:NVC0 */
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
+	{ 0x00200000, NVKM_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
+	{ 0x00400000, NVKM_ENGINE_CE0 },	/* NVA3-     */
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
+	{ 0x0042d101, NVKM_SUBDEV_FB },
 	{},
 };
 
-struct nvkm_oclass *
-g98_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x98),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+g98_mc = {
+	.init = nv50_mc_init,
 	.intr = g98_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
+int
+g98_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&g98_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 2425984b045e..122fe69e83e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -21,56 +21,77 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 const struct nvkm_mc_intr
 gf100_mc_intr[] = {
-	{ 0x04000000, NVDEV_ENGINE_DISP },  /* DISP first, so pageflip timestamps work. */
-	{ 0x00000001, NVDEV_ENGINE_MSPPP },
-	{ 0x00000020, NVDEV_ENGINE_CE0 },
-	{ 0x00000040, NVDEV_ENGINE_CE1 },
-	{ 0x00000080, NVDEV_ENGINE_CE2 },
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00002000, NVDEV_SUBDEV_FB },
-	{ 0x00008000, NVDEV_ENGINE_MSVLD },
-	{ 0x00040000, NVDEV_SUBDEV_THERM },
-	{ 0x00020000, NVDEV_ENGINE_MSPDEC },
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x00200000, NVDEV_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVDEV_SUBDEV_I2C },	/* PMGR->I2C/AUX */
-	{ 0x01000000, NVDEV_SUBDEV_PMU },
-	{ 0x02000000, NVDEV_SUBDEV_LTC },
-	{ 0x08000000, NVDEV_SUBDEV_FB },
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x40000000, NVDEV_SUBDEV_IBUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
+	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP first, so pageflip timestamps work. */
+	{ 0x00000001, NVKM_ENGINE_MSPPP },
+	{ 0x00000020, NVKM_ENGINE_CE0 },
+	{ 0x00000040, NVKM_ENGINE_CE1 },
+	{ 0x00000080, NVKM_ENGINE_CE2 },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00002000, NVKM_SUBDEV_FB },
+	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{ 0x00040000, NVKM_SUBDEV_THERM },
+	{ 0x00020000, NVKM_ENGINE_MSPDEC },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
+	{ 0x00200000, NVKM_SUBDEV_I2C },	/* PMGR->I2C/AUX */
+	{ 0x01000000, NVKM_SUBDEV_PMU },
+	{ 0x02000000, NVKM_SUBDEV_LTC },
+	{ 0x08000000, NVKM_SUBDEV_FB },
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x40000000, NVKM_SUBDEV_IBUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
 	{},
 };
 
-static void
-gf100_mc_msi_rearm(struct nvkm_mc *pmc)
+void
+gf100_mc_intr_unarm(struct nvkm_mc *mc)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000000);
+	nvkm_wr32(device, 0x000144, 0x00000000);
+	nvkm_rd32(device, 0x000140);
+}
+
+void
+gf100_mc_intr_rearm(struct nvkm_mc *mc)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000001);
+	nvkm_wr32(device, 0x000144, 0x00000001);
+}
+
+u32
+gf100_mc_intr_mask(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv = (void *)pmc;
-	nv_wr32(priv, 0x088704, 0x00000000);
+	struct nvkm_device *device = mc->subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x000100);
+	u32 intr1 = nvkm_rd32(device, 0x000104);
+	return intr0 | intr1;
 }
 
 void
-gf100_mc_unk260(struct nvkm_mc *pmc, u32 data)
+gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
 {
-	nv_wr32(pmc, 0x000260, data);
+	nvkm_wr32(mc->subdev.device, 0x000260, data);
 }
 
-struct nvkm_oclass *
-gf100_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+gf100_mc = {
+	.init = nv50_mc_init,
 	.intr = gf100_mc_intr,
-	.msi_rearm = gf100_mc_msi_rearm,
+	.intr_unarm = gf100_mc_intr_unarm,
+	.intr_rearm = gf100_mc_intr_rearm,
+	.intr_mask = gf100_mc_intr_mask,
 	.unk260 = gf100_mc_unk260,
-}.base;
+};
+
+int
+gf100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&gf100_mc, device, index, pmc);
+}
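
gf100 carries two interrupt trees, so each hook above either writes both enable registers 0x000140/0x000144 or ORs the two status words from 0x000100/0x000104; the trailing nvkm_rd32() in intr_unarm() flushes the posted write so the disable has reached the device before the handler returns. The flush idiom in isolation ("addr" is illustrative):

static void
disarm(struct nvkm_device *device, u32 addr)
{
	nvkm_wr32(device, addr, 0x00000000);
	nvkm_rd32(device, addr);	/* read back to flush the posted write */
}
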
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index 43b27742956d..d92efb33bcc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -21,17 +21,19 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
-struct nvkm_oclass *
-gk20a_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+gk20a_mc = {
+	.init = nv50_mc_init,
 	.intr = gf100_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
+	.intr_unarm = gf100_mc_intr_unarm,
+	.intr_rearm = gf100_mc_intr_rearm,
+	.intr_mask = gf100_mc_intr_mask,
+};
+
+int
+gk20a_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&gk20a_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index 32713827b4dc..d282ec1555f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -21,58 +21,63 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 const struct nvkm_mc_intr
 nv04_mc_intr[] = {
-	{ 0x00000001, NVDEV_ENGINE_MPEG },	/* NV17- MPEG/ME */
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00010000, NVDEV_ENGINE_DISP },
-	{ 0x00020000, NVDEV_ENGINE_VP },	/* NV40- */
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x01000000, NVDEV_ENGINE_DISP },	/* NV04- PCRTC0 */
-	{ 0x02000000, NVDEV_ENGINE_DISP },	/* NV11- PCRTC1 */
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
+	{ 0x00000001, NVKM_ENGINE_MPEG },	/* NV17- MPEG/ME */
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00010000, NVKM_ENGINE_DISP },
+	{ 0x00020000, NVKM_ENGINE_VP },	/* NV40- */
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x01000000, NVKM_ENGINE_DISP },	/* NV04- PCRTC0 */
+	{ 0x02000000, NVKM_ENGINE_DISP },	/* NV11- PCRTC1 */
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
 	{}
 };
 
-int
-nv04_mc_init(struct nvkm_object *object)
+void
+nv04_mc_intr_unarm(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv = (void *)object;
-
-	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
-	nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
-
-	return nvkm_mc_init(&priv->base);
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000000);
+	nvkm_rd32(device, 0x000140);
 }
 
-int
-nv04_mc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+void
+nv04_mc_intr_rearm(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv;
-	int ret;
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000001);
+}
 
-	ret = nvkm_mc_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+u32
+nv04_mc_intr_mask(struct nvkm_mc *mc)
+{
+	return nvkm_rd32(mc->subdev.device, 0x000100);
+}
 
-	return 0;
+void
+nv04_mc_init(struct nvkm_mc *mc)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
+	nvkm_wr32(device, 0x001850, 0x00000001); /* disable rom access */
 }
 
-struct nvkm_oclass *
-nv04_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv04_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+nv04_mc = {
+	.init = nv04_mc_init,
 	.intr = nv04_mc_intr,
-}.base;
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
+int
+nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&nv04_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
deleted file mode 100644
index 411de3d08ab6..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __NVKM_MC_NV04_H__
-#define __NVKM_MC_NV04_H__
-#include "priv.h"
-
-struct nv04_mc_priv {
-	struct nvkm_mc base;
-};
-
-int  nv04_mc_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-
-extern const struct nvkm_mc_intr nv04_mc_intr[];
-int  nv04_mc_init(struct nvkm_object *);
-void nv40_mc_msi_rearm(struct nvkm_mc *);
-int  nv44_mc_init(struct nvkm_object *object);
-int  nv50_mc_init(struct nvkm_object *);
-extern const struct nvkm_mc_intr nv50_mc_intr[];
-extern const struct nvkm_mc_intr gf100_mc_intr[];
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index 2c7f7c701a2b..9a3ac9965be0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -21,33 +21,33 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
-int
-nv44_mc_init(struct nvkm_object *object)
+void
+nv44_mc_init(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv = (void *)object;
-	u32 tmp = nv_rd32(priv, 0x10020c);
-
-	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
+	struct nvkm_device *device = mc->subdev.device;
+	u32 tmp = nvkm_rd32(device, 0x10020c);
 
-	nv_wr32(priv, 0x001700, tmp);
-	nv_wr32(priv, 0x001704, 0);
-	nv_wr32(priv, 0x001708, 0);
-	nv_wr32(priv, 0x00170c, tmp);
+	nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
 
-	return nvkm_mc_init(&priv->base);
+	nvkm_wr32(device, 0x001700, tmp);
+	nvkm_wr32(device, 0x001704, 0);
+	nvkm_wr32(device, 0x001708, 0);
+	nvkm_wr32(device, 0x00170c, tmp);
 }
 
-struct nvkm_oclass *
-nv44_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x44),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv44_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+nv44_mc = {
+	.init = nv44_mc_init,
 	.intr = nv04_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
+int
+nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&nv44_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 40e3019e1fde..5f27d7b8fddd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -21,52 +21,44 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
-
-#include <core/device.h>
+#include "priv.h"
 
 const struct nvkm_mc_intr
 nv50_mc_intr[] = {
-	{ 0x04000000, NVDEV_ENGINE_DISP },  /* DISP before FIFO, so pageflip-timestamping works! */
-	{ 0x00000001, NVDEV_ENGINE_MPEG },
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00004000, NVDEV_ENGINE_CIPHER },	/* NV84- */
-	{ 0x00008000, NVDEV_ENGINE_BSP },	/* NV84- */
-	{ 0x00020000, NVDEV_ENGINE_VP },	/* NV84- */
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x00200000, NVDEV_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVDEV_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
-	{ 0x0002d101, NVDEV_SUBDEV_FB },
+	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP before FIFO, so pageflip-timestamping works! */
+	{ 0x00000001, NVKM_ENGINE_MPEG },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00004000, NVKM_ENGINE_CIPHER },	/* NV84- */
+	{ 0x00008000, NVKM_ENGINE_BSP },	/* NV84- */
+	{ 0x00020000, NVKM_ENGINE_VP },	/* NV84- */
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
+	{ 0x00200000, NVKM_SUBDEV_I2C },	/* PMGR->I2C/AUX */
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
+	{ 0x0002d101, NVKM_SUBDEV_FB },
 	{},
 };
 
-static void
-nv50_mc_msi_rearm(struct nvkm_mc *pmc)
+void
+nv50_mc_init(struct nvkm_mc *mc)
 {
-	struct nvkm_device *device = nv_device(pmc);
-	pci_write_config_byte(device->pdev, 0x68, 0xff);
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */
 }
 
+static const struct nvkm_mc_func
+nv50_mc = {
+	.init = nv50_mc_init,
+	.intr = nv50_mc_intr,
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
 int
-nv50_mc_init(struct nvkm_object *object)
+nv50_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
 {
-	struct nv04_mc_priv *priv = (void *)object;
-	nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
-	return nvkm_mc_init(&priv->base);
+	return nvkm_mc_new_(&nv50_mc, device, index, pmc);
 }
-
-struct nvkm_oclass *
-nv50_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv50_mc_intr,
-	.msi_rearm = nv50_mc_msi_rearm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index d2cad07afd1a..307f6c692287 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -1,36 +1,42 @@
 #ifndef __NVKM_MC_PRIV_H__
 #define __NVKM_MC_PRIV_H__
+#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
 #include <subdev/mc.h>
 
-#define nvkm_mc_create(p,e,o,d)                                             \
-	nvkm_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_mc_destroy(p) ({                                               \
-	struct nvkm_mc *pmc = (p); _nvkm_mc_dtor(nv_object(pmc));        \
-})
-#define nvkm_mc_init(p) ({                                                  \
-	struct nvkm_mc *pmc = (p); _nvkm_mc_init(nv_object(pmc));        \
-})
-#define nvkm_mc_fini(p,s) ({                                                \
-	struct nvkm_mc *pmc = (p); _nvkm_mc_fini(nv_object(pmc), (s));   \
-})
-
-int  nvkm_mc_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
-void _nvkm_mc_dtor(struct nvkm_object *);
-int  _nvkm_mc_init(struct nvkm_object *);
-int  _nvkm_mc_fini(struct nvkm_object *, bool);
+int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
+		 int index, struct nvkm_mc **);
 
 struct nvkm_mc_intr {
 	u32 stat;
 	u32 unit;
 };
 
-struct nvkm_mc_oclass {
-	struct nvkm_oclass base;
+struct nvkm_mc_func {
+	void (*init)(struct nvkm_mc *);
 	const struct nvkm_mc_intr *intr;
-	void (*msi_rearm)(struct nvkm_mc *);
+	/* disable reporting of interrupts to host */
+	void (*intr_unarm)(struct nvkm_mc *);
+	/* enable reporting of interrupts to host */
+	void (*intr_rearm)(struct nvkm_mc *);
+	/* retrieve pending interrupt mask (NV_PMC_INTR) */
+	u32 (*intr_mask)(struct nvkm_mc *);
 	void (*unk260)(struct nvkm_mc *, u32);
 };
 
+void nv04_mc_init(struct nvkm_mc *);
+extern const struct nvkm_mc_intr nv04_mc_intr[];
+void nv04_mc_intr_unarm(struct nvkm_mc *);
+void nv04_mc_intr_rearm(struct nvkm_mc *);
+u32 nv04_mc_intr_mask(struct nvkm_mc *);
+
+void nv44_mc_init(struct nvkm_mc *);
+
+void nv50_mc_init(struct nvkm_mc *);
+extern const struct nvkm_mc_intr nv50_mc_intr[];
+
+extern const struct nvkm_mc_intr gf100_mc_intr[];
+void gf100_mc_intr_unarm(struct nvkm_mc *);
+void gf100_mc_intr_rearm(struct nvkm_mc *);
+u32 gf100_mc_intr_mask(struct nvkm_mc *);
 void gf100_mc_unk260(struct nvkm_mc *, u32);
 #endif
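
/*
 * Illustrative sketch, not part of the patch: how a top-level handler
 * can use the intr_unarm/intr_mask/intr_rearm hooks declared above --
 * quiesce reporting, read the pending bits, dispatch, re-arm.  All
 * names and values below are stand-ins, not the real nvkm code.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

struct mc;

struct mc_func {
	void (*intr_unarm)(struct mc *);	/* stop host reporting */
	void (*intr_rearm)(struct mc *);	/* resume host reporting */
	u32  (*intr_mask)(struct mc *);		/* read pending bits */
};

struct mc {
	const struct mc_func *func;
	u32 pending;				/* stand-in for NV_PMC_INTR */
};

static void unarm(struct mc *mc) { (void)mc; printf("reporting off\n"); }
static void rearm(struct mc *mc) { (void)mc; printf("reporting on\n"); }
static u32  mask(struct mc *mc)  { return mc->pending; }

static const struct mc_func funcs = {
	.intr_unarm = unarm,
	.intr_rearm = rearm,
	.intr_mask  = mask,
};

static void
mc_intr(struct mc *mc)
{
	u32 stat;

	mc->func->intr_unarm(mc);
	stat = mc->func->intr_mask(mc);
	printf("pending %08x\n", (unsigned)stat);	/* dispatch here */
	mc->func->intr_rearm(mc);
}

int
main(void)
{
	struct mc mc = { .func = &funcs, .pending = 0x04000100 };
	mc_intr(&mc);
	return 0;
}
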
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 277b6ec04e24..e04a2296ecd0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/mmu.h>
-#include <subdev/fb.h>
+#include "priv.h"
 
 #include <core/gpuobj.h>
+#include <subdev/fb.h>
 
 void
 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
@@ -32,12 +32,12 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_mm_node *r;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	u32 end, len;
 
 	delta = 0;
@@ -46,14 +46,14 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 		u32 num  = r->length >> bits;
 
 		while (num) {
-			struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
 			end = (pte + num);
 			if (unlikely(end >= max))
 				end = max;
 			len = end - pte;
 
-			mmu->map(vma, pgt, node, pte, len, phys, delta);
+			mmu->func->map(vma, pgt, node, pte, len, phys, delta);
 
 			num -= len;
 			pte += len;
@@ -67,7 +67,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
 		}
 	}
 
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 static void
@@ -76,20 +76,20 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
 {
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	unsigned m, sglen;
 	u32 end, len;
 	int i;
 	struct scatterlist *sg;
 
 	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
-		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 		sglen = sg_dma_len(sg) >> PAGE_SHIFT;
 
 		end = pte + sglen;
@@ -100,7 +100,7 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
 		for (m = 0; m < len; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
 
-			mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
+			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
 			num--;
 			pte++;
 
@@ -115,7 +115,7 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
 			for (; m < sglen; m++) {
 				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
 
-				mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
+				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
 				num--;
 				pte++;
 				if (num == 0)
@@ -125,7 +125,7 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
 
 	}
 finish:
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 static void
@@ -135,24 +135,24 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
 	dma_addr_t *list = mem->pages;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	u32 end, len;
 
 	while (num) {
-		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
 			end = max;
 		len = end - pte;
 
-		mmu->map_sg(vma, pgt, mem, pte, len, list);
+		mmu->func->map_sg(vma, pgt, mem, pte, len, list);
 
 		num  -= len;
 		pte  += len;
@@ -163,7 +163,7 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
 		}
 	}
 
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 void
@@ -183,24 +183,24 @@ nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
 {
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	u32 end, len;
 
 	while (num) {
-		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
 			end = max;
 		len = end - pte;
 
-		mmu->unmap(pgt, pte, len);
+		mmu->func->unmap(vma, pgt, pte, len);
 
 		num -= len;
 		pte += len;
@@ -210,7 +210,7 @@ nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
 		}
 	}
 
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 void
@@ -225,7 +225,7 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgd *vpgd;
 	struct nvkm_vm_pgt *vpgt;
-	struct nvkm_gpuobj *pgt;
+	struct nvkm_memory *pgt;
 	u32 pde;
 
 	for (pde = fpde; pde <= lpde; pde++) {
@@ -233,16 +233,14 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
 		if (--vpgt->refcount[big])
 			continue;
 
-		pgt = vpgt->obj[big];
-		vpgt->obj[big] = NULL;
+		pgt = vpgt->mem[big];
+		vpgt->mem[big] = NULL;
 
 		list_for_each_entry(vpgd, &vm->pgd_list, head) {
-			mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
 		}
 
-		mutex_unlock(&nv_subdev(mmu)->mutex);
-		nvkm_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&nv_subdev(mmu)->mutex);
+		nvkm_memory_del(&pgt);
 	}
 }
 
@@ -252,34 +250,23 @@ nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
 	struct nvkm_vm_pgd *vpgd;
-	struct nvkm_gpuobj *pgt;
-	int big = (type != mmu->spg_shift);
+	int big = (type != mmu->func->spg_shift);
 	u32 pgt_size;
 	int ret;
 
-	pgt_size  = (1 << (mmu->pgt_bits + 12)) >> type;
+	pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&nv_subdev(mmu)->mutex);
-	ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
-			      NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+			      pgt_size, 0x1000, true, &vpgt->mem[big]);
 	if (unlikely(ret))
 		return ret;
 
-	/* someone beat us to filling the PDE while we didn't have the lock */
-	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&nv_subdev(mmu)->mutex);
-		nvkm_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&nv_subdev(mmu)->mutex);
-		return 0;
-	}
-
-	vpgt->obj[big] = pgt;
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
 	}
 
+	vpgt->refcount[big]++;
 	return 0;
 }
 
@@ -293,20 +280,20 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
 			   &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&nv_subdev(mmu)->mutex);
+		mutex_unlock(&vm->mutex);
 		return ret;
 	}
 
-	fpde = (vma->node->offset >> mmu->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
+	fpde = (vma->node->offset >> mmu->func->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
 
 	for (pde = fpde; pde <= lpde; pde++) {
 		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-		int big = (vma->node->type != mmu->spg_shift);
+		int big = (vma->node->type != mmu->func->spg_shift);
 
 		if (likely(vpgt->refcount[big])) {
 			vpgt->refcount[big]++;
@@ -318,11 +305,11 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
 			if (pde != fpde)
 				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
 			nvkm_mm_free(&vm->mm, &vma->node);
-			mutex_unlock(&nv_subdev(mmu)->mutex);
+			mutex_unlock(&vm->mutex);
 			return ret;
 		}
 	}
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 
 	vma->vm = NULL;
 	nvkm_vm_ref(vm, &vma->vm, NULL);
@@ -334,27 +321,49 @@ nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
 void
 nvkm_vm_put(struct nvkm_vma *vma)
 {
-	struct nvkm_vm *vm = vma->vm;
-	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_mmu *mmu;
+	struct nvkm_vm *vm;
 	u32 fpde, lpde;
 
 	if (unlikely(vma->node == NULL))
 		return;
-	fpde = (vma->node->offset >> mmu->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
+	vm = vma->vm;
+	mmu = vm->mmu;
+
+	fpde = (vma->node->offset >> mmu->func->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
-	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde);
+	mutex_lock(&vm->mutex);
+	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
 	nvkm_mm_free(&vm->mm, &vma->node);
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 
 	nvkm_vm_ref(NULL, &vma->vm, NULL);
 }
 
 int
+nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
+{
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_memory *pgt;
+	int ret;
+
+	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+			      (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
+	if (ret == 0) {
+		vm->pgt[0].refcount[0] = 1;
+		vm->pgt[0].mem[0] = pgt;
+		nvkm_memory_boot(pgt, vm);
+	}
+
+	return ret;
+}
+
+int
 nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-	       u32 block, struct nvkm_vm **pvm)
+	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
 {
+	static struct lock_class_key _key;
 	struct nvkm_vm *vm;
 	u64 mm_length = (offset + length) - mm_offset;
 	int ret;
@@ -363,11 +372,12 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
 	if (!vm)
 		return -ENOMEM;
 
+	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
 	INIT_LIST_HEAD(&vm->pgd_list);
 	vm->mmu = mmu;
 	kref_init(&vm->refcount);
-	vm->fpde = offset >> (mmu->pgt_bits + 12);
-	vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12);
+	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
+	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
 
 	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
 	if (!vm->pgt) {
@@ -390,10 +400,12 @@ nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
 
 int
 nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
-	    struct nvkm_vm **pvm)
+	    struct lock_class_key *key, struct nvkm_vm **pvm)
 {
-	struct nvkm_mmu *mmu = nvkm_mmu(device);
-	return mmu->create(mmu, offset, length, mm_offset, pvm);
+	struct nvkm_mmu *mmu = device->mmu;
+	if (!mmu->func->create)
+		return -EINVAL;
+	return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
 }
 
 static int
@@ -410,38 +422,33 @@ nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
 	if (!vpgd)
 		return -ENOMEM;
 
-	nvkm_gpuobj_ref(pgd, &vpgd->obj);
+	vpgd->obj = pgd;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
-		mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
 static void
 nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
 {
-	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgd *vpgd, *tmp;
-	struct nvkm_gpuobj *pgd = NULL;
 
 	if (!mpgd)
 		return;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
-			pgd = vpgd->obj;
 			list_del(&vpgd->head);
 			kfree(vpgd);
 			break;
 		}
 	}
-	mutex_unlock(&nv_subdev(mmu)->mutex);
-
-	nvkm_gpuobj_ref(NULL, &pgd);
+	mutex_unlock(&vm->mutex);
 }
 
 static void
@@ -478,3 +485,58 @@ nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
 	*ptr = ref;
 	return 0;
 }
+
+static int
+nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+	if (mmu->func->oneinit)
+		return mmu->func->oneinit(mmu);
+	return 0;
+}
+
+static int
+nvkm_mmu_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+	if (mmu->func->init)
+		mmu->func->init(mmu);
+	return 0;
+}
+
+static void *
+nvkm_mmu_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+	if (mmu->func->dtor)
+		return mmu->func->dtor(mmu);
+	return mmu;
+}
+
+static const struct nvkm_subdev_func
+nvkm_mmu = {
+	.dtor = nvkm_mmu_dtor,
+	.oneinit = nvkm_mmu_oneinit,
+	.init = nvkm_mmu_init,
+};
+
+void
+nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_mmu *mmu)
+{
+	nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev);
+	mmu->func = func;
+	mmu->limit = func->limit;
+	mmu->dma_bits = func->dma_bits;
+	mmu->lpg_shift = func->lpg_shift;
+}
+
+int
+nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_mmu **pmmu)
+{
+	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_mmu_ctor(func, device, index, *pmmu);
+	return 0;
+}
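
/*
 * Illustrative sketch, not part of the patch: the hunks above move the
 * mutex from the shared MMU subdev into each VM (with an optional
 * lock_class_key so lockdep can tell nested lock classes apart).
 * Userspace pthreads stand in for the kernel primitives; all names
 * below are stand-ins.
 */
#include <pthread.h>
#include <stdlib.h>

struct vm {
	pthread_mutex_t mutex;	/* was: one mutex shared via the subdev */
};

static struct vm *
vm_create(void)
{
	struct vm *vm = calloc(1, sizeof(*vm));
	if (vm)
		pthread_mutex_init(&vm->mutex, NULL);
	return vm;
}

static void
vm_get(struct vm *vm)
{
	pthread_mutex_lock(&vm->mutex);
	/* ... carve out address space, fill page tables ... */
	pthread_mutex_unlock(&vm->mutex);
}

int
main(void)
{
	struct vm *a = vm_create(), *b = vm_create();
	if (!a || !b)
		return 1;
	vm_get(a);	/* no longer serialises against ...  */
	vm_get(b);	/* ... operations on an unrelated VM */
	free(a);
	free(b);
	return 0;
}
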
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 294cda37f068..7ac507c927bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -21,19 +21,14 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
+#include "priv.h"
+
 #include <subdev/fb.h>
 #include <subdev/ltc.h>
 #include <subdev/timer.h>
 
 #include <core/gpuobj.h>
 
-struct gf100_mmu_priv {
-	struct nvkm_mmu base;
-};
-
-
 /* Map from compressed to corresponding uncompressed storage type.
  * The value 0xff represents an invalid storage type.
  */
@@ -75,17 +70,19 @@ const u8 gf100_pte_storage_type_map[256] =
 
 
 static void
-gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
+gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
 {
 	u32 pde[2] = { 0, 0 };
 
 	if (pgt[0])
-		pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
+		pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
 	if (pgt[1])
-		pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
+		pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);
 
-	nv_wo32(pgd, (index * 8) + 0, pde[0]);
-	nv_wo32(pgd, (index * 8) + 4, pde[1]);
+	nvkm_kmap(pgd);
+	nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
+	nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
+	nvkm_done(pgd);
 }
 
 static inline u64
@@ -103,7 +100,7 @@ gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
 }
 
 static void
-gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	     struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
 	u64 next = 1 << (vma->node->type - 8);
@@ -112,126 +109,113 @@ gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	pte <<= 3;
 
 	if (mem->tag) {
-		struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu);
+		struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc;
 		u32 tag = mem->tag->offset + (delta >> 17);
 		phys |= (u64)tag << (32 + 12);
 		next |= (u64)1   << (32 + 12);
-		ltc->tags_clear(ltc, tag, cnt);
+		nvkm_ltc_tags_clear(ltc, tag, cnt);
 	}
 
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		phys += next;
 		pte  += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
-gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 		struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
 	/* compressed storage types are invalid for system memory */
 	u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];
 
+	nvkm_kmap(pgt);
 	pte <<= 3;
 	while (cnt--) {
 		u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
-gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
+	nvkm_kmap(pgt);
 	pte <<= 3;
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, 0x00000000);
-		nv_wo32(pgt, pte + 4, 0x00000000);
+		nvkm_wo32(pgt, pte + 0, 0x00000000);
+		nvkm_wo32(pgt, pte + 4, 0x00000000);
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 gf100_vm_flush(struct nvkm_vm *vm)
 {
-	struct gf100_mmu_priv *priv = (void *)vm->mmu;
-	struct nvkm_bar *bar = nvkm_bar(priv);
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_device *device = mmu->subdev.device;
 	struct nvkm_vm_pgd *vpgd;
 	u32 type;
 
-	bar->flush(bar);
-
 	type = 0x00000001; /* PAGE_ALL */
-	if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
+	if (atomic_read(&vm->engref[NVKM_SUBDEV_BAR]))
 		type |= 0x00000004; /* HUB_ONLY */
 
-	mutex_lock(&nv_subdev(priv)->mutex);
+	mutex_lock(&mmu->subdev.mutex);
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
 		/* looks like maybe a "free flush slots" counter; the
 		 * faster you write to 0x100cbc, the more it decreases
 		 */
-		if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) {
-			nv_error(priv, "vm timeout 0: 0x%08x %d\n",
-				 nv_rd32(priv, 0x100c80), type);
-		}
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
+				break;
+		);
 
-		nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8);
-		nv_wr32(priv, 0x100cbc, 0x80000000 | type);
+		nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
+		nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
 
 		/* wait for flush to be queued? */
-		if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) {
-			nv_error(priv, "vm timeout 1: 0x%08x %d\n",
-				 nv_rd32(priv, 0x100c80), type);
-		}
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+				break;
+		);
 	}
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	mutex_unlock(&mmu->subdev.mutex);
 }
 
 static int
 gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-		struct nvkm_vm **pvm)
+		struct lock_class_key *key, struct nvkm_vm **pvm)
 {
-	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm);
+	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm);
 }
 
-static int
-gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_mmu_func
+gf100_mmu = {
+	.limit = (1ULL << 40),
+	.dma_bits = 40,
+	.pgt_bits  = 27 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 17,
+	.create = gf100_vm_create,
+	.map_pgt = gf100_vm_map_pgt,
+	.map = gf100_vm_map,
+	.map_sg = gf100_vm_map_sg,
+	.unmap = gf100_vm_unmap,
+	.flush = gf100_vm_flush,
+};
+
+int
+gf100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
 {
-	struct gf100_mmu_priv *priv;
-	int ret;
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.limit = 1ULL << 40;
-	priv->base.dma_bits = 40;
-	priv->base.pgt_bits  = 27 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 17;
-	priv->base.create = gf100_vm_create;
-	priv->base.map_pgt = gf100_vm_map_pgt;
-	priv->base.map = gf100_vm_map;
-	priv->base.map_sg = gf100_vm_map_sg;
-	priv->base.unmap = gf100_vm_unmap;
-	priv->base.flush = gf100_vm_flush;
-	return 0;
+	return nvkm_mmu_new_(&gf100_mmu, device, index, pmmu);
 }
-
-struct nvkm_oclass
-gf100_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_mmu_ctor,
-		.dtor = _nvkm_mmu_dtor,
-		.init = _nvkm_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
-};
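
/*
 * Illustrative sketch, not part of the patch: the nvkm_kmap()/
 * nvkm_wo32()/nvkm_done() bracket added above maps the backing object
 * once, performs all the 32-bit writes, then flushes/unmaps once.
 * Plain malloc'd memory stands in for the GPU object; all names below
 * are stand-ins.
 */
#include <stdint.h>
#include <stdlib.h>

typedef uint32_t u32;
typedef uint64_t u64;

struct memory {
	u32 *cpu;			/* CPU mapping of the object */
	size_t size;
};

static void kmap(struct memory *m) { (void)m; /* would map here */ }
static void done(struct memory *m) { (void)m; /* would flush + unmap */ }

static void
wo32(struct memory *m, u64 offset, u32 data)
{
	m->cpu[offset / 4] = data;
}

static void
vm_unmap(struct memory *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	kmap(pgt);			/* one map ...             */
	while (cnt--) {
		wo32(pgt, pte + 0, 0x00000000);
		wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
	done(pgt);			/* ... one flush per batch */
}

int
main(void)
{
	struct memory pgt = { .size = 4096 };
	pgt.cpu = calloc(1, pgt.size);
	if (!pgt.cpu)
		return 1;
	vm_unmap(&pgt, 0, 16);
	free(pgt.cpu);
	return 0;
}
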
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index fe93ea2711c9..37927c3fdc3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -23,7 +23,6 @@
  */
 #include "nv04.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 
 #define NV04_PDMA_SIZE (128 * 1024 * 1024)
@@ -34,30 +33,34 @@
  ******************************************************************************/
 
 static void
-nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	pte = 0x00008 + (pte * 4);
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
 		u32 phys = (u32)*list++;
 		while (cnt && page--) {
-			nv_wo32(pgt, pte, phys | 3);
+			nvkm_wo32(pgt, pte, phys | 3);
 			phys += NV04_PDMA_PAGE;
 			pte += 4;
 			cnt -= 1;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
 	pte = 0x00008 + (pte * 4);
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
+		nvkm_wo32(pgt, pte, 0x00000000);
 		pte += 4;
 	}
+	nvkm_done(pgt);
 }
 
 static void
@@ -66,86 +69,81 @@ nv04_vm_flush(struct nvkm_vm *vm)
 }
 
 /*******************************************************************************
- * VM object
- ******************************************************************************/
-
-int
-nv04_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mmstart,
-	       struct nvkm_vm **pvm)
-{
-	return -EINVAL;
-}
-
-/*******************************************************************************
  * MMU subdev
  ******************************************************************************/
 
 static int
-nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv04_mmu_oneinit(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv;
-	struct nvkm_gpuobj *dma;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	struct nvkm_memory *dma;
 	int ret;
 
-	ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART",
-			      "pcigart", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.create = nv04_vm_create;
-	priv->base.limit = NV04_PDMA_SIZE;
-	priv->base.dma_bits = 32;
-	priv->base.pgt_bits = 32 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 12;
-	priv->base.map_sg = nv04_vm_map_sg;
-	priv->base.unmap = nv04_vm_unmap;
-	priv->base.flush = nv04_vm_flush;
-
-	ret = nvkm_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
-			     &priv->vm);
+	ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
+			     &mmu->vm);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 			      (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
-			      16, NVOBJ_FLAG_ZERO_ALLOC,
-			      &priv->vm->pgt[0].obj[0]);
-	dma = priv->vm->pgt[0].obj[0];
-	priv->vm->pgt[0].refcount[0] = 1;
+			      16, true, &dma);
+	mmu->vm->pgt[0].mem[0] = dma;
+	mmu->vm->pgt[0].refcount[0] = 1;
 	if (ret)
 		return ret;
 
-	nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
-	nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+	nvkm_kmap(dma);
+	nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+	nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+	nvkm_done(dma);
 	return 0;
 }
 
-void
-nv04_mmu_dtor(struct nvkm_object *object)
+void *
+nv04_mmu_dtor(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv = (void *)object;
-	if (priv->vm) {
-		nvkm_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
-		nvkm_vm_ref(NULL, &priv->vm, NULL);
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	if (mmu->vm) {
+		nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &mmu->vm, NULL);
 	}
-	if (priv->nullp) {
-		pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
-				    priv->nullp, priv->null);
+	if (mmu->nullp) {
+		dma_free_coherent(device->dev, 16 * 1024,
+				  mmu->nullp, mmu->null);
 	}
-	nvkm_mmu_destroy(&priv->base);
+	return mmu;
+}
+
+int
+nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_mmu **pmmu)
+{
+	struct nv04_mmu *mmu;
+	if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmmu = &mmu->base;
+	nvkm_mmu_ctor(func, device, index, &mmu->base);
+	return 0;
 }
 
-struct nvkm_oclass
-nv04_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mmu_ctor,
-		.dtor = nv04_mmu_dtor,
-		.init = _nvkm_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+const struct nvkm_mmu_func
+nv04_mmu = {
+	.oneinit = nv04_mmu_oneinit,
+	.dtor = nv04_mmu_dtor,
+	.limit = NV04_PDMA_SIZE,
+	.dma_bits = 32,
+	.pgt_bits = 32 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 12,
+	.map_sg = nv04_vm_map_sg,
+	.unmap = nv04_vm_unmap,
+	.flush = nv04_vm_flush,
 };
+
+int
+nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
index 7bf6f4b38f1d..363e33b296d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
@@ -1,19 +1,18 @@
 #ifndef __NV04_MMU_PRIV__
 #define __NV04_MMU_PRIV__
+#define nv04_mmu(p) container_of((p), struct nv04_mmu, base)
+#include "priv.h"
 
-#include <subdev/mmu.h>
-
-struct nv04_mmu_priv {
+struct nv04_mmu {
 	struct nvkm_mmu base;
 	struct nvkm_vm *vm;
 	dma_addr_t null;
 	void *nullp;
 };
 
-static inline struct nv04_mmu_priv *
-nv04_mmu(void *obj)
-{
-	return (void *)nvkm_mmu(obj);
-}
+int nv04_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
+		  int index, struct nvkm_mmu **);
+void *nv04_mmu_dtor(struct nvkm_mmu *);
 
+extern const struct nvkm_mmu_func nv04_mmu;
 #endif
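
/*
 * Illustrative sketch, not part of the patch: the nv04_mmu() macro
 * above replaces a casting helper with the standard container_of()
 * downcast from the embedded base struct to its wrapper.  Minimal
 * userspace version; all names below are stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mmu {
	int dma_bits;
};

struct wrapper {
	struct mmu base;	/* generic part embedded in the wrapper */
	void *nullp;		/* chip-specific extra state */
};

#define to_wrapper(p) container_of((p), struct wrapper, base)

int
main(void)
{
	struct wrapper w = { .base = { .dma_bits = 32 } };
	struct mmu *base = &w.base;	/* what generic code passes around */

	/* recover the chip-specific wrapper from the generic pointer */
	printf("%d\n", to_wrapper(base) == &w);	/* prints 1 */
	return 0;
}
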
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index 61ee3ab11660..c6a26f907009 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -23,7 +23,6 @@
  */
 #include "nv04.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/option.h>
 #include <subdev/timer.h>
@@ -36,45 +35,50 @@
  ******************************************************************************/
 
 static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	pte = pte * 4;
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 page = PAGE_SIZE / NV41_GART_PAGE;
 		u64 phys = (u64)*list++;
 		while (cnt && page--) {
-			nv_wo32(pgt, pte, (phys >> 7) | 1);
+			nvkm_wo32(pgt, pte, (phys >> 7) | 1);
 			phys += NV41_GART_PAGE;
 			pte += 4;
 			cnt -= 1;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
 	pte = pte * 4;
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
+		nvkm_wo32(pgt, pte, 0x00000000);
 		pte += 4;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 nv41_vm_flush(struct nvkm_vm *vm)
 {
-	struct nv04_mmu_priv *priv = (void *)vm->mmu;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	nv_wr32(priv, 0x100810, 0x00000022);
-	if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) {
-		nv_warn(priv, "flush timeout, 0x%08x\n",
-			nv_rd32(priv, 0x100810));
-	}
-	nv_wr32(priv, 0x100810, 0x00000000);
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
+	struct nvkm_device *device = mmu->base.subdev.device;
+
+	mutex_lock(&mmu->base.subdev.mutex);
+	nvkm_wr32(device, 0x100810, 0x00000022);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100810) & 0x00000020)
+			break;
+	);
+	nvkm_wr32(device, 0x100810, 0x00000000);
+	mutex_unlock(&mmu->base.subdev.mutex);
 }
 
 /*******************************************************************************
@@ -82,76 +86,56 @@ nv41_vm_flush(struct nvkm_vm *vm)
  ******************************************************************************/
 
 static int
-nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv41_mmu_oneinit(struct nvkm_mmu *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_mmu_priv *priv;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
 	int ret;
 
-	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
-	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
-		return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
-					data, size, pobject);
-	}
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
-			      "pciegart", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.create = nv04_vm_create;
-	priv->base.limit = NV41_GART_SIZE;
-	priv->base.dma_bits = 39;
-	priv->base.pgt_bits = 32 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 12;
-	priv->base.map_sg = nv41_vm_map_sg;
-	priv->base.unmap = nv41_vm_unmap;
-	priv->base.flush = nv41_vm_flush;
-
-	ret = nvkm_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
-			     &priv->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
-			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC,
-			      &priv->vm->pgt[0].obj[0]);
-	priv->vm->pgt[0].refcount[0] = 1;
+	ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
+			     &mmu->vm);
 	if (ret)
 		return ret;
 
-	return 0;
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
+			      &mmu->vm->pgt[0].mem[0]);
+	mmu->vm->pgt[0].refcount[0] = 1;
+	return ret;
 }
 
-static int
-nv41_mmu_init(struct nvkm_object *object)
+static void
+nv41_mmu_init(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv = (void *)object;
-	struct nvkm_gpuobj *dma = priv->vm->pgt[0].obj[0];
-	int ret;
-
-	ret = nvkm_mmu_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
-	nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x100820, 0x00000000);
-	return 0;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
+	nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
+	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x100820, 0x00000000);
 }
 
-struct nvkm_oclass
-nv41_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x41),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv41_mmu_ctor,
-		.dtor = nv04_mmu_dtor,
-		.init = nv41_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+nv41_mmu = {
+	.dtor = nv04_mmu_dtor,
+	.oneinit = nv41_mmu_oneinit,
+	.init = nv41_mmu_init,
+	.limit = NV41_GART_SIZE,
+	.dma_bits = 39,
+	.pgt_bits = 32 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 12,
+	.map_sg = nv41_vm_map_sg,
+	.unmap = nv41_vm_unmap,
+	.flush = nv41_vm_flush,
 };
+
+int
+nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (device->type == NVKM_DEVICE_AGP ||
+	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+		return nv04_mmu_new(device, index, pmmu);
+
+	return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
+}
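
/*
 * Illustrative sketch, not part of the patch: nvkm_msec() as used
 * above polls a register until a condition holds or the timeout
 * expires, returning a negative value on timeout.  A rough userspace
 * equivalent of that loop's shape; names and details are stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef uint32_t u32;

static u32
read_status(void)			/* stand-in for nvkm_rd32() */
{
	static int calls;
	return ++calls >= 3 ? 0x00000020 : 0;	/* "done" on 3rd poll */
}

static long
elapsed_ms(const struct timespec *start)
{
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000 +
	       (now.tv_nsec - start->tv_nsec) / 1000000;
}

/* 0 once the bit is set, -1 on timeout (nvkm_msec() is similar) */
static int
poll_msec(long timeout_ms, u32 bit)
{
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (read_status() & bit)
			return 0;
	} while (elapsed_ms(&start) <= timeout_ms);
	return -1;
}

int
main(void)
{
	if (poll_msec(2000, 0x00000020) < 0)
		fprintf(stderr, "flush timeout\n");
	return 0;
}
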
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index b90ded1887aa..a648c2395545 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -23,7 +23,6 @@
  */
 #include "nv04.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/option.h>
 #include <subdev/timer.h>
@@ -36,16 +35,16 @@
  ******************************************************************************/
 
 static void
-nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
+nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
 	     dma_addr_t *list, u32 pte, u32 cnt)
 {
 	u32 base = (pte << 2) & ~0x0000000f;
 	u32 tmp[4];
 
-	tmp[0] = nv_ro32(pgt, base + 0x0);
-	tmp[1] = nv_ro32(pgt, base + 0x4);
-	tmp[2] = nv_ro32(pgt, base + 0x8);
-	tmp[3] = nv_ro32(pgt, base + 0xc);
+	tmp[0] = nvkm_ro32(pgt, base + 0x0);
+	tmp[1] = nvkm_ro32(pgt, base + 0x4);
+	tmp[2] = nvkm_ro32(pgt, base + 0x8);
+	tmp[3] = nvkm_ro32(pgt, base + 0xc);
 
 	while (cnt--) {
 		u32 addr = list ? (*list++ >> 12) : (null >> 12);
@@ -75,24 +74,25 @@ nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
 		}
 	}
 
-	nv_wo32(pgt, base + 0x0, tmp[0]);
-	nv_wo32(pgt, base + 0x4, tmp[1]);
-	nv_wo32(pgt, base + 0x8, tmp[2]);
-	nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
+	nvkm_wo32(pgt, base + 0x0, tmp[0]);
+	nvkm_wo32(pgt, base + 0x4, tmp[1]);
+	nvkm_wo32(pgt, base + 0x8, tmp[2]);
+	nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
 }
 
 static void
-nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
-	struct nv04_mmu_priv *priv = (void *)vma->vm->mmu;
+	struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
 	u32 tmp[4];
 	int i;
 
+	nvkm_kmap(pgt);
 	if (pte & 3) {
 		u32  max = 4 - (pte & 3);
 		u32 part = (cnt > max) ? max : cnt;
-		nv44_vm_fill(pgt, priv->null, list, pte, part);
+		nv44_vm_fill(pgt, mmu->null, list, pte, part);
 		pte  += part;
 		list += part;
 		cnt  -= part;
@@ -101,51 +101,57 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	while (cnt >= 4) {
 		for (i = 0; i < 4; i++)
 			tmp[i] = *list++ >> 12;
-		nv_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
-		nv_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
-		nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
-		nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
+		nvkm_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
+		nvkm_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
+		nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+		nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
 		cnt -= 4;
 	}
 
 	if (cnt)
-		nv44_vm_fill(pgt, priv->null, list, pte, cnt);
+		nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
+	nvkm_done(pgt);
 }
 
 static void
-nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
-	struct nv04_mmu_priv *priv = (void *)nvkm_mmu(pgt);
+	struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
 
+	nvkm_kmap(pgt);
 	if (pte & 3) {
 		u32  max = 4 - (pte & 3);
 		u32 part = (cnt > max) ? max : cnt;
-		nv44_vm_fill(pgt, priv->null, NULL, pte, part);
+		nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
 		pte  += part;
 		cnt  -= part;
 	}
 
 	while (cnt >= 4) {
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
 		cnt -= 4;
 	}
 
 	if (cnt)
-		nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
+		nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
+	nvkm_done(pgt);
 }
 
 static void
 nv44_vm_flush(struct nvkm_vm *vm)
 {
-	struct nv04_mmu_priv *priv = (void *)vm->mmu;
-	nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
-	nv_wr32(priv, 0x100808, 0x00000020);
-	if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
-		nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
-	nv_wr32(priv, 0x100808, 0x00000000);
+	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
+	nvkm_wr32(device, 0x100808, 0x00000020);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100808) & 0x00000001)
+			break;
+	);
+	nvkm_wr32(device, 0x100808, 0x00000000);
 }
 
 /*******************************************************************************
@@ -153,95 +159,78 @@ nv44_vm_flush(struct nvkm_vm *vm)
  ******************************************************************************/
 
 static int
-nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv44_mmu_oneinit(struct nvkm_mmu *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_mmu_priv *priv;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
 	int ret;
 
-	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
-	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
-		return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
-					data, size, pobject);
-	}
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
-			      "pciegart", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.create = nv04_vm_create;
-	priv->base.limit = NV44_GART_SIZE;
-	priv->base.dma_bits = 39;
-	priv->base.pgt_bits = 32 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 12;
-	priv->base.map_sg = nv44_vm_map_sg;
-	priv->base.unmap = nv44_vm_unmap;
-	priv->base.flush = nv44_vm_flush;
-
-	priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
-	if (!priv->nullp) {
-		nv_error(priv, "unable to allocate dummy pages\n");
-		return -ENOMEM;
+	mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
+					&mmu->null, GFP_KERNEL);
+	if (!mmu->nullp) {
+		nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
+		mmu->null = 0;
 	}
 
-	ret = nvkm_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
-			     &priv->vm);
+	ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
+			     &mmu->vm);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 			      (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
-			      512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
-			      &priv->vm->pgt[0].obj[0]);
-	priv->vm->pgt[0].refcount[0] = 1;
-	if (ret)
-		return ret;
-
-	return 0;
+			      512 * 1024, true,
+			      &mmu->vm->pgt[0].mem[0]);
+	mmu->vm->pgt[0].refcount[0] = 1;
+	return ret;
 }
 
-static int
-nv44_mmu_init(struct nvkm_object *object)
+static void
+nv44_mmu_init(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv = (void *)object;
-	struct nvkm_gpuobj *gart = priv->vm->pgt[0].obj[0];
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
 	u32 addr;
-	int ret;
-
-	ret = nvkm_mmu_init(&priv->base);
-	if (ret)
-		return ret;
 
 	/* calculate the vram address of this PRAMIN block; the object must
 	 * be allocated with 512KiB alignment, and must not exceed a total
 	 * size of 512KiB, for this to work correctly
 	 */
-	addr  = nv_rd32(priv, 0x10020c);
-	addr -= ((gart->addr >> 19) + 1) << 19;
-
-	nv_wr32(priv, 0x100850, 0x80000000);
-	nv_wr32(priv, 0x100818, priv->null);
-	nv_wr32(priv, 0x100804, NV44_GART_SIZE);
-	nv_wr32(priv, 0x100850, 0x00008000);
-	nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
-	nv_wr32(priv, 0x100820, 0x00000000);
-	nv_wr32(priv, 0x10082c, 0x00000001);
-	nv_wr32(priv, 0x100800, addr | 0x00000010);
-	return 0;
+	addr  = nvkm_rd32(device, 0x10020c);
+	addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
+
+	nvkm_wr32(device, 0x100850, 0x80000000);
+	nvkm_wr32(device, 0x100818, mmu->null);
+	nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+	nvkm_wr32(device, 0x100850, 0x00008000);
+	nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
+	nvkm_wr32(device, 0x100820, 0x00000000);
+	nvkm_wr32(device, 0x10082c, 0x00000001);
+	nvkm_wr32(device, 0x100800, addr | 0x00000010);
 }
 
-struct nvkm_oclass
-nv44_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x44),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_mmu_ctor,
-		.dtor = nv04_mmu_dtor,
-		.init = nv44_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+nv44_mmu = {
+	.dtor = nv04_mmu_dtor,
+	.oneinit = nv44_mmu_oneinit,
+	.init = nv44_mmu_init,
+	.limit = NV44_GART_SIZE,
+	.dma_bits = 39,
+	.pgt_bits = 32 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 12,
+	.map_sg = nv44_vm_map_sg,
+	.unmap = nv44_vm_unmap,
+	.flush = nv44_vm_flush,
 };
+
+int
+nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (device->type == NVKM_DEVICE_AGP ||
+	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+		return nv04_mmu_new(device, index, pmmu);
+
+	return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index b83550fa7f96..a1f8d65f0276 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -21,31 +21,28 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/timer.h>
+#include "priv.h"
 
-#include <core/engine.h>
 #include <core/gpuobj.h>
-
-struct nv50_mmu_priv {
-	struct nvkm_mmu base;
-};
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <engine/gr.h>
 
 static void
-nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
+nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
 {
 	u64 phys = 0xdeadcafe00000000ULL;
 	u32 coverage = 0;
 
 	if (pgt[0]) {
-		phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
-		coverage = (pgt[0]->size >> 3) << 12;
+		/* present, 4KiB pages */
+		phys = 0x00000003 | nvkm_memory_addr(pgt[0]);
+		coverage = (nvkm_memory_size(pgt[0]) >> 3) << 12;
 	} else
 	if (pgt[1]) {
-		phys = 0x00000001 | pgt[1]->addr; /* present */
-		coverage = (pgt[1]->size >> 3) << 16;
+		/* present, 64KiB pages  */
+		phys = 0x00000001 | nvkm_memory_addr(pgt[1]);
+		coverage = (nvkm_memory_size(pgt[1]) >> 3) << 16;
 	}
 
 	if (phys & 1) {
@@ -57,8 +54,10 @@ nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
 			phys |= 0x20;
 	}
 
-	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
-	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+	nvkm_kmap(pgd);
+	nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+	nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+	nvkm_done(pgd);
 }
 
 static inline u64
@@ -75,17 +74,18 @@ vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
 }
 
 static void
-nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	    struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
+	struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram;
 	u32 comp = (mem->memtype & 0x180) >> 7;
 	u32 block, target;
 	int i;
 
 	/* IGPs don't have real VRAM, re-target to stolen system memory */
 	target = 0;
-	if (nvkm_fb(vma->vm->mmu)->ram->stolen) {
-		phys += nvkm_fb(vma->vm->mmu)->ram->stolen;
+	if (ram->stolen) {
+		phys += ram->stolen;
 		target = 3;
 	}
 
@@ -93,6 +93,7 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	pte <<= 3;
 	cnt <<= 3;
 
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 offset_h = upper_32_bits(phys);
 		u32 offset_l = lower_32_bits(phys);
@@ -113,129 +114,118 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 		}
 
 		while (block) {
-			nv_wo32(pgt, pte + 0, offset_l);
-			nv_wo32(pgt, pte + 4, offset_h);
+			nvkm_wo32(pgt, pte + 0, offset_l);
+			nvkm_wo32(pgt, pte + 4, offset_h);
 			pte += 8;
 			block -= 8;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
 	pte <<= 3;
+	nvkm_kmap(pgt);
 	while (cnt--) {
 		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv50_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
 	pte <<= 3;
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, 0x00000000);
-		nv_wo32(pgt, pte + 4, 0x00000000);
+		nvkm_wo32(pgt, pte + 0, 0x00000000);
+		nvkm_wo32(pgt, pte + 4, 0x00000000);
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 nv50_vm_flush(struct nvkm_vm *vm)
 {
-	struct nv50_mmu_priv *priv = (void *)vm->mmu;
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_engine *engine;
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_subdev *subdev = &mmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	int i, vme;
 
-	bar->flush(bar);
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+	mutex_lock(&subdev->mutex);
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
 		if (!atomic_read(&vm->engref[i]))
 			continue;
 
 		/* unfortunate hw bug workaround... */
-		engine = nvkm_engine(priv, i);
-		if (engine && engine->tlb_flush) {
-			engine->tlb_flush(engine);
-			continue;
+		if (i == NVKM_ENGINE_GR && device->gr) {
+			int ret = nvkm_gr_tlb_flush(device->gr);
+			if (ret != -ENODEV)
+				continue;
 		}
 
 		switch (i) {
-		case NVDEV_ENGINE_GR    : vme = 0x00; break;
-		case NVDEV_ENGINE_VP    :
-		case NVDEV_ENGINE_MSPDEC: vme = 0x01; break;
-		case NVDEV_SUBDEV_BAR   : vme = 0x06; break;
-		case NVDEV_ENGINE_MSPPP :
-		case NVDEV_ENGINE_MPEG  : vme = 0x08; break;
-		case NVDEV_ENGINE_BSP   :
-		case NVDEV_ENGINE_MSVLD : vme = 0x09; break;
-		case NVDEV_ENGINE_CIPHER:
-		case NVDEV_ENGINE_SEC   : vme = 0x0a; break;
-		case NVDEV_ENGINE_CE0   : vme = 0x0d; break;
+		case NVKM_ENGINE_GR    : vme = 0x00; break;
+		case NVKM_ENGINE_VP    :
+		case NVKM_ENGINE_MSPDEC: vme = 0x01; break;
+		case NVKM_SUBDEV_BAR   : vme = 0x06; break;
+		case NVKM_ENGINE_MSPPP :
+		case NVKM_ENGINE_MPEG  : vme = 0x08; break;
+		case NVKM_ENGINE_BSP   :
+		case NVKM_ENGINE_MSVLD : vme = 0x09; break;
+		case NVKM_ENGINE_CIPHER:
+		case NVKM_ENGINE_SEC   : vme = 0x0a; break;
+		case NVKM_ENGINE_CE0   : vme = 0x0d; break;
 		default:
 			continue;
 		}
 
-		nv_wr32(priv, 0x100c80, (vme << 16) | 1);
-		if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
-			nv_error(priv, "vm flush timeout: engine %d\n", vme);
+		nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+				break;
+		) < 0)
+			nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
 	}
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	mutex_unlock(&subdev->mutex);
 }
 
 static int
-nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length,
-	       u64 mm_offset, struct nvkm_vm **pvm)
+nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+	       struct lock_class_key *key, struct nvkm_vm **pvm)
 {
-	u32 block = (1 << (mmu->pgt_bits + 12));
+	u32 block = (1 << (mmu->func->pgt_bits + 12));
 	if (block > length)
 		block = length;
 
-	return nvkm_vm_create(mmu, offset, length, mm_offset, block, pvm);
+	return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm);
 }
 
-static int
-nv50_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+static const struct nvkm_mmu_func
+nv50_mmu = {
+	.limit = (1ULL << 40),
+	.dma_bits = 40,
+	.pgt_bits  = 29 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 16,
+	.create = nv50_vm_create,
+	.map_pgt = nv50_vm_map_pgt,
+	.map = nv50_vm_map,
+	.map_sg = nv50_vm_map_sg,
+	.unmap = nv50_vm_unmap,
+	.flush = nv50_vm_flush,
+};
+
+int
+nv50_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
 {
-	struct nv50_mmu_priv *priv;
-	int ret;
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.limit = 1ULL << 40;
-	priv->base.dma_bits = 40;
-	priv->base.pgt_bits  = 29 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 16;
-	priv->base.create = nv50_vm_create;
-	priv->base.map_pgt = nv50_vm_map_pgt;
-	priv->base.map = nv50_vm_map;
-	priv->base.map_sg = nv50_vm_map_sg;
-	priv->base.unmap = nv50_vm_unmap;
-	priv->base.flush = nv50_vm_flush;
-	return 0;
+	return nvkm_mmu_new_(&nv50_mmu, device, index, pmmu);
 }
-
-struct nvkm_oclass
-nv50_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mmu_ctor,
-		.dtor = _nvkm_mmu_dtor,
-		.init = _nvkm_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
new file mode 100644
index 000000000000..27cedc60b507
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -0,0 +1,39 @@
+#ifndef __NVKM_MMU_PRIV_H__
+#define __NVKM_MMU_PRIV_H__
+#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
+#include <subdev/mmu.h>
+
+void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *,
+		   int index, struct nvkm_mmu *);
+int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
+		  int index, struct nvkm_mmu **);
+
+struct nvkm_mmu_func {
+	void *(*dtor)(struct nvkm_mmu *);
+	int (*oneinit)(struct nvkm_mmu *);
+	void (*init)(struct nvkm_mmu *);
+
+	u64 limit;
+	u8  dma_bits;
+	u32 pgt_bits;
+	u8  spg_shift;
+	u8  lpg_shift;
+
+	int  (*create)(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
+		       struct lock_class_key *, struct nvkm_vm **);
+
+	void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
+			struct nvkm_memory *pgt[2]);
+	void (*map)(struct nvkm_vma *, struct nvkm_memory *,
+		    struct nvkm_mem *, u32 pte, u32 cnt,
+		    u64 phys, u64 delta);
+	void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
+		       struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
+	void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
+		      u32 pte, u32 cnt);
+	void (*flush)(struct nvkm_vm *);
+};
+
+int nvkm_vm_create(struct nvkm_mmu *, u64, u64, u64, u32,
+		   struct lock_class_key *, struct nvkm_vm **);
+#endif
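
The two hunks above show the shape of the whole series: per-instance fields set in each constructor (the old `priv->base.limit = ...` lines) become a shared, immutable `nvkm_mmu_func` table that `nvkm_mmu_new_()` binds once at construction. A rough standalone C analogue of that const-function-table pattern follows; every name in it is invented for the sketch, none of it is nvkm API.

#include <stdio.h>
#include <stdlib.h>

struct widget;

struct widget_func {                    /* shared, read-only per-chipset ops */
	unsigned long long limit;
	void (*flush)(struct widget *);
};

struct widget {                         /* per-instance state only */
	const struct widget_func *func;
	int index;
};

static int
widget_new_(const struct widget_func *func, int index, struct widget **pw)
{
	struct widget *w;
	if (!(w = *pw = calloc(1, sizeof(*w))))
		return -1;
	w->func = func;                 /* bind the ops table once */
	w->index = index;
	return 0;
}

static void
demo_flush(struct widget *w)
{
	printf("flush widget %d, limit %llx\n", w->index, w->func->limit);
}

static const struct widget_func
demo_widget = {                         /* plays the role of nv50_mmu above */
	.limit = 1ULL << 40,
	.flush = demo_flush,
};

int
main(void)
{
	struct widget *w;
	if (widget_new_(&demo_widget, 0, &w))
		return 1;
	w->func->flush(w);              /* dispatch through the table */
	free(w);
	return 0;
}
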
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 0ca9dcabb6d3..9700a7625012 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -23,14 +23,13 @@
  */
 #include "mxms.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/mxm.h>
 #include <subdev/i2c.h>
 
 static bool
-mxm_shadow_rom_fetch(struct nvkm_i2c_port *i2c, u8 addr,
+mxm_shadow_rom_fetch(struct nvkm_i2c_bus *bus, u8 addr,
 		     u8 offset, u8 size, u8 *data)
 {
 	struct i2c_msg msgs[] = {
@@ -38,27 +37,28 @@ mxm_shadow_rom_fetch(struct nvkm_i2c_port *i2c, u8 addr,
 		{ .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
 	};
 
-	return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+	return i2c_transfer(&bus->i2c, msgs, 2) == 2;
 }
 
 static bool
 mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
 {
-	struct nvkm_bios *bios = nvkm_bios(mxm);
-	struct nvkm_i2c *i2c = nvkm_i2c(mxm);
-	struct nvkm_i2c_port *port = NULL;
+	struct nvkm_device *device = mxm->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_i2c *i2c = device->i2c;
+	struct nvkm_i2c_bus *bus = NULL;
 	u8 i2cidx, mxms[6], addr, size;
 
 	i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
 	if (i2cidx < 0x0f)
-		port = i2c->find(i2c, i2cidx);
-	if (!port)
+		bus = nvkm_i2c_bus_find(i2c, i2cidx);
+	if (!bus)
 		return false;
 
 	addr = 0x54;
-	if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) {
+	if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms)) {
 		addr = 0x56;
-		if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms))
+		if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms))
 			return false;
 	}
 
@@ -67,7 +67,7 @@ mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
 	mxm->mxms = kmalloc(size, GFP_KERNEL);
 
 	if (mxm->mxms &&
-	    mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms))
+	    mxm_shadow_rom_fetch(bus, addr, 0, size, mxm->mxms))
 		return true;
 
 	kfree(mxm->mxms);
@@ -79,7 +79,8 @@ mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
 static bool
 mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 {
-	struct nvkm_device *device = nv_device(mxm);
+	struct nvkm_subdev *subdev = &mxm->subdev;
+	struct nvkm_device *device = subdev->device;
 	static char muid[] = {
 		0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
 		0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
@@ -94,7 +95,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 	acpi_handle handle;
 	int rev;
 
-	handle = ACPI_HANDLE(nv_device_base(device));
+	handle = ACPI_HANDLE(device->dev);
 	if (!handle)
 		return false;
 
@@ -106,7 +107,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 	rev = (version & 0xf0) << 4 | (version & 0x0f);
 	obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
 	if (!obj) {
-		nv_debug(mxm, "DSM MXMS failed\n");
+		nvkm_debug(subdev, "DSM MXMS failed\n");
 		return false;
 	}
 
@@ -114,7 +115,8 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 		mxm->mxms = kmemdup(obj->buffer.pointer,
 					 obj->buffer.length, GFP_KERNEL);
 	} else if (obj->type == ACPI_TYPE_INTEGER) {
-		nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+		nvkm_debug(subdev, "DSM MXMS returned 0x%llx\n",
+			   obj->integer.value);
 	}
 
 	ACPI_FREE(obj);
@@ -129,6 +131,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 static u8
 wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
 	struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
 	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -137,18 +140,18 @@ wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
 
 	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
 	if (ACPI_FAILURE(status)) {
-		nv_debug(mxm, "WMMX MXMI returned %d\n", status);
+		nvkm_debug(subdev, "WMMX MXMI returned %d\n", status);
 		return 0x00;
 	}
 
 	obj = retn.pointer;
 	if (obj->type == ACPI_TYPE_INTEGER) {
 		version = obj->integer.value;
-		nv_debug(mxm, "WMMX MXMI version %d.%d\n",
-			     (version >> 4), version & 0x0f);
+		nvkm_debug(subdev, "WMMX MXMI version %d.%d\n",
+			   (version >> 4), version & 0x0f);
 	} else {
 		version = 0;
-		nv_debug(mxm, "WMMX MXMI returned non-integer\n");
+		nvkm_debug(subdev, "WMMX MXMI returned non-integer\n");
 	}
 
 	kfree(obj);
@@ -158,6 +161,7 @@ wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
 static bool
 mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
 	struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
 	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -165,7 +169,7 @@ mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
 	acpi_status status;
 
 	if (!wmi_has_guid(WMI_WMMX_GUID)) {
-		nv_debug(mxm, "WMMX GUID not found\n");
+		nvkm_debug(subdev, "WMMX GUID not found\n");
 		return false;
 	}
 
@@ -177,7 +181,7 @@ mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
 
 	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
 	if (ACPI_FAILURE(status)) {
-		nv_debug(mxm, "WMMX MXMS returned %d\n", status);
+		nvkm_debug(subdev, "WMMX MXMS returned %d\n", status);
 		return false;
 	}
 
@@ -211,7 +215,7 @@ mxm_shadow(struct nvkm_mxm *mxm, u8 version)
 {
 	struct mxm_shadow_h *shadow = _mxm_shadow;
 	do {
-		nv_debug(mxm, "checking %s\n", shadow->name);
+		nvkm_debug(&mxm->subdev, "checking %s\n", shadow->name);
 		if (shadow->exec(mxm, version)) {
 			if (mxms_valid(mxm))
 				return 0;
@@ -222,33 +226,33 @@ mxm_shadow(struct nvkm_mxm *mxm, u8 version)
 	return -ENOENT;
 }
 
+static const struct nvkm_subdev_func
+nvkm_mxm = {
+};
+
 int
-nvkm_mxm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_bios *bios = nvkm_bios(device);
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_mxm *mxm;
 	u8  ver, len;
 	u16 data;
-	int ret;
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm",
-				  length, pobject);
-	mxm = *pobject;
-	if (ret)
-		return ret;
+	if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_mxm, device, index, 0, &mxm->subdev);
 
 	data = mxm_table(bios, &ver, &len);
-	if (!data || !(ver = nv_ro08(bios, data))) {
-		nv_debug(mxm, "no VBIOS data, nothing to do\n");
+	if (!data || !(ver = nvbios_rd08(bios, data))) {
+		nvkm_debug(&mxm->subdev, "no VBIOS data, nothing to do\n");
 		return 0;
 	}
 
-	nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
+	nvkm_info(&mxm->subdev, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
 
 	if (mxm_shadow(mxm, ver)) {
-		nv_info(mxm, "failed to locate valid SIS\n");
+		nvkm_warn(&mxm->subdev, "failed to locate valid SIS\n");
 #if 0
 		/* we should, perhaps, fall back to some kind of limited
 		 * mode here if the x86 vbios hasn't already done the
@@ -261,8 +265,8 @@ nvkm_mxm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 #endif
 	}
 
-	nv_info(mxm, "MXMS Version %d.%d\n",
-		mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
+	nvkm_debug(&mxm->subdev, "MXMS Version %d.%d\n",
+		   mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
 	mxms_foreach(mxm, 0, NULL, NULL);
 
 	if (nvkm_boolopt(device->cfgopt, "NvMXMDCB", true))
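
mxm_shadow() above walks the `_mxm_shadow` table (ROM, then ACPI DSM, then WMI) and keeps the first method whose data passes mxms_valid(). A minimal standalone sketch of that try-each-provider loop, with made-up providers:

#include <stdio.h>
#include <stdbool.h>

struct shadow_method {
	const char *name;
	bool (*exec)(void);
};

static bool try_rom(void) { return false; }     /* pretend ROM fetch fails */
static bool try_dsm(void) { return true; }      /* pretend ACPI DSM works */

static const struct shadow_method methods[] = {
	{ "ROM", try_rom },
	{ "DSM", try_dsm },
	{ NULL, NULL },                             /* sentinel ends the walk */
};

int
main(void)
{
	const struct shadow_method *m;
	for (m = methods; m->name; m++) {
		printf("checking %s\n", m->name);
		if (m->exec()) {
			printf("using %s\n", m->name);
			return 0;
		}
	}
	return 1;                                   /* -ENOENT in the driver */
}
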
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index a9b1d63fed58..45a2f8e784f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -47,7 +47,7 @@ mxms_version(struct nvkm_mxm *mxm)
 		break;
 	}
 
-	nv_debug(mxm, "unknown version %d.%d\n", mxms[4], mxms[5]);
+	nvkm_debug(&mxm->subdev, "unknown version %d.%d\n", mxms[4], mxms[5]);
 	return 0x0000;
 }
 
@@ -71,7 +71,7 @@ mxms_checksum(struct nvkm_mxm *mxm)
 	while (size--)
 		sum += *mxms++;
 	if (sum) {
-		nv_debug(mxm, "checksum invalid\n");
+		nvkm_debug(&mxm->subdev, "checksum invalid\n");
 		return false;
 	}
 	return true;
@@ -82,7 +82,7 @@ mxms_valid(struct nvkm_mxm *mxm)
 {
 	u8 *mxms = mxms_data(mxm);
 	if (*(u32 *)mxms != 0x5f4d584d) {
-		nv_debug(mxm, "signature invalid\n");
+		nvkm_debug(&mxm->subdev, "signature invalid\n");
 		return false;
 	}
 
@@ -96,6 +96,7 @@ bool
 mxms_foreach(struct nvkm_mxm *mxm, u8 types,
 	     bool (*exec)(struct nvkm_mxm *, u8 *, void *), void *info)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u8 *mxms = mxms_data(mxm);
 	u8 *desc = mxms + mxms_headerlen(mxm);
 	u8 *fini = desc + mxms_structlen(mxm) - 1;
@@ -140,29 +141,28 @@ mxms_foreach(struct nvkm_mxm *mxm, u8 types,
 			entries   = desc[1] & 0x07;
 			break;
 		default:
-			nv_debug(mxm, "unknown descriptor type %d\n", type);
+			nvkm_debug(subdev, "unknown descriptor type %d\n", type);
 			return false;
 		}
 
-		if (nv_subdev(mxm)->debug >= NV_DBG_DEBUG && (exec == NULL)) {
-			static const char * mxms_desc_name[] = {
+		if (mxm->subdev.debug >= NV_DBG_DEBUG && (exec == NULL)) {
+			static const char * mxms_desc[] = {
 				"ODS", "SCCS", "TS", "IPS",
 				"GSD", "VSS", "BCS", "FCS",
 			};
 			u8 *dump = desc;
+			char data[32], *ptr;
 			int i, j;
 
-			nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
-			for (j = headerlen - 1; j >= 0; j--)
-				pr_cont("%02x", dump[j]);
-			pr_cont("\n");
+			for (j = headerlen - 1, ptr = data; j >= 0; j--)
+				ptr += sprintf(ptr, "%02x", dump[j]);
 			dump += headerlen;
 
+			nvkm_debug(subdev, "%4s: %s\n", mxms_desc[type], data);
 			for (i = 0; i < entries; i++, dump += recordlen) {
-				nv_debug(mxm, "      ");
-				for (j = recordlen - 1; j >= 0; j--)
-					pr_cont("%02x", dump[j]);
-				pr_cont("\n");
+				for (j = recordlen - 1, ptr = data; j >= 0; j--)
+					ptr += sprintf(ptr, "%02x", dump[j]);
+				nvkm_debug(subdev, "      %s\n", data);
 			}
 		}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
index 4ef804012d06..333e0c01545a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
@@ -1,6 +1,6 @@
 #ifndef __NVMXM_MXMS_H__
 #define __NVMXM_MXMS_H__
-#include <subdev/mxm.h>
+#include "priv.h"
 
 struct mxms_odev {
 	u8 outp_type;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index f20e4ca87e17..db14fad2ddfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -28,10 +28,6 @@
 #include <subdev/bios/dcb.h>
 #include <subdev/bios/mxm.h>
 
-struct nv50_mxm_priv {
-	struct nvkm_mxm base;
-};
-
 struct context {
 	u32 *outp;
 	struct mxms_odev desc;
@@ -53,7 +49,7 @@ mxm_match_tmds_partner(struct nvkm_mxm *mxm, u8 *data, void *info)
 static bool
 mxm_match_dcb(struct nvkm_mxm *mxm, u8 *data, void *info)
 {
-	struct nvkm_bios *bios = nvkm_bios(mxm);
+	struct nvkm_bios *bios = mxm->subdev.device->bios;
 	struct context *ctx = info;
 	u64 desc = *(u64 *)data;
 
@@ -107,8 +103,8 @@ mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
 	 * if one isn't found, disable it.
 	 */
 	if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
-		nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n",
-			idx, ctx.outp[0], ctx.outp[1]);
+		nvkm_debug(&mxm->subdev, "disable %d: %08x %08x\n",
+			   idx, ctx.outp[0], ctx.outp[1]);
 		ctx.outp[0] |= 0x0000000f;
 		return 0;
 	}
@@ -180,20 +176,22 @@ mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
 static bool
 mxm_show_unmatched(struct nvkm_mxm *mxm, u8 *data, void *info)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u64 desc = *(u64 *)data;
 	if ((desc & 0xf0) != 0xf0)
-		nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
+		nvkm_info(subdev, "unmatched output device %016llx\n", desc);
 	return true;
 }
 
 static void
 mxm_dcb_sanitise(struct nvkm_mxm *mxm)
 {
-	struct nvkm_bios *bios = nvkm_bios(mxm);
+	struct nvkm_subdev *subdev = &mxm->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	u8  ver, hdr, cnt, len;
 	u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
 	if (dcb == 0x0000 || ver != 0x40) {
-		nv_debug(mxm, "unsupported DCB version\n");
+		nvkm_debug(subdev, "unsupported DCB version\n");
 		return;
 	}
 
@@ -201,31 +199,20 @@ mxm_dcb_sanitise(struct nvkm_mxm *mxm)
 	mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
 }
 
-static int
-nv50_mxm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+nv50_mxm_new(struct nvkm_device *device, int index, struct nvkm_subdev **pmxm)
 {
-	struct nv50_mxm_priv *priv;
+	struct nvkm_mxm *mxm;
 	int ret;
 
-	ret = nvkm_mxm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_mxm_new_(device, index, &mxm);
+	if (mxm)
+		*pmxm = &mxm->subdev;
 	if (ret)
 		return ret;
 
-	if (priv->base.action & MXM_SANITISE_DCB)
-		mxm_dcb_sanitise(&priv->base);
+	if (mxm->action & MXM_SANITISE_DCB)
+		mxm_dcb_sanitise(mxm);
+
 	return 0;
 }
-
-struct nvkm_oclass
-nv50_mxm_oclass = {
-	.handle = NV_SUBDEV(MXM, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mxm_ctor,
-		.dtor = _nvkm_mxm_dtor,
-		.init = _nvkm_mxm_init,
-		.fini = _nvkm_mxm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
new file mode 100644
index 000000000000..7d970157aed1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -0,0 +1,15 @@
+#ifndef __NVKM_MXM_PRIV_H__
+#define __NVKM_MXM_PRIV_H__
+#define nvkm_mxm(p) container_of((p), struct nvkm_mxm, subdev)
+#include <subdev/mxm.h>
+
+#define MXM_SANITISE_DCB 0x00000001
+
+struct nvkm_mxm {
+	struct nvkm_subdev subdev;
+	u32 action;
+	u8 *mxms;
+};
+
+int nvkm_mxm_new_(struct nvkm_device *, int index, struct nvkm_mxm **);
+#endif
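
The `nvkm_mxm(p)` macro above is the usual container_of() idiom: subdev callbacks receive a `struct nvkm_subdev *`, and the wrapper is recovered by subtracting the member's offset. A self-contained sketch, using the textbook definition rather than the kernel's type-checked one:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev { int index; };

struct mxm {
	struct subdev subdev;   /* embedded base, need not be first */
	int action;
};

int
main(void)
{
	struct mxm m = { .subdev = { .index = 7 }, .action = 1 };
	struct subdev *sd = &m.subdev;          /* what callbacks receive */
	struct mxm *back = container_of(sd, struct mxm, subdev);
	printf("%d %d\n", back->subdev.index, back->action);
	return 0;
}
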
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
new file mode 100644
index 000000000000..99672c3d0bad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -0,0 +1,7 @@
+nvkm-y += nvkm/subdev/pci/agp.o
+nvkm-y += nvkm/subdev/pci/base.o
+nvkm-y += nvkm/subdev/pci/nv04.o
+nvkm-y += nvkm/subdev/pci/nv40.o
+nvkm-y += nvkm/subdev/pci/nv4c.o
+nvkm-y += nvkm/subdev/pci/nv50.o
+nvkm-y += nvkm/subdev/pci/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
new file mode 100644
index 000000000000..814cb51cc873
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2015 Nouveau Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "agp.h"
+#ifdef __NVKM_PCI_AGP_H__
+#include <core/option.h>
+
+struct nvkm_device_agp_quirk {
+	u16 hostbridge_vendor;
+	u16 hostbridge_device;
+	u16 chip_vendor;
+	u16 chip_device;
+	int mode;
+};
+
+static const struct nvkm_device_agp_quirk
+nvkm_device_agp_quirks[] = {
+	/* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+	{},
+};
+
+void
+nvkm_agp_fini(struct nvkm_pci *pci)
+{
+	if (pci->agp.acquired) {
+		agp_backend_release(pci->agp.bridge);
+		pci->agp.acquired = false;
+	}
+}
+
+/* Ensure AGP controller is in a consistent state in case we need to
+ * execute the VBIOS DEVINIT scripts.
+ */
+void
+nvkm_agp_preinit(struct nvkm_pci *pci)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	u32 mode = nvkm_pci_rd32(pci, 0x004c);
+	u32 save[2];
+
+	/* First of all, disable fast writes, otherwise if it's already
+	 * enabled in the AGP bridge and we disable the card's AGP
+	 * controller we might be locking ourselves out of it.
+	 */
+	if ((mode | pci->agp.mode) & PCI_AGP_COMMAND_FW) {
+		mode = pci->agp.mode & ~PCI_AGP_COMMAND_FW;
+		agp_enable(pci->agp.bridge, mode);
+	}
+
+	/* clear busmaster bit, and disable AGP */
+	save[0] = nvkm_pci_rd32(pci, 0x0004);
+	nvkm_pci_wr32(pci, 0x0004, save[0] & ~0x00000004);
+	nvkm_pci_wr32(pci, 0x004c, 0x00000000);
+
+	/* reset PGRAPH, PFIFO and PTIMER */
+	save[1] = nvkm_mask(device, 0x000200, 0x00011100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00011100, save[1]);
+
+	/* and restore busmaster bit (gives effect of resetting AGP) */
+	nvkm_pci_wr32(pci, 0x0004, save[0]);
+}
+
+int
+nvkm_agp_init(struct nvkm_pci *pci)
+{
+	if (!agp_backend_acquire(pci->pdev)) {
+		nvkm_error(&pci->subdev, "failed to acquire agp\n");
+		return -ENODEV;
+	}
+
+	agp_enable(pci->agp.bridge, pci->agp.mode);
+	pci->agp.acquired = true;
+	return 0;
+}
+
+void
+nvkm_agp_dtor(struct nvkm_pci *pci)
+{
+	arch_phys_wc_del(pci->agp.mtrr);
+}
+
+void
+nvkm_agp_ctor(struct nvkm_pci *pci)
+{
+	const struct nvkm_device_agp_quirk *quirk = nvkm_device_agp_quirks;
+	struct nvkm_subdev *subdev = &pci->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct agp_kern_info info;
+	int mode = -1;
+
+#ifdef __powerpc__
+	/* Disable AGP by default on all PowerPC machines for now -- At
+	 * least some UniNorth-2 AGP bridges are known to be broken:
+	 * DMA from the host to the card works just fine, but writeback
+	 * from the card to the host goes straight to memory
+	 * untranslated bypassing that GATT somehow, making them quite
+	 * untranslated, bypassing that GATT somehow, making them quite
+	 */
+	mode = 0;
+#endif
+	mode = nvkm_longopt(device->cfgopt, "NvAGP", mode);
+
+	/* acquire bridge temporarily, so that we can copy its info */
+	if (!(pci->agp.bridge = agp_backend_acquire(pci->pdev))) {
+		nvkm_warn(subdev, "failed to acquire agp\n");
+		return;
+	}
+	agp_copy_info(pci->agp.bridge, &info);
+	agp_backend_release(pci->agp.bridge);
+
+	pci->agp.mode = info.mode;
+	pci->agp.base = info.aper_base;
+	pci->agp.size = info.aper_size * 1024 * 1024;
+	pci->agp.cma  = info.cant_use_aperture;
+	pci->agp.mtrr = -1;
+
+	/* determine if bridge + chipset combination needs a workaround */
+	while (quirk->hostbridge_vendor) {
+		if (info.device->vendor == quirk->hostbridge_vendor &&
+		    info.device->device == quirk->hostbridge_device &&
+		    pci->pdev->vendor == quirk->chip_vendor &&
+		    pci->pdev->device == quirk->chip_device) {
+			nvkm_info(subdev, "forcing default agp mode to %dX, "
+					  "use NvAGP=<mode> to override\n",
+				  quirk->mode);
+			mode = quirk->mode;
+			break;
+		}
+		quirk++;
+	}
+
+	/* apply quirk / user-specified mode */
+	if (mode >= 1) {
+		if (pci->agp.mode & 0x00000008)
+			mode /= 4; /* AGPv3 */
+		pci->agp.mode &= ~0x00000007;
+		pci->agp.mode |= (mode & 0x7);
+	} else
+	if (mode == 0) {
+		pci->agp.bridge = NULL;
+		return;
+	}
+
+	/* fast writes appear to be broken on nv18, they make the card
+	 * lock up randomly.
+	 */
+	if (device->chipset == 0x18)
+		pci->agp.mode &= ~PCI_AGP_COMMAND_FW;
+
+	pci->agp.mtrr = arch_phys_wc_add(pci->agp.base, pci->agp.size);
+}
+#endif
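
nvkm_agp_ctor() above scans a quirk table keyed on the host-bridge and GPU PCI IDs, stopping at a zero-filled sentinel entry. A standalone sketch of that walk; the IDs mirror the single VIA/NVIDIA entry above but are otherwise illustrative:

#include <stdio.h>

struct quirk {
	unsigned short bridge_vendor, bridge_device;
	unsigned short chip_vendor, chip_device;
	int mode;
};

static const struct quirk quirks[] = {
	{ 0x1106, 0x0691, 0x10de, 0x0311, 2 },  /* VIA bridge + NV31 */
	{},                                      /* zeroed sentinel */
};

static int
quirk_mode(unsigned short bv, unsigned short bd,
	   unsigned short cv, unsigned short cd)
{
	const struct quirk *q = quirks;
	while (q->bridge_vendor) {              /* stop at the sentinel */
		if (q->bridge_vendor == bv && q->bridge_device == bd &&
		    q->chip_vendor == cv && q->chip_device == cd)
			return q->mode;
		q++;
	}
	return -1;                              /* no quirk: keep default */
}

int
main(void)
{
	printf("mode %d\n", quirk_mode(0x1106, 0x0691, 0x10de, 0x0311));
	return 0;
}
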
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
new file mode 100644
index 000000000000..df2dd08363ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
@@ -0,0 +1,18 @@
+#include "priv.h"
+#if defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))
+#ifndef __NVKM_PCI_AGP_H__
+#define __NVKM_PCI_AGP_H__
+
+void nvkm_agp_ctor(struct nvkm_pci *);
+void nvkm_agp_dtor(struct nvkm_pci *);
+void nvkm_agp_preinit(struct nvkm_pci *);
+int nvkm_agp_init(struct nvkm_pci *);
+void nvkm_agp_fini(struct nvkm_pci *);
+#endif
+#else
+static inline void nvkm_agp_ctor(struct nvkm_pci *pci) {}
+static inline void nvkm_agp_dtor(struct nvkm_pci *pci) {}
+static inline void nvkm_agp_preinit(struct nvkm_pci *pci) {}
+static inline int nvkm_agp_init(struct nvkm_pci *pci) { return -ENOSYS; }
+static inline void nvkm_agp_fini(struct nvkm_pci *pci) {}
+#endif
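
agp.h above does two things worth noting: the include guard is only defined when AGP support is compiled in, so agp.c's `#ifdef __NVKM_PCI_AGP_H__` compiles its entire body out otherwise, and callers always link against either the real functions or inline no-op stubs, keeping every call site free of #ifdefs. A tiny sketch of the stub half, with `FEATURE_FOO` standing in for the config symbol:

#include <stdio.h>

/* #define FEATURE_FOO */               /* flip to build the real version */

#ifdef FEATURE_FOO
static void foo_init(void) { printf("real init\n"); }
#else
static inline void foo_init(void) {}    /* no-op stub when compiled out */
#endif

int
main(void)
{
	foo_init();                     /* the caller never needs an #ifdef */
	return 0;
}
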
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
new file mode 100644
index 000000000000..d1c148e51922
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+#include "agp.h"
+
+#include <core/option.h>
+#include <core/pci.h>
+#include <subdev/mc.h>
+
+u32
+nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+	return pci->func->rd32(pci, addr);
+}
+
+void
+nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+	pci->func->wr08(pci, addr, data);
+}
+
+void
+nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+	pci->func->wr32(pci, addr, data);
+}
+
+void
+nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
+{
+	u32 data = nvkm_pci_rd32(pci, 0x0050);
+	if (shadow)
+		data |=  0x00000001;
+	else
+		data &= ~0x00000001;
+	nvkm_pci_wr32(pci, 0x0050, data);
+}
+
+static irqreturn_t
+nvkm_pci_intr(int irq, void *arg)
+{
+	struct nvkm_pci *pci = arg;
+	struct nvkm_mc *mc = pci->subdev.device->mc;
+	bool handled = false;
+	if (likely(mc)) {
+		nvkm_mc_intr_unarm(mc);
+		if (pci->msi)
+			pci->func->msi_rearm(pci);
+		nvkm_mc_intr(mc, &handled);
+		nvkm_mc_intr_rearm(mc);
+	}
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int
+nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+
+	if (pci->irq >= 0) {
+		free_irq(pci->irq, pci);
+		pci->irq = -1;
+	}
+
+	if (pci->agp.bridge)
+		nvkm_agp_fini(pci);
+
+	return 0;
+}
+
+static int
+nvkm_pci_preinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+	if (pci->agp.bridge)
+		nvkm_agp_preinit(pci);
+	return 0;
+}
+
+static int
+nvkm_pci_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+	struct pci_dev *pdev = pci->pdev;
+	int ret;
+
+	if (pci->agp.bridge) {
+		ret = nvkm_agp_init(pci);
+		if (ret)
+			return ret;
+	}
+
+	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+	if (ret)
+		return ret;
+
+	pci->irq = pdev->irq;
+	return ret;
+}
+
+static void *
+nvkm_pci_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+	nvkm_agp_dtor(pci);
+	if (pci->msi)
+		pci_disable_msi(pci->pdev);
+	return nvkm_pci(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_pci_func = {
+	.dtor = nvkm_pci_dtor,
+	.preinit = nvkm_pci_preinit,
+	.init = nvkm_pci_init,
+	.fini = nvkm_pci_fini,
+};
+
+int
+nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_pci **ppci)
+{
+	struct nvkm_pci *pci;
+
+	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_pci_func, device, index, 0, &pci->subdev);
+	pci->func = func;
+	pci->pdev = device->func->pci(device)->pdev;
+	pci->irq = -1;
+
+	if (device->type == NVKM_DEVICE_AGP)
+		nvkm_agp_ctor(pci);
+
+	switch (pci->pdev->device & 0x0ff0) {
+	case 0x00f0:
+	case 0x02e0:
+		/* BR02? NFI how these would be handled yet exactly */
+		break;
+	default:
+		switch (device->chipset) {
+		case 0xaa:
+			/* reported broken, nv also disables it */
+			break;
+		default:
+			pci->msi = true;
+			break;
+		}
+	}
+
+	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
+	if (pci->msi && func->msi_rearm) {
+		pci->msi = pci_enable_msi(pci->pdev) == 0;
+		if (pci->msi)
+			nvkm_debug(&pci->subdev, "MSI enabled\n");
+	} else {
+		pci->msi = false;
+	}
+
+	return 0;
+}
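
nvkm_pci_intr() above follows the shared-IRQ contract: quiesce the interrupt (and re-arm MSI, which behaves edge-like), let the MC dispatcher record whether any of our units asserted, re-arm, and return IRQ_HANDLED only when something was actually ours, so the kernel can detect runaway shared lines. A rough userspace analogue of that flow:

#include <stdio.h>
#include <stdbool.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static void intr_unarm(void) { /* mask interrupt sources */ }
static void intr_rearm(void) { /* unmask interrupt sources */ }

static void
dispatch(bool *handled)
{
	*handled = true;          /* pretend one of our units asserted */
}

static enum irqreturn
pci_intr(void)
{
	bool handled = false;
	intr_unarm();             /* quiesce while we look */
	dispatch(&handled);
	intr_rearm();
	return handled ? IRQ_HANDLED : IRQ_NONE;
}

int
main(void)
{
	printf("%d\n", pci_intr());   /* 1 == IRQ_HANDLED */
	return 0;
}
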
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
new file mode 100644
index 000000000000..86f8226532d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gf100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+	nvkm_pci_wr08(pci, 0x0704, 0xff);
+}
+
+static const struct nvkm_pci_func
+gf100_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+	.msi_rearm = gf100_pci_msi_rearm,
+};
+
+int
+gf100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&gf100_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
new file mode 100644
index 000000000000..5b1ed42cb90b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static u32
+nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	return nvkm_rd32(device, 0x001800 + addr);
+}
+
+static void
+nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr08(device, 0x001800 + addr, data);
+}
+
+static void
+nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr32(device, 0x001800 + addr, data);
+}
+
+static const struct nvkm_pci_func
+nv04_pci_func = {
+	.rd32 = nv04_pci_rd32,
+	.wr08 = nv04_pci_wr08,
+	.wr32 = nv04_pci_wr32,
+};
+
+int
+nv04_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv04_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
new file mode 100644
index 000000000000..090a187f165f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+u32
+nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	return nvkm_rd32(device, 0x088000 + addr);
+}
+
+void
+nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr08(device, 0x088000 + addr, data);
+}
+
+void
+nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr32(device, 0x088000 + addr, data);
+}
+
+static void
+nv40_pci_msi_rearm(struct nvkm_pci *pci)
+{
+	nvkm_pci_wr08(pci, 0x0068, 0xff);
+}
+
+static const struct nvkm_pci_func
+nv40_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+	.msi_rearm = nv40_pci_msi_rearm,
+};
+
+int
+nv40_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv40_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
new file mode 100644
index 000000000000..1f1b26b5fa72
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static const struct nvkm_pci_func
+nv4c_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+};
+
+int
+nv4c_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv4c_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
new file mode 100644
index 000000000000..3e167d4a381f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+#include <core/pci.h>
+
+/* MSI re-arm through the PRI appears to be broken on the original G80,
+ * so we access it via alternate PCI config space mechanisms.
+ */
+static void
+nv50_pci_msi_rearm(struct nvkm_pci *pci)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	struct pci_dev *pdev = device->func->pci(device)->pdev;
+	pci_write_config_byte(pdev, 0x68, 0xff);
+}
+
+static const struct nvkm_pci_func
+nv50_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+	.msi_rearm = nv50_pci_msi_rearm,
+};
+
+int
+nv50_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv50_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
new file mode 100644
index 000000000000..d22c2c117106
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -0,0 +1,19 @@
+#ifndef __NVKM_PCI_PRIV_H__
+#define __NVKM_PCI_PRIV_H__
+#define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
+#include <subdev/pci.h>
+
+int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *,
+		  int index, struct nvkm_pci **);
+
+struct nvkm_pci_func {
+	u32 (*rd32)(struct nvkm_pci *, u16 addr);
+	void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
+	void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
+	void (*msi_rearm)(struct nvkm_pci *);
+};
+
+u32 nv40_pci_rd32(struct nvkm_pci *, u16);
+void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
+void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
+#endif
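
The point of routing every access through `pci->func` is visible in the nv04/nv40 files above: the PCI config mirror sits at MMIO offset 0x001800 on nv04 but at 0x088000 from nv40 onward, and later chipsets (nv4c, nv50, gf100) reuse the nv40 accessors while overriding only msi_rearm. A standalone sketch of the per-chipset window binding:

#include <stdio.h>

static unsigned int mmio[0x100000 / 4];        /* fake register file */

static unsigned int
rd32_at(unsigned int base, unsigned short addr)
{
	return mmio[(base + addr) / 4];
}

static unsigned int nv04_rd32(unsigned short a) { return rd32_at(0x001800, a); }
static unsigned int nv40_rd32(unsigned short a) { return rd32_at(0x088000, a); }

int
main(void)
{
	unsigned int (*rd32)(unsigned short) = nv40_rd32;  /* per-chipset bind */
	mmio[(0x088000 + 0x50) / 4] = 0xdeadbeef;
	printf("%08x\n", rd32(0x50));
	(void)nv04_rd32;               /* the older window, unused here */
	return 0;
}
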
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 7081d6a9b95f..88b643b8664e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -2,8 +2,9 @@ nvkm-y += nvkm/subdev/pmu/base.o
 nvkm-y += nvkm/subdev/pmu/memx.o
 nvkm-y += nvkm/subdev/pmu/gt215.o
 nvkm-y += nvkm/subdev/pmu/gf100.o
-nvkm-y += nvkm/subdev/pmu/gf110.o
+nvkm-y += nvkm/subdev/pmu/gf119.o
 nvkm-y += nvkm/subdev/pmu/gk104.o
 nvkm-y += nvkm/subdev/pmu/gk110.o
 nvkm-y += nvkm/subdev/pmu/gk208.o
 nvkm-y += nvkm/subdev/pmu/gk20a.o
+nvkm-y += nvkm/subdev/pmu/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 054b2d2eec35..27a79c0c3888 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -28,21 +28,25 @@
 void
 nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
-	const struct nvkm_pmu_impl *impl = (void *)nv_oclass(pmu);
-	if (impl->pgob)
-		impl->pgob(pmu, enable);
+	if (pmu->func->pgob)
+		pmu->func->pgob(pmu, enable);
 }
 
-static int
+int
 nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
 	      u32 process, u32 message, u32 data0, u32 data1)
 {
-	struct nvkm_subdev *subdev = nv_subdev(pmu);
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 addr;
 
 	/* wait for a free slot in the fifo */
-	addr  = nv_rd32(pmu, 0x10a4a0);
-	if (!nv_wait_ne(pmu, 0x10a4b0, 0xffffffff, addr ^ 8))
+	addr  = nvkm_rd32(device, 0x10a4a0);
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x10a4b0);
+		if (tmp != (addr ^ 8))
+			break;
+	) < 0)
 		return -EBUSY;
 
 	/* we currently only support a single process at a time waiting
@@ -57,20 +61,20 @@ nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000001);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000001);
+		nvkm_wr32(device, 0x10a580, 0x00000001);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);
 
 	/* write the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
 				pmu->send.base));
-	nv_wr32(pmu, 0x10a1c4, process);
-	nv_wr32(pmu, 0x10a1c4, message);
-	nv_wr32(pmu, 0x10a1c4, data0);
-	nv_wr32(pmu, 0x10a1c4, data1);
-	nv_wr32(pmu, 0x10a4a0, (addr + 1) & 0x0f);
+	nvkm_wr32(device, 0x10a1c4, process);
+	nvkm_wr32(device, 0x10a1c4, message);
+	nvkm_wr32(device, 0x10a1c4, data0);
+	nvkm_wr32(device, 0x10a1c4, data1);
+	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
 
 	/* release data segment access */
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* wait for reply, if requested */
 	if (reply) {
@@ -87,29 +91,31 @@ static void
 nvkm_pmu_recv(struct work_struct *work)
 {
 	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 process, message, data0, data1;
 
 	/* nothing to do if GET == PUT */
-	u32 addr =  nv_rd32(pmu, 0x10a4cc);
-	if (addr == nv_rd32(pmu, 0x10a4c8))
+	u32 addr =  nvkm_rd32(device, 0x10a4cc);
+	if (addr == nvkm_rd32(device, 0x10a4c8))
 		return;
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000002);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000002);
+		nvkm_wr32(device, 0x10a580, 0x00000002);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);
 
 	/* read the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
 				pmu->recv.base));
-	process = nv_rd32(pmu, 0x10a1c4);
-	message = nv_rd32(pmu, 0x10a1c4);
-	data0   = nv_rd32(pmu, 0x10a1c4);
-	data1   = nv_rd32(pmu, 0x10a1c4);
-	nv_wr32(pmu, 0x10a4cc, (addr + 1) & 0x0f);
+	process = nvkm_rd32(device, 0x10a1c4);
+	message = nvkm_rd32(device, 0x10a1c4);
+	data0   = nvkm_rd32(device, 0x10a1c4);
+	data1   = nvkm_rd32(device, 0x10a1c4);
+	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
 
 	/* release data segment access */
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* wake process if it's waiting on a synchronous reply */
 	if (pmu->recv.process) {
@@ -126,143 +132,149 @@ nvkm_pmu_recv(struct work_struct *work)
 	/* right now there are no other expected responses from the engine,
 	 * so assume that any unexpected message is an error.
 	 */
-	nv_warn(pmu, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
-		(char)((process & 0x000000ff) >>  0),
-		(char)((process & 0x0000ff00) >>  8),
-		(char)((process & 0x00ff0000) >> 16),
-		(char)((process & 0xff000000) >> 24),
-		process, message, data0, data1);
+	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
+		  (char)((process & 0x000000ff) >>  0),
+		  (char)((process & 0x0000ff00) >>  8),
+		  (char)((process & 0x00ff0000) >> 16),
+		  (char)((process & 0xff000000) >> 24),
+		  process, message, data0, data1);
 }
 
 static void
 nvkm_pmu_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_pmu *pmu = (void *)subdev;
-	u32 disp = nv_rd32(pmu, 0x10a01c);
-	u32 intr = nv_rd32(pmu, 0x10a008) & disp & ~(disp >> 16);
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	struct nvkm_device *device = pmu->subdev.device;
+	u32 disp = nvkm_rd32(device, 0x10a01c);
+	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
 
 	if (intr & 0x00000020) {
-		u32 stat = nv_rd32(pmu, 0x10a16c);
+		u32 stat = nvkm_rd32(device, 0x10a16c);
 		if (stat & 0x80000000) {
-			nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n",
-				 stat & 0x00ffffff, nv_rd32(pmu, 0x10a168));
-			nv_wr32(pmu, 0x10a16c, 0x00000000);
+			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
+				   stat & 0x00ffffff,
+				   nvkm_rd32(device, 0x10a168));
+			nvkm_wr32(device, 0x10a16c, 0x00000000);
 			intr &= ~0x00000020;
 		}
 	}
 
 	if (intr & 0x00000040) {
 		schedule_work(&pmu->recv.work);
-		nv_wr32(pmu, 0x10a004, 0x00000040);
+		nvkm_wr32(device, 0x10a004, 0x00000040);
 		intr &= ~0x00000040;
 	}
 
 	if (intr & 0x00000080) {
-		nv_info(pmu, "wr32 0x%06x 0x%08x\n", nv_rd32(pmu, 0x10a7a0),
-						     nv_rd32(pmu, 0x10a7a4));
-		nv_wr32(pmu, 0x10a004, 0x00000080);
+		nvkm_info(subdev, "wr32 %06x %08x\n",
+			  nvkm_rd32(device, 0x10a7a0),
+			  nvkm_rd32(device, 0x10a7a4));
+		nvkm_wr32(device, 0x10a004, 0x00000080);
 		intr &= ~0x00000080;
 	}
 
 	if (intr) {
-		nv_error(pmu, "intr 0x%08x\n", intr);
-		nv_wr32(pmu, 0x10a004, intr);
+		nvkm_error(subdev, "intr %08x\n", intr);
+		nvkm_wr32(device, 0x10a004, intr);
 	}
 }
 
-int
-_nvkm_pmu_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_pmu *pmu = (void *)object;
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	struct nvkm_device *device = pmu->subdev.device;
 
-	nv_wr32(pmu, 0x10a014, 0x00000060);
+	nvkm_wr32(device, 0x10a014, 0x00000060);
 	flush_work(&pmu->recv.work);
-
-	return nvkm_subdev_fini(&pmu->base, suspend);
+	return 0;
 }
 
-int
-_nvkm_pmu_init(struct nvkm_object *object)
+static int
+nvkm_pmu_init(struct nvkm_subdev *subdev)
 {
-	const struct nvkm_pmu_impl *impl = (void *)object->oclass;
-	struct nvkm_pmu *pmu = (void *)object;
-	int ret, i;
-
-	ret = nvkm_subdev_init(&pmu->base);
-	if (ret)
-		return ret;
-
-	nv_subdev(pmu)->intr = nvkm_pmu_intr;
-	pmu->message = nvkm_pmu_send;
-	pmu->pgob = nvkm_pmu_pgob;
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	struct nvkm_device *device = pmu->subdev.device;
+	int i;
 
 	/* prevent previous ucode from running, wait for idle, reset */
-	nv_wr32(pmu, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
-	nv_wait(pmu, 0x10a04c, 0xffffffff, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00002000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00002000, 0x00002000);
-	nv_rd32(pmu, 0x000200);
-	nv_wait(pmu, 0x10a10c, 0x00000006, 0x00000000);
+	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+	nvkm_msec(device, 2000,
+		if (!nvkm_rd32(device, 0x10a04c))
+			break;
+	);
+	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
+			break;
+	);
 
 	/* upload data segment */
-	nv_wr32(pmu, 0x10a1c0, 0x01000000);
-	for (i = 0; i < impl->data.size / 4; i++)
-		nv_wr32(pmu, 0x10a1c4, impl->data.data[i]);
+	nvkm_wr32(device, 0x10a1c0, 0x01000000);
+	for (i = 0; i < pmu->func->data.size / 4; i++)
+		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
 
 	/* upload code segment */
-	nv_wr32(pmu, 0x10a180, 0x01000000);
-	for (i = 0; i < impl->code.size / 4; i++) {
+	nvkm_wr32(device, 0x10a180, 0x01000000);
+	for (i = 0; i < pmu->func->code.size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(pmu, 0x10a188, i >> 6);
-		nv_wr32(pmu, 0x10a184, impl->code.data[i]);
+			nvkm_wr32(device, 0x10a188, i >> 6);
+		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
 	}
 
 	/* start it running */
-	nv_wr32(pmu, 0x10a10c, 0x00000000);
-	nv_wr32(pmu, 0x10a104, 0x00000000);
-	nv_wr32(pmu, 0x10a100, 0x00000002);
+	nvkm_wr32(device, 0x10a10c, 0x00000000);
+	nvkm_wr32(device, 0x10a104, 0x00000000);
+	nvkm_wr32(device, 0x10a100, 0x00000002);
 
 	/* wait for valid host->pmu ring configuration */
-	if (!nv_wait_ne(pmu, 0x10a4d0, 0xffffffff, 0x00000000))
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x10a4d0))
+			break;
+	) < 0)
 		return -EBUSY;
-	pmu->send.base = nv_rd32(pmu, 0x10a4d0) & 0x0000ffff;
-	pmu->send.size = nv_rd32(pmu, 0x10a4d0) >> 16;
+	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
+	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
 
 	/* wait for valid pmu->host ring configuration */
-	if (!nv_wait_ne(pmu, 0x10a4dc, 0xffffffff, 0x00000000))
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x10a4dc))
+			break;
+	) < 0)
 		return -EBUSY;
-	pmu->recv.base = nv_rd32(pmu, 0x10a4dc) & 0x0000ffff;
-	pmu->recv.size = nv_rd32(pmu, 0x10a4dc) >> 16;
+	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
+	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
 
-	nv_wr32(pmu, 0x10a010, 0x000000e0);
+	nvkm_wr32(device, 0x10a010, 0x000000e0);
 	return 0;
 }
 
-int
-nvkm_pmu_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+static void *
+nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_pmu *pmu;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PMU",
-				  "pmu", length, pobject);
-	pmu = *pobject;
-	if (ret)
-		return ret;
-
-	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
-	init_waitqueue_head(&pmu->recv.wait);
-	return 0;
+	return nvkm_pmu(subdev);
 }
 
+static const struct nvkm_subdev_func
+nvkm_pmu = {
+	.dtor = nvkm_pmu_dtor,
+	.init = nvkm_pmu_init,
+	.fini = nvkm_pmu_fini,
+	.intr = nvkm_pmu_intr,
+};
+
 int
-_nvkm_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_pmu **ppmu)
 {
 	struct nvkm_pmu *pmu;
-	int ret = nvkm_pmu_create(parent, engine, oclass, &pmu);
-	*pobject = nv_object(pmu);
-	return ret;
+	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_pmu, device, index, 0, &pmu->subdev);
+	pmu->func = func;
+	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
+	init_waitqueue_head(&pmu->recv.wait);
+	return 0;
 }
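
Most of the conversions above trade nv_wait()/nv_wait_ne() for nvkm_msec(), which takes a statement block and evaluates to the time the poll took, or a negative value on timeout — so a `break` inside the block means the condition was met. A rough standalone imitation (GCC statement-expression and all; the real macro lives in subdev/timer.h and reads the timer subdev rather than the system clock):

#include <stdio.h>
#include <time.h>

static long long
now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

#define poll_msec(ms, ...) ({                                           \
	long long _limit = (ms) * 1000000LL;                             \
	long long _t0 = now_ns(), _taken = 0;                            \
	while ((_taken = now_ns() - _t0) < _limit) {                     \
		__VA_ARGS__            /* a 'break' here means success */ \
	}                                                                 \
	_taken < _limit ? _taken : -1;                                    \
})

int
main(void)
{
	volatile unsigned int reg = 1;  /* stands in for a busy status bit */

	if (poll_msec(10,
		if (!(reg & 1))         /* condition block, as in the diff */
			break;
	) < 0)
		printf("timeout\n");    /* reg never clears, so we land here */
	return 0;
}
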
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4 b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4
index ae9c3f18ae01..2f28c7e26a14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4
@@ -32,7 +32,7 @@
 
 #include "macros.fuc"
 
-.section #gf110_pmu_data
+.section #gf119_pmu_data
 #define INCLUDE_PROC
 #include "kernel.fuc"
 #include "arith.fuc"
@@ -56,7 +56,7 @@
 #undef INCLUDE_DATA
 .align 256
 
-.section #gf110_pmu_code
+.section #gf119_pmu_code
 #define INCLUDE_CODE
 #include "kernel.fuc"
 #include "arith.fuc"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index a0c499e4543c..31552af9b06e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -1,4 +1,4 @@
-uint32_t gf110_pmu_data[] = {
+uint32_t gf119_pmu_data[] = {
 /* 0x0000: proc_kern */
 	0x52544e49,
 	0x00000000,
@@ -915,7 +915,7 @@ uint32_t gf110_pmu_data[] = {
 	0x00000000,
 };
 
-uint32_t gf110_pmu_code[] = {
+uint32_t gf119_pmu_code[] = {
 	0x034d0ef5,
 /* 0x0004: rd32 */
 	0x07a007f1,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 78a4ea0101f1..aeb8ccd891fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -24,17 +24,16 @@
 #include "priv.h"
 #include "fuc/gf100.fuc3.h"
 
-struct nvkm_oclass *
-gf100_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gf100_pmu = {
 	.code.data = gf100_pmu_code,
 	.code.size = sizeof(gf100_pmu_code),
 	.data.data = gf100_pmu_data,
 	.data.size = sizeof(gf100_pmu_data),
-}.base;
+};
+
+int
+gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 6b3a23839ff0..fbc88d8ecd4d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -22,19 +22,18 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
-#include "fuc/gf110.fuc4.h"
+#include "fuc/gf119.fuc4.h"
 
-struct nvkm_oclass *
-gf110_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
-	.code.data = gf110_pmu_code,
-	.code.size = sizeof(gf110_pmu_code),
-	.data.data = gf110_pmu_data,
-	.data.size = sizeof(gf110_pmu_data),
-}.base;
+static const struct nvkm_pmu_func
+gf119_pmu = {
+	.code.data = gf119_pmu_code,
+	.code.size = sizeof(gf119_pmu_code),
+	.data.data = gf119_pmu_data,
+	.data.size = sizeof(gf119_pmu_data),
+};
+
+int
+gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 28fdb8ea9ed8..e33f5c03b9ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -21,47 +21,97 @@
  *
  * Authors: Ben Skeggs
  */
-#define gf110_pmu_code gk104_pmu_code
-#define gf110_pmu_data gk104_pmu_data
+#define gf119_pmu_code gk104_pmu_code
+#define gf119_pmu_data gk104_pmu_data
 #include "priv.h"
-#include "fuc/gf110.fuc4.h"
+#include "fuc/gf119.fuc4.h"
+
+#include <core/option.h>
+#include <subdev/timer.h>
+
+static void
+magic_(struct nvkm_device *device, u32 ctrl, int size)
+{
+	nvkm_wr32(device, 0x00c800, 0x00000000);
+	nvkm_wr32(device, 0x00c808, 0x00000000);
+	nvkm_wr32(device, 0x00c800, ctrl);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x00c800) & 0x40000000) {
+			while (size--)
+				nvkm_wr32(device, 0x00c804, 0x00000000);
+			break;
+		}
+	);
+	nvkm_wr32(device, 0x00c800, 0x00000000);
+}
+
+static void
+magic(struct nvkm_device *device, u32 ctrl)
+{
+	magic_(device, 0x8000a41f | ctrl, 6);
+	magic_(device, 0x80000421 | ctrl, 1);
+}
 
 static void
 gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
-	nv_rd32(pmu, 0x000200);
-	nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+	struct nvkm_device *device = pmu->subdev.device;
+
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
+	nvkm_mask(device, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
+
+	nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+	nvkm_rd32(device, 0x000200);
 
-	nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
-	nv_rd32(pmu, 0x000200);
+	if (nvkm_boolopt(device->cfgopt, "War00C800_0",
+	    device->quirk ? device->quirk->War00C800_0 : false)) {
+		nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
+		switch (device->chipset) {
+		case 0xe4:
+			magic(device, 0x04000000);
+			magic(device, 0x06000000);
+			magic(device, 0x0c000000);
+			magic(device, 0x0e000000);
+			break;
+		case 0xe6:
+			magic(device, 0x02000000);
+			magic(device, 0x04000000);
+			magic(device, 0x0a000000);
+			break;
+		case 0xe7:
+			magic(device, 0x02000000);
+			break;
+		default:
+			break;
+		}
+	}
 }
 
-struct nvkm_oclass *
-gk104_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gk104_pmu = {
 	.code.data = gk104_pmu_code,
 	.code.size = sizeof(gk104_pmu_code),
 	.data.data = gk104_pmu_data,
 	.data.size = sizeof(gk104_pmu_data),
 	.pgob = gk104_pmu_pgob,
-}.base;
+};
+
+int
+gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu);
+}
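
A note on the timed wait above: the old nv_wait() helper is gone, and
polls are now open-coded with the nvkm_msec() macro from
<subdev/timer.h>, which re-evaluates its body until the body executes
"break" or the timeout (in milliseconds) expires. A minimal sketch of
the idiom, assuming the macro's behaviour of yielding a negative value
on timeout (the helper name is illustrative, not part of the patch):

    static int
    poll_clear(struct nvkm_device *device, u32 addr, u32 bit)
    {
            /* spin for up to 2000ms waiting for 'bit' to clear */
            if (nvkm_msec(device, 2000,
                    if (!(nvkm_rd32(device, addr) & bit))
                            break;
            ) < 0)
                    return -ETIMEDOUT;
            return 0;
    }
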
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 89bb94b0af8b..ae255247c9d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -21,16 +21,17 @@
  *
  * Authors: Ben Skeggs
  */
-#define gf110_pmu_code gk110_pmu_code
-#define gf110_pmu_data gk110_pmu_data
+#define gf119_pmu_code gk110_pmu_code
+#define gf119_pmu_data gk110_pmu_data
 #include "priv.h"
-#include "fuc/gf110.fuc4.h"
+#include "fuc/gf119.fuc4.h"
 
 #include <subdev/timer.h>
 
 void
 gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
+	struct nvkm_device *device = pmu->subdev.device;
 	static const struct {
 		u32 addr;
 		u32 data;
@@ -54,42 +55,44 @@ gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 	};
 	int i;
 
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
-	nv_rd32(pmu, 0x000200);
-	nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x0206b4, 0x00000000, 0x00000000);
+	nvkm_mask(device, 0x0206b4, 0x00000000, 0x00000000);
 	for (i = 0; i < ARRAY_SIZE(magic); i++) {
-		nv_wr32(pmu, magic[i].addr, magic[i].data);
-		nv_wait(pmu, magic[i].addr, 0x80000000, 0x00000000);
+		nvkm_wr32(device, magic[i].addr, magic[i].data);
+		nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, magic[i].addr) & 0x80000000))
+				break;
+		);
 	}
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
-	nv_rd32(pmu, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+	nvkm_rd32(device, 0x000200);
 }
 
-struct nvkm_oclass *
-gk110_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xf0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gk110_pmu = {
 	.code.data = gk110_pmu_code,
 	.code.size = sizeof(gk110_pmu_code),
 	.data.data = gk110_pmu_data,
 	.data.size = sizeof(gk110_pmu_data),
 	.pgob = gk110_pmu_pgob,
-}.base;
+};
+
+int
+gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index b14134ef9ea5..3b4917637902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -24,18 +24,17 @@
 #include "priv.h"
 #include "fuc/gk208.fuc5.h"
 
-struct nvkm_oclass *
-gk208_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0x00),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gk208_pmu = {
 	.code.data = gk208_pmu_code,
 	.code.size = sizeof(gk208_pmu_code),
 	.data.data = gk208_pmu_data,
 	.data.size = sizeof(gk208_pmu_data),
 	.pgob = gk110_pmu_pgob,
-}.base;
+};
+
+int
+gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 594f746e68f2..6689d0290a7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -19,6 +19,7 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
+#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base.subdev)
 #include "priv.h"
 
 #include <subdev/clk.h>
@@ -35,7 +36,7 @@ struct gk20a_pmu_dvfs_data {
 	unsigned int avg_load;
 };
 
-struct gk20a_pmu_priv {
+struct gk20a_pmu {
 	struct nvkm_pmu base;
 	struct nvkm_alarm alarm;
 	struct gk20a_pmu_dvfs_data *data;
@@ -48,28 +49,28 @@ struct gk20a_pmu_dvfs_dev_status {
 };
 
 static int
-gk20a_pmu_dvfs_target(struct gk20a_pmu_priv *priv, int *state)
+gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
 {
-	struct nvkm_clk *clk = nvkm_clk(priv);
+	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
 
 	return nvkm_clk_astate(clk, *state, 0, false);
 }
 
 static int
-gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu_priv *priv, int *state)
+gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
 {
-	struct nvkm_clk *clk = nvkm_clk(priv);
+	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
 
 	*state = clk->pstate;
 	return 0;
 }
 
 static int
-gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
+gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
 				int *state, int load)
 {
-	struct gk20a_pmu_dvfs_data *data = priv->data;
-	struct nvkm_clk *clk = nvkm_clk(priv);
+	struct gk20a_pmu_dvfs_data *data = pmu->data;
+	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
 	int cur_level, level;
 
 	/* For GK20A, the performance level is directly mapped to pstate */
@@ -84,7 +85,8 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
 		level = min(clk->state_nr - 1, level);
 	}
 
-	nv_trace(priv, "cur level = %d, new level = %d\n", cur_level, level);
+	nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
+		   cur_level, level);
 
 	*state = level;
 
@@ -95,30 +97,35 @@ gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
 }
 
 static int
-gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu_priv *priv,
+gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
 			      struct gk20a_pmu_dvfs_dev_status *status)
 {
-	status->busy = nv_rd32(priv, 0x10a508 + (BUSY_SLOT * 0x10));
-	status->total= nv_rd32(priv, 0x10a508 + (CLK_SLOT * 0x10));
+	struct nvkm_device *device = pmu->base.subdev.device;
+	status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10));
+	status->total = nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10));
 	return 0;
 }
 
 static void
-gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu_priv *priv)
+gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
 {
-	nv_wr32(priv, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
-	nv_wr32(priv, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
+	struct nvkm_device *device = pmu->base.subdev.device;
+	nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
+	nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
 }
 
 static void
 gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
 {
-	struct gk20a_pmu_priv *priv =
-		container_of(alarm, struct gk20a_pmu_priv, alarm);
-	struct gk20a_pmu_dvfs_data *data = priv->data;
+	struct gk20a_pmu *pmu =
+		container_of(alarm, struct gk20a_pmu, alarm);
+	struct gk20a_pmu_dvfs_data *data = pmu->data;
 	struct gk20a_pmu_dvfs_dev_status status;
-	struct nvkm_clk *clk = nvkm_clk(priv);
-	struct nvkm_volt *volt = nvkm_volt(priv);
+	struct nvkm_subdev *subdev = &pmu->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_clk *clk = device->clk;
+	struct nvkm_timer *tmr = device->timer;
+	struct nvkm_volt *volt = device->volt;
 	u32 utilization = 0;
 	int state, ret;
 
@@ -129,9 +136,9 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
 	if (!clk || !volt)
 		goto resched;
 
-	ret = gk20a_pmu_dvfs_get_dev_status(priv, &status);
+	ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
 	if (ret) {
-		nv_warn(priv, "failed to get device status\n");
+		nvkm_warn(subdev, "failed to get device status\n");
 		goto resched;
 	}
 
@@ -140,56 +147,52 @@ gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
 
 	data->avg_load = (data->p_smooth * data->avg_load) + utilization;
 	data->avg_load /= data->p_smooth + 1;
-	nv_trace(priv, "utilization = %d %%, avg_load = %d %%\n",
-			utilization, data->avg_load);
+	nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
+		   utilization, data->avg_load);
 
-	ret = gk20a_pmu_dvfs_get_cur_state(priv, &state);
+	ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
 	if (ret) {
-		nv_warn(priv, "failed to get current state\n");
+		nvkm_warn(subdev, "failed to get current state\n");
 		goto resched;
 	}
 
-	if (gk20a_pmu_dvfs_get_target_state(priv, &state, data->avg_load)) {
-		nv_trace(priv, "set new state to %d\n", state);
-		gk20a_pmu_dvfs_target(priv, &state);
+	if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
+		nvkm_trace(subdev, "set new state to %d\n", state);
+		gk20a_pmu_dvfs_target(pmu, &state);
 	}
 
 resched:
-	gk20a_pmu_dvfs_reset_dev_status(priv);
-	nvkm_timer_alarm(priv, 100000000, alarm);
+	gk20a_pmu_dvfs_reset_dev_status(pmu);
+	nvkm_timer_alarm(tmr, 100000000, alarm);
 }
 
 static int
-gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
+gk20a_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_pmu *pmu = (void *)object;
-	struct gk20a_pmu_priv *priv = (void *)pmu;
-
-	nvkm_timer_alarm_cancel(priv, &priv->alarm);
+	struct gk20a_pmu *pmu = gk20a_pmu(subdev);
+	nvkm_timer_alarm_cancel(subdev->device->timer, &pmu->alarm);
+	return 0;
+}
 
-	return nvkm_subdev_fini(&pmu->base, suspend);
+static void *
+gk20a_pmu_dtor(struct nvkm_subdev *subdev)
+{
+	return gk20a_pmu(subdev);
 }
 
 static int
-gk20a_pmu_init(struct nvkm_object *object)
+gk20a_pmu_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_pmu *pmu = (void *)object;
-	struct gk20a_pmu_priv *priv = (void *)pmu;
-	int ret;
-
-	ret = nvkm_subdev_init(&pmu->base);
-	if (ret)
-		return ret;
-
-	pmu->pgob = nvkm_pmu_pgob;
+	struct gk20a_pmu *pmu = gk20a_pmu(subdev);
+	struct nvkm_device *device = pmu->base.subdev.device;
 
 	/* init pwr perf counter */
-	nv_wr32(pmu, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
-	nv_wr32(pmu, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
-	nv_wr32(pmu, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
+	nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
+	nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
+	nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
 
-	nvkm_timer_alarm(pmu, 2000000000, &priv->alarm);
-	return ret;
+	nvkm_timer_alarm(device->timer, 2000000000, &pmu->alarm);
+	return 0;
 }
 
 static struct gk20a_pmu_dvfs_data
@@ -199,32 +202,26 @@ gk20a_dvfs_data= {
 	.p_smooth = 1,
 };
 
-static int
-gk20a_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gk20a_pmu_priv *priv;
-	int ret;
+static const struct nvkm_subdev_func
+gk20a_pmu = {
+	.init = gk20a_pmu_init,
+	.fini = gk20a_pmu_fini,
+	.dtor = gk20a_pmu_dtor,
+};
 
-	ret = nvkm_pmu_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+int
+gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	static const struct nvkm_pmu_func func = {};
+	struct gk20a_pmu *pmu;
 
-	priv->data = &gk20a_dvfs_data;
+	if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
+		return -ENOMEM;
+	pmu->base.func = &func;
+	*ppmu = &pmu->base;
 
-	nvkm_alarm_init(&priv->alarm, gk20a_pmu_dvfs_work);
+	nvkm_subdev_ctor(&gk20a_pmu, device, index, 0, &pmu->base.subdev);
+	pmu->data = &gk20a_dvfs_data;
+	nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
 	return 0;
 }
-
-struct nvkm_oclass *
-gk20a_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = gk20a_pmu_init,
-		.fini = gk20a_pmu_fini,
-	},
-}.base;
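
The gk20a conversion shows the new shape for periodic work: the DVFS
handler recovers its container with container_of() and re-arms itself
through the device's timer, instead of going through the old object
casts. A stripped-down sketch of the same pattern (all names here are
illustrative):

    struct my_poller {
            struct nvkm_subdev *subdev;
            struct nvkm_alarm alarm;
    };

    static void
    my_poller_work(struct nvkm_alarm *alarm)
    {
            struct my_poller *p = container_of(alarm, struct my_poller, alarm);
            struct nvkm_timer *tmr = p->subdev->device->timer;

            /* ...sample the perf counters, pick a new pstate... */

            nvkm_timer_alarm(tmr, 100000000, alarm); /* re-arm in 100ms */
    }
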
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
new file mode 100644
index 000000000000..31b8692b4641
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#define gk208_pmu_code gm107_pmu_code
+#define gk208_pmu_data gm107_pmu_data
+#include "fuc/gk208.fuc5.h"
+
+static const struct nvkm_pmu_func
+gm107_pmu = {
+	.code.data = gm107_pmu_code,
+	.code.size = sizeof(gm107_pmu_code),
+	.data.data = gm107_pmu_data,
+	.data.size = sizeof(gm107_pmu_data),
+};
+
+int
+gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu);
+}
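
gm107 carries no ucode of its own: the #defines rename the arrays that
the generated fuc header emits, so one binary image serves both
chipsets. Roughly, the preprocessor turns the header's

    uint32_t gk208_pmu_code[] = { /* ... */ };

into

    uint32_t gm107_pmu_code[] = { /* ... */ };

with the array contents elided here.
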
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 30aaeb21de41..8ba7fa4ca75b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -24,26 +24,25 @@
 #include "priv.h"
 #include "fuc/gt215.fuc3.h"
 
-static int
-gt215_pmu_init(struct nvkm_object *object)
+static void
+gt215_pmu_reset(struct nvkm_pmu *pmu)
 {
-	struct nvkm_pmu *pmu = (void *)object;
-	nv_mask(pmu, 0x022210, 0x00000001, 0x00000000);
-	nv_mask(pmu, 0x022210, 0x00000001, 0x00000001);
-	return nvkm_pmu_init(pmu);
+	struct nvkm_device *device = pmu->subdev.device;
+	nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
 }
 
-struct nvkm_oclass *
-gt215_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xa3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = gt215_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gt215_pmu = {
+	.reset = gt215_pmu_reset,
 	.code.data = gt215_pmu_code,
 	.code.size = sizeof(gt215_pmu_code),
 	.data.data = gt215_pmu_data,
 	.data.size = sizeof(gt215_pmu_data),
-}.base;
+};
+
+int
+gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu);
+}
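
gt215's init-time register toggling becomes an optional reset hook on
nvkm_pmu_func. The call site lives in the shared base code, which is
outside the hunks shown; presumably it takes the form:

    /* sketch of the common code invoking the per-chipset hook */
    if (pmu->func->reset)
            pmu->func->reset(pmu);
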
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index b75c5b885980..e6f74168238c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -2,8 +2,6 @@
 #define __NVKM_PMU_MEMX_H__
 #include "priv.h"
 
-#include <core/device.h>
-
 struct nvkm_memx {
 	struct nvkm_pmu *pmu;
 	u32 base;
@@ -18,13 +16,13 @@ struct nvkm_memx {
 static void
 memx_out(struct nvkm_memx *memx)
 {
-	struct nvkm_pmu *pmu = memx->pmu;
+	struct nvkm_device *device = memx->pmu->subdev.device;
 	int i;
 
 	if (memx->c.mthd) {
-		nv_wr32(pmu, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
+		nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
 		for (i = 0; i < memx->c.size; i++)
-			nv_wr32(pmu, 0x10a1c4, memx->c.data[i]);
+			nvkm_wr32(device, 0x10a1c4, memx->c.data[i]);
 		memx->c.mthd = 0;
 		memx->c.size = 0;
 	}
@@ -44,12 +42,13 @@ memx_cmd(struct nvkm_memx *memx, u32 mthd, u32 size, u32 data[])
 int
 nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
 {
+	struct nvkm_device *device = pmu->subdev.device;
 	struct nvkm_memx *memx;
 	u32 reply[2];
 	int ret;
 
-	ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
-			   MEMX_INFO_DATA, 0);
+	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+			    MEMX_INFO_DATA, 0);
 	if (ret)
 		return ret;
 
@@ -62,9 +61,9 @@ nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000003);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000003);
-	nv_wr32(pmu, 0x10a1c0, 0x01000000 | memx->base);
+		nvkm_wr32(device, 0x10a580, 0x00000003);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000003);
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
 	return 0;
 }
 
@@ -73,23 +72,25 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
 {
 	struct nvkm_memx *memx = *pmemx;
 	struct nvkm_pmu *pmu = memx->pmu;
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 finish, reply[2];
 
 	/* flush the cache... */
 	memx_out(memx);
 
 	/* release data segment access */
-	finish = nv_rd32(pmu, 0x10a1c0) & 0x00ffffff;
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* call MEMX process to execute the script, and wait for reply */
 	if (exec) {
-		pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
-			     memx->base, finish);
+		nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
+			      memx->base, finish);
 	}
 
-	nv_debug(memx->pmu, "Exec took %uns, PMU_IN %08x\n",
-		 reply[0], reply[1]);
+	nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
+		   reply[0], reply[1]);
 	kfree(memx);
 	return 0;
 }
@@ -97,7 +98,7 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
 void
 nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
 {
-	nv_debug(memx->pmu, "R[%06x] = 0x%08x\n", addr, data);
+	nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
 	memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
 }
 
@@ -105,8 +106,8 @@ void
 nvkm_memx_wait(struct nvkm_memx *memx,
 		  u32 addr, u32 mask, u32 data, u32 nsec)
 {
-	nv_debug(memx->pmu, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
-				addr, mask, data, nsec);
+	nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
+		   addr, mask, data, nsec);
 	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -114,7 +115,7 @@ nvkm_memx_wait(struct nvkm_memx *memx,
 void
 nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
 {
-	nv_debug(memx->pmu, "    DELAY = %d ns\n", nsec);
+	nvkm_debug(&memx->pmu->subdev, "    DELAY = %d ns\n", nsec);
 	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -122,16 +123,17 @@ nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
 void
 nvkm_memx_wait_vblank(struct nvkm_memx *memx)
 {
-	struct nvkm_pmu *pmu = memx->pmu;
+	struct nvkm_subdev *subdev = &memx->pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 heads, x, y, px = 0;
 	int i, head_sync;
 
-	if (nv_device(pmu)->chipset < 0xd0) {
-		heads = nv_rd32(pmu, 0x610050);
+	if (device->chipset < 0xd0) {
+		heads = nvkm_rd32(device, 0x610050);
 		for (i = 0; i < 2; i++) {
 			/* Heuristic: sync to head with biggest resolution */
 			if (heads & (2 << (i << 3))) {
-				x = nv_rd32(pmu, 0x610b40 + (0x540 * i));
+				x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
 				y = (x & 0xffff0000) >> 16;
 				x &= 0x0000ffff;
 				if ((x * y) > px) {
@@ -143,11 +145,11 @@ nvkm_memx_wait_vblank(struct nvkm_memx *memx)
 	}
 
 	if (px == 0) {
-		nv_debug(memx->pmu, "WAIT VBLANK !NO ACTIVE HEAD\n");
+		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
 		return;
 	}
 
-	nv_debug(memx->pmu, "WAIT VBLANK HEAD%d\n", head_sync);
+	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
 	memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -155,18 +157,19 @@ nvkm_memx_wait_vblank(struct nvkm_memx *memx)
 void
 nvkm_memx_train(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, "   MEM TRAIN\n");
+	nvkm_debug(&memx->pmu->subdev, "   MEM TRAIN\n");
 	memx_cmd(memx, MEMX_TRAIN, 0, NULL);
 }
 
 int
 nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
 {
+	struct nvkm_device *device = pmu->subdev.device;
 	u32 reply[2], base, size, i;
 	int ret;
 
-	ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
-			   MEMX_INFO_TRAIN, 0);
+	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+			    MEMX_INFO_TRAIN, 0);
 	if (ret)
 		return ret;
 
@@ -176,10 +179,10 @@ nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
 		return -ENOMEM;
 
 	/* read the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x02000000 | base);
+	nvkm_wr32(device, 0x10a1c0, 0x02000000 | base);
 
 	for (i = 0; i < size; i++)
-		res[i] = nv_rd32(pmu, 0x10a1c4);
+		res[i] = nvkm_rd32(device, 0x10a1c4);
 
 	return 0;
 }
@@ -187,14 +190,14 @@ nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
 void
 nvkm_memx_block(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, "   HOST BLOCKED\n");
+	nvkm_debug(&memx->pmu->subdev, "   HOST BLOCKED\n");
 	memx_cmd(memx, MEMX_ENTER, 0, NULL);
 }
 
 void
 nvkm_memx_unblock(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, "   HOST UNBLOCKED\n");
+	nvkm_debug(&memx->pmu->subdev, "   HOST UNBLOCKED\n");
 	memx_cmd(memx, MEMX_LEAVE, 0, NULL);
 }
 #endif
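
Throughout memx.c the pmu->message() indirection becomes a call to the
exported nvkm_pmu_send(); the calling convention is unchanged. The
usage, as it appears in this file:

    u32 reply[2];
    int ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
                            MEMX_INFO_DATA, 0);
    if (ret)
            return ret;
    /* reply[0] and reply[1] carry the process's two-word answer */
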
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 799e7c8b88f5..f38c88fae3d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -1,38 +1,20 @@
 #ifndef __NVKM_PMU_PRIV_H__
 #define __NVKM_PMU_PRIV_H__
+#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
 #include <subdev/pmu.h>
 #include <subdev/pmu/fuc/os.h>
 
-#define nvkm_pmu_create(p, e, o, d)                                         \
-	nvkm_pmu_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_pmu_destroy(p)                                                 \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_pmu_init(p) ({                                                 \
-	struct nvkm_pmu *_pmu = (p);                                       \
-	_nvkm_pmu_init(nv_object(_pmu));                                   \
-})
-#define nvkm_pmu_fini(p,s) ({                                               \
-	struct nvkm_pmu *_pmu = (p);                                       \
-	_nvkm_pmu_fini(nv_object(_pmu), (s));                              \
-})
+int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
+		  int index, struct nvkm_pmu **);
 
-int nvkm_pmu_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
+struct nvkm_pmu_func {
+	void (*reset)(struct nvkm_pmu *);
 
-int _nvkm_pmu_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *, u32,
-		      struct nvkm_object **);
-#define _nvkm_pmu_dtor _nvkm_subdev_dtor
-int _nvkm_pmu_init(struct nvkm_object *);
-int _nvkm_pmu_fini(struct nvkm_object *, bool);
-void nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable);
-
-struct nvkm_pmu_impl {
-	struct nvkm_oclass base;
 	struct {
 		u32 *data;
 		u32  size;
 	} code;
+
 	struct {
 		u32 *data;
 		u32  size;
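
With nvkm_pmu_impl and the oclass boilerplate gone from priv.h, a
chipset implementation reduces to a const function table plus a
one-line constructor. A sketch of what a new variant would add
(the chipset name and ucode arrays are placeholders):

    static const struct nvkm_pmu_func
    gxxx_pmu = {
            .code.data = gxxx_pmu_code, /* placeholder ucode image */
            .code.size = sizeof(gxxx_pmu_code),
            .data.data = gxxx_pmu_data,
            .data.size = sizeof(gxxx_pmu_data),
    };

    int
    gxxx_pmu_new(struct nvkm_device *device, int index,
                 struct nvkm_pmu **ppmu)
    {
            return nvkm_pmu_new_(&gxxx_pmu, device, index, ppmu);
    }
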
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 5837cf1292d9..135758ba3e28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -9,5 +9,5 @@ nvkm-y += nvkm/subdev/therm/nv40.o
 nvkm-y += nvkm/subdev/therm/nv50.o
 nvkm-y += nvkm/subdev/therm/g84.o
 nvkm-y += nvkm/subdev/therm/gt215.o
-nvkm-y += nvkm/subdev/therm/gf110.o
+nvkm-y += nvkm/subdev/therm/gf119.o
 nvkm-y += nvkm/subdev/therm/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index ec327cb64a0d..949dc6101a58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -23,21 +23,26 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
+int
+nvkm_therm_temp_get(struct nvkm_therm *therm)
+{
+	if (therm->func->temp_get)
+		return therm->func->temp_get(therm);
+	return -ENODEV;
+}
 
 static int
 nvkm_therm_update_trip(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_trip_point *trip = priv->fan->bios.trip,
+	struct nvbios_therm_trip_point *trip = therm->fan->bios.trip,
 				       *cur_trip = NULL,
-				       *last_trip = priv->last_trip;
-	u8  temp = therm->temp_get(therm);
+				       *last_trip = therm->last_trip;
+	u8  temp = therm->func->temp_get(therm);
 	u16 duty, i;
 
 	/* look for the trip point corresponding to the current temperature */
 	cur_trip = NULL;
-	for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) {
+	for (i = 0; i < therm->fan->bios.nr_fan_trip; i++) {
 		if (temp >= trip[i].temp)
 			cur_trip = &trip[i];
 	}
@@ -49,10 +54,10 @@ nvkm_therm_update_trip(struct nvkm_therm *therm)
 
 	if (cur_trip) {
 		duty = cur_trip->fan_duty;
-		priv->last_trip = cur_trip;
+		therm->last_trip = cur_trip;
 	} else {
 		duty = 0;
-		priv->last_trip = NULL;
+		therm->last_trip = NULL;
 	}
 
 	return duty;
@@ -61,51 +66,50 @@ nvkm_therm_update_trip(struct nvkm_therm *therm)
 static int
 nvkm_therm_update_linear(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	u8  linear_min_temp = priv->fan->bios.linear_min_temp;
-	u8  linear_max_temp = priv->fan->bios.linear_max_temp;
-	u8  temp = therm->temp_get(therm);
+	u8  linear_min_temp = therm->fan->bios.linear_min_temp;
+	u8  linear_max_temp = therm->fan->bios.linear_max_temp;
+	u8  temp = therm->func->temp_get(therm);
 	u16 duty;
 
 	/* handle the non-linear part first */
 	if (temp < linear_min_temp)
-		return priv->fan->bios.min_duty;
+		return therm->fan->bios.min_duty;
 	else if (temp > linear_max_temp)
-		return priv->fan->bios.max_duty;
+		return therm->fan->bios.max_duty;
 
 	/* we are in the linear zone */
 	duty  = (temp - linear_min_temp);
-	duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty);
+	duty *= (therm->fan->bios.max_duty - therm->fan->bios.min_duty);
 	duty /= (linear_max_temp - linear_min_temp);
-	duty += priv->fan->bios.min_duty;
+	duty += therm->fan->bios.min_duty;
 	return duty;
 }
 
 static void
 nvkm_therm_update(struct nvkm_therm *therm, int mode)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-	struct nvkm_therm_priv *priv = (void *)therm;
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_timer *tmr = subdev->device->timer;
 	unsigned long flags;
 	bool immd = true;
 	bool poll = true;
 	int duty = -1;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&therm->lock, flags);
 	if (mode < 0)
-		mode = priv->mode;
-	priv->mode = mode;
+		mode = therm->mode;
+	therm->mode = mode;
 
 	switch (mode) {
 	case NVKM_THERM_CTRL_MANUAL:
-		ptimer->alarm_cancel(ptimer, &priv->alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->alarm);
 		duty = nvkm_therm_fan_get(therm);
 		if (duty < 0)
 			duty = 100;
 		poll = false;
 		break;
 	case NVKM_THERM_CTRL_AUTO:
-		switch(priv->fan->bios.fan_mode) {
+		switch (therm->fan->bios.fan_mode) {
 		case NVBIOS_THERM_FAN_TRIP:
 			duty = nvkm_therm_update_trip(therm);
 			break;
@@ -113,8 +117,8 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
 			duty = nvkm_therm_update_linear(therm);
 			break;
 		case NVBIOS_THERM_FAN_OTHER:
-			if (priv->cstate)
-				duty = priv->cstate;
+			if (therm->cstate)
+				duty = therm->cstate;
 			poll = false;
 			break;
 		}
@@ -122,29 +126,29 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
 		break;
 	case NVKM_THERM_CTRL_NONE:
 	default:
-		ptimer->alarm_cancel(ptimer, &priv->alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->alarm);
 		poll = false;
 	}
 
-	if (list_empty(&priv->alarm.head) && poll)
-		ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	if (list_empty(&therm->alarm.head) && poll)
+		nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
+	spin_unlock_irqrestore(&therm->lock, flags);
 
 	if (duty >= 0) {
-		nv_debug(therm, "FAN target request: %d%%\n", duty);
+		nvkm_debug(subdev, "FAN target request: %d%%\n", duty);
 		nvkm_therm_fan_set(therm, immd, duty);
 	}
 }
 
 int
-nvkm_therm_cstate(struct nvkm_therm *ptherm, int fan, int dir)
+nvkm_therm_cstate(struct nvkm_therm *therm, int fan, int dir)
 {
-	struct nvkm_therm_priv *priv = (void *)ptherm;
-	if (!dir || (dir < 0 && fan < priv->cstate) ||
-		    (dir > 0 && fan > priv->cstate)) {
-		nv_debug(ptherm, "default fan speed -> %d%%\n", fan);
-		priv->cstate = fan;
-		nvkm_therm_update(ptherm, -1);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	if (!dir || (dir < 0 && fan < therm->cstate) ||
+		    (dir > 0 && fan > therm->cstate)) {
+		nvkm_debug(subdev, "default fan speed -> %d%%\n", fan);
+		therm->cstate = fan;
+		nvkm_therm_update(therm, -1);
 	}
 	return 0;
 }
@@ -152,16 +156,16 @@ nvkm_therm_cstate(struct nvkm_therm *ptherm, int fan, int dir)
 static void
 nvkm_therm_alarm(struct nvkm_alarm *alarm)
 {
-	struct nvkm_therm_priv *priv =
-	       container_of(alarm, struct nvkm_therm_priv, alarm);
-	nvkm_therm_update(&priv->base, -1);
+	struct nvkm_therm *therm =
+	       container_of(alarm, struct nvkm_therm, alarm);
+	nvkm_therm_update(therm, -1);
 }
 
 int
 nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_device *device = nv_device(therm);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	static const char *name[] = {
 		"disabled",
 		"manual",
@@ -171,51 +175,49 @@ nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
 	/* The default PPWR ucode on fermi interferes with fan management */
 	if ((mode >= ARRAY_SIZE(name)) ||
 	    (mode != NVKM_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
-	     !nvkm_subdev(device, NVDEV_SUBDEV_PMU)))
+	     !device->pmu))
 		return -EINVAL;
 
 	/* do not allow automatic fan management if the thermal sensor is
 	 * not available */
-	if (mode == NVKM_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
+	if (mode == NVKM_THERM_CTRL_AUTO &&
+	    therm->func->temp_get(therm) < 0)
 		return -EINVAL;
 
-	if (priv->mode == mode)
+	if (therm->mode == mode)
 		return 0;
 
-	nv_info(therm, "fan management: %s\n", name[mode]);
+	nvkm_debug(subdev, "fan management: %s\n", name[mode]);
 	nvkm_therm_update(therm, mode);
 	return 0;
 }
 
 int
-nvkm_therm_attr_get(struct nvkm_therm *therm,
-		       enum nvkm_therm_attr_type type)
+nvkm_therm_attr_get(struct nvkm_therm *therm, enum nvkm_therm_attr_type type)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
 	switch (type) {
 	case NVKM_THERM_ATTR_FAN_MIN_DUTY:
-		return priv->fan->bios.min_duty;
+		return therm->fan->bios.min_duty;
 	case NVKM_THERM_ATTR_FAN_MAX_DUTY:
-		return priv->fan->bios.max_duty;
+		return therm->fan->bios.max_duty;
 	case NVKM_THERM_ATTR_FAN_MODE:
-		return priv->mode;
+		return therm->mode;
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST:
-		return priv->bios_sensor.thrs_fan_boost.temp;
+		return therm->bios_sensor.thrs_fan_boost.temp;
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
-		return priv->bios_sensor.thrs_fan_boost.hysteresis;
+		return therm->bios_sensor.thrs_fan_boost.hysteresis;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK:
-		return priv->bios_sensor.thrs_down_clock.temp;
+		return therm->bios_sensor.thrs_down_clock.temp;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
-		return priv->bios_sensor.thrs_down_clock.hysteresis;
+		return therm->bios_sensor.thrs_down_clock.hysteresis;
 	case NVKM_THERM_ATTR_THRS_CRITICAL:
-		return priv->bios_sensor.thrs_critical.temp;
+		return therm->bios_sensor.thrs_critical.temp;
 	case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
-		return priv->bios_sensor.thrs_critical.hysteresis;
+		return therm->bios_sensor.thrs_critical.hysteresis;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN:
-		return priv->bios_sensor.thrs_shutdown.temp;
+		return therm->bios_sensor.thrs_shutdown.temp;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
-		return priv->bios_sensor.thrs_shutdown.hysteresis;
+		return therm->bios_sensor.thrs_shutdown.hysteresis;
 	}
 
 	return -EINVAL;
@@ -225,143 +227,156 @@ int
 nvkm_therm_attr_set(struct nvkm_therm *therm,
 		    enum nvkm_therm_attr_type type, int value)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
 	switch (type) {
 	case NVKM_THERM_ATTR_FAN_MIN_DUTY:
 		if (value < 0)
 			value = 0;
-		if (value > priv->fan->bios.max_duty)
-			value = priv->fan->bios.max_duty;
-		priv->fan->bios.min_duty = value;
+		if (value > therm->fan->bios.max_duty)
+			value = therm->fan->bios.max_duty;
+		therm->fan->bios.min_duty = value;
 		return 0;
 	case NVKM_THERM_ATTR_FAN_MAX_DUTY:
 		if (value < 0)
 			value = 0;
-		if (value < priv->fan->bios.min_duty)
-			value = priv->fan->bios.min_duty;
-		priv->fan->bios.max_duty = value;
+		if (value < therm->fan->bios.min_duty)
+			value = therm->fan->bios.min_duty;
+		therm->fan->bios.max_duty = value;
 		return 0;
 	case NVKM_THERM_ATTR_FAN_MODE:
 		return nvkm_therm_fan_mode(therm, value);
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST:
-		priv->bios_sensor.thrs_fan_boost.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_fan_boost.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
-		priv->bios_sensor.thrs_fan_boost.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_fan_boost.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK:
-		priv->bios_sensor.thrs_down_clock.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_down_clock.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
-		priv->bios_sensor.thrs_down_clock.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_down_clock.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_CRITICAL:
-		priv->bios_sensor.thrs_critical.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_critical.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
-		priv->bios_sensor.thrs_critical.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_critical.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN:
-		priv->bios_sensor.thrs_shutdown.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_shutdown.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
-		priv->bios_sensor.thrs_shutdown.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_shutdown.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	}
 
 	return -EINVAL;
 }
 
-int
-_nvkm_therm_init(struct nvkm_object *object)
+static void
+nvkm_therm_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_therm *therm = (void *)object;
-	struct nvkm_therm_priv *priv = (void *)therm;
-	int ret;
-
-	ret = nvkm_subdev_init(&therm->base);
-	if (ret)
-		return ret;
-
-	if (priv->suspend >= 0) {
-		/* restore the pwm value only when on manual or auto mode */
-		if (priv->suspend > 0)
-			nvkm_therm_fan_set(therm, true, priv->fan->percent);
-
-		nvkm_therm_fan_mode(therm, priv->suspend);
-	}
-	nvkm_therm_sensor_init(therm);
-	nvkm_therm_fan_init(therm);
-	return 0;
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+	if (therm->func->intr)
+		therm->func->intr(therm);
 }
 
-int
-_nvkm_therm_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_therm_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_therm *therm = (void *)object;
-	struct nvkm_therm_priv *priv = (void *)therm;
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+
+	if (therm->func->fini)
+		therm->func->fini(therm);
 
 	nvkm_therm_fan_fini(therm, suspend);
 	nvkm_therm_sensor_fini(therm, suspend);
+
 	if (suspend) {
-		priv->suspend = priv->mode;
-		priv->mode = NVKM_THERM_CTRL_NONE;
+		therm->suspend = therm->mode;
+		therm->mode = NVKM_THERM_CTRL_NONE;
 	}
 
-	return nvkm_subdev_fini(&therm->base, suspend);
-}
-
-int
-nvkm_therm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	struct nvkm_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PTHERM",
-				  "therm", length, pobject);
-	priv = *pobject;
-	if (ret)
-		return ret;
-
-	nvkm_alarm_init(&priv->alarm, nvkm_therm_alarm);
-	spin_lock_init(&priv->lock);
-	spin_lock_init(&priv->sensor.alarm_program_lock);
-
-	priv->base.fan_get = nvkm_therm_fan_user_get;
-	priv->base.fan_set = nvkm_therm_fan_user_set;
-	priv->base.fan_sense = nvkm_therm_fan_sense;
-	priv->base.attr_get = nvkm_therm_attr_get;
-	priv->base.attr_set = nvkm_therm_attr_set;
-	priv->mode = priv->suspend = -1; /* undefined */
 	return 0;
 }
 
-int
-nvkm_therm_preinit(struct nvkm_therm *therm)
+static int
+nvkm_therm_oneinit(struct nvkm_subdev *subdev)
 {
+	struct nvkm_therm *therm = nvkm_therm(subdev);
 	nvkm_therm_sensor_ctor(therm);
 	nvkm_therm_ic_ctor(therm);
 	nvkm_therm_fan_ctor(therm);
-
 	nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
 	nvkm_therm_sensor_preinit(therm);
 	return 0;
 }
 
-void
-_nvkm_therm_dtor(struct nvkm_object *object)
+static int
+nvkm_therm_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+
+	therm->func->init(therm);
+
+	if (therm->suspend >= 0) {
+		/* restore the pwm value only when on manual or auto mode */
+		if (therm->suspend > 0)
+			nvkm_therm_fan_set(therm, true, therm->fan->percent);
+
+		nvkm_therm_fan_mode(therm, therm->suspend);
+	}
+
+	nvkm_therm_sensor_init(therm);
+	nvkm_therm_fan_init(therm);
+	return 0;
+}
+
+static void *
+nvkm_therm_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+	kfree(therm->fan);
+	return therm;
+}
+
+static const struct nvkm_subdev_func
+nvkm_therm = {
+	.dtor = nvkm_therm_dtor,
+	.oneinit = nvkm_therm_oneinit,
+	.init = nvkm_therm_init,
+	.fini = nvkm_therm_fini,
+	.intr = nvkm_therm_intr,
+};
+
+int
+nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
+		int index, struct nvkm_therm **ptherm)
 {
-	struct nvkm_therm_priv *priv = (void *)object;
-	kfree(priv->fan);
-	nvkm_subdev_destroy(&priv->base.base);
+	struct nvkm_therm *therm;
+
+	if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_therm, device, index, 0, &therm->subdev);
+	therm->func = func;
+
+	nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
+	spin_lock_init(&therm->lock);
+	spin_lock_init(&therm->sensor.alarm_program_lock);
+
+	therm->fan_get = nvkm_therm_fan_user_get;
+	therm->fan_set = nvkm_therm_fan_user_set;
+	therm->attr_get = nvkm_therm_attr_get;
+	therm->attr_set = nvkm_therm_attr_set;
+	therm->mode = therm->suspend = -1; /* undefined */
+	return 0;
 }
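
For reference, nvkm_therm_update_linear() above interpolates the duty
cycle between the BIOS trip points; with the defaults installed by
nvkm_therm_fan_set_defaults() in fan.c below (0 to 100% between 40 and
85 degrees C), a reading of 60 degrees works out to
(60 - 40) * 100 / 45 = 44% in integer math. The same arithmetic in
isolation:

    static u16
    linear_duty(u8 temp, u8 tmin, u8 tmax, u16 dmin, u16 dmax)
    {
            u16 duty;
            if (temp < tmin)
                    return dmin;
            if (temp > tmax)
                    return dmax;
            duty  = (temp - tmin) * (dmax - dmin);
            duty /= (tmax - tmin);
            return duty + dmin;
    }
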
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 434fa745ca40..91198d79393a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -32,8 +32,8 @@ static int
 nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
 {
 	struct nvkm_therm *therm = fan->parent;
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(priv);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_timer *tmr = subdev->device->timer;
 	unsigned long flags;
 	int ret = 0;
 	int duty;
@@ -45,7 +45,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
 	target = max_t(u8, target, fan->bios.min_duty);
 	target = min_t(u8, target, fan->bios.max_duty);
 	if (fan->percent != target) {
-		nv_debug(therm, "FAN target: %d\n", target);
+		nvkm_debug(subdev, "FAN target: %d\n", target);
 		fan->percent = target;
 	}
 
@@ -70,7 +70,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
 		duty = target;
 	}
 
-	nv_debug(therm, "FAN update: %d\n", duty);
+	nvkm_debug(subdev, "FAN update: %d\n", duty);
 	ret = fan->set(therm, duty);
 	if (ret) {
 		spin_unlock_irqrestore(&fan->lock, flags);
@@ -95,7 +95,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
 		else
 			delay = bump_period;
 
-		ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
+		nvkm_timer_alarm(tmr, delay * 1000 * 1000, &fan->alarm);
 	}
 
 	return ret;
@@ -111,48 +111,51 @@ nvkm_fan_alarm(struct nvkm_alarm *alarm)
 int
 nvkm_therm_fan_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	return priv->fan->get(therm);
+	return therm->fan->get(therm);
 }
 
 int
 nvkm_therm_fan_set(struct nvkm_therm *therm, bool immediate, int percent)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	return nvkm_fan_update(priv->fan, immediate, percent);
+	return nvkm_fan_update(therm->fan, immediate, percent);
 }
 
 int
 nvkm_therm_fan_sense(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-	struct nvkm_gpio *gpio = nvkm_gpio(therm);
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_timer *tmr = device->timer;
+	struct nvkm_gpio *gpio = device->gpio;
 	u32 cycles, cur, prev;
 	u64 start, end, tach;
 
-	if (priv->fan->tach.func == DCB_GPIO_UNUSED)
+	if (therm->func->fan_sense)
+		return therm->func->fan_sense(therm);
+
+	if (therm->fan->tach.func == DCB_GPIO_UNUSED)
 		return -ENODEV;
 
 	/* Time a complete rotation and extrapolate to RPM:
 	 * When the fan spins, it changes the value of GPIO FAN_SENSE.
 	 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
 	 */
-	start = ptimer->read(ptimer);
-	prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
+	start = nvkm_timer_read(tmr);
+	prev = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
+				      therm->fan->tach.line);
 	cycles = 0;
 	do {
 		usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
 
-		cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
+		cur = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
+					     therm->fan->tach.line);
 		if (prev != cur) {
 			if (!start)
-				start = ptimer->read(ptimer);
+				start = nvkm_timer_read(tmr);
 			cycles++;
 			prev = cur;
 		}
-	} while (cycles < 5 && ptimer->read(ptimer) - start < 250000000);
-	end = ptimer->read(ptimer);
+	} while (cycles < 5 && nvkm_timer_read(tmr) - start < 250000000);
+	end = nvkm_timer_read(tmr);
 
 	if (cycles == 5) {
 		tach = (u64)60000000000ULL;
@@ -171,9 +174,7 @@ nvkm_therm_fan_user_get(struct nvkm_therm *therm)
 int
 nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
-	if (priv->mode != NVKM_THERM_CTRL_MANUAL)
+	if (therm->mode != NVKM_THERM_CTRL_MANUAL)
 		return -EINVAL;
 
 	return nvkm_therm_fan_set(therm, true, percent);
@@ -182,29 +183,25 @@ nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent)
 static void
 nvkm_therm_fan_set_defaults(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
-	priv->fan->bios.pwm_freq = 0;
-	priv->fan->bios.min_duty = 0;
-	priv->fan->bios.max_duty = 100;
-	priv->fan->bios.bump_period = 500;
-	priv->fan->bios.slow_down_period = 2000;
-	priv->fan->bios.linear_min_temp = 40;
-	priv->fan->bios.linear_max_temp = 85;
+	therm->fan->bios.pwm_freq = 0;
+	therm->fan->bios.min_duty = 0;
+	therm->fan->bios.max_duty = 100;
+	therm->fan->bios.bump_period = 500;
+	therm->fan->bios.slow_down_period = 2000;
+	therm->fan->bios.linear_min_temp = 40;
+	therm->fan->bios.linear_max_temp = 85;
 }
 
 static void
 nvkm_therm_fan_safety_checks(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
+	if (therm->fan->bios.min_duty > 100)
+		therm->fan->bios.min_duty = 100;
+	if (therm->fan->bios.max_duty > 100)
+		therm->fan->bios.max_duty = 100;
 
-	if (priv->fan->bios.min_duty > 100)
-		priv->fan->bios.min_duty = 100;
-	if (priv->fan->bios.max_duty > 100)
-		priv->fan->bios.max_duty = 100;
-
-	if (priv->fan->bios.min_duty > priv->fan->bios.max_duty)
-		priv->fan->bios.min_duty = priv->fan->bios.max_duty;
+	if (therm->fan->bios.min_duty > therm->fan->bios.max_duty)
+		therm->fan->bios.min_duty = therm->fan->bios.max_duty;
 }
 
 int
@@ -216,29 +213,28 @@ nvkm_therm_fan_init(struct nvkm_therm *therm)
 int
 nvkm_therm_fan_fini(struct nvkm_therm *therm, bool suspend)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-
+	struct nvkm_timer *tmr = therm->subdev.device->timer;
 	if (suspend)
-		ptimer->alarm_cancel(ptimer, &priv->fan->alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->fan->alarm);
 	return 0;
 }
 
 int
 nvkm_therm_fan_ctor(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_gpio *gpio = nvkm_gpio(therm);
-	struct nvkm_bios *bios = nvkm_bios(therm);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_gpio *gpio = device->gpio;
+	struct nvkm_bios *bios = device->bios;
 	struct dcb_gpio_func func;
 	int ret;
 
 	/* attempt to locate a drivable fan, and determine control method */
-	ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
+	ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
 	if (ret == 0) {
 		/* FIXME: is this really the place to perform such checks ? */
 		if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) {
-			nv_debug(therm, "GPIO_FAN is in input mode\n");
+			nvkm_debug(subdev, "GPIO_FAN is in input mode\n");
 			ret = -EINVAL;
 		} else {
 			ret = nvkm_fanpwm_create(therm, &func);
@@ -254,28 +250,29 @@ nvkm_therm_fan_ctor(struct nvkm_therm *therm)
 			return ret;
 	}
 
-	nv_info(therm, "FAN control: %s\n", priv->fan->type);
+	nvkm_debug(subdev, "FAN control: %s\n", therm->fan->type);
 
 	/* read the current speed, it is useful when resuming */
-	priv->fan->percent = nvkm_therm_fan_get(therm);
+	therm->fan->percent = nvkm_therm_fan_get(therm);
 
 	/* attempt to detect a tachometer connection */
-	ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
+	ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff,
+			     &therm->fan->tach);
 	if (ret)
-		priv->fan->tach.func = DCB_GPIO_UNUSED;
+		therm->fan->tach.func = DCB_GPIO_UNUSED;
 
 	/* initialise fan bump/slow update handling */
-	priv->fan->parent = therm;
-	nvkm_alarm_init(&priv->fan->alarm, nvkm_fan_alarm);
-	spin_lock_init(&priv->fan->lock);
+	therm->fan->parent = therm;
+	nvkm_alarm_init(&therm->fan->alarm, nvkm_fan_alarm);
+	spin_lock_init(&therm->fan->lock);
 
 	/* other random init... */
 	nvkm_therm_fan_set_defaults(therm);
-	nvbios_perf_fan_parse(bios, &priv->fan->perf);
-	if (!nvbios_fan_parse(bios, &priv->fan->bios)) {
-		nv_debug(therm, "parsing the fan table failed\n");
-		if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
-			nv_error(therm, "parsing both fan tables failed\n");
+	nvbios_perf_fan_parse(bios, &therm->fan->perf);
+	if (!nvbios_fan_parse(bios, &therm->fan->bios)) {
+		nvkm_debug(subdev, "parsing the fan table failed\n");
+		if (nvbios_therm_fan_parse(bios, &therm->fan->bios))
+			nvkm_error(subdev, "parsing both fan tables failed\n");
 	}
 	nvkm_therm_fan_safety_checks(therm);
 	return 0;
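
nvkm_therm_fan_sense() times GPIO edges on FAN_SENSE, four edges per
full rotation. The tail of the RPM conversion falls outside the hunk
shown, but the arithmetic it performs is roughly the following (a
sketch using div64_u64() from <linux/math64.h>, not the kernel's exact
expression):

    /* N edges in dt nanoseconds -> 60e9 * (N / 4) / dt RPM */
    static u32
    edges_to_rpm(u32 edges, u64 dt_ns)
    {
            if (!dt_ns)
                    return 0;
            return div64_u64(60000000000ULL * edges / 4, dt_ns);
    }
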
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
index 534e5970ec9c..8ae300f911b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
@@ -38,11 +38,10 @@ nvkm_fannil_set(struct nvkm_therm *therm, int percent)
 int
 nvkm_fannil_create(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
 	struct nvkm_fan *priv;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	tpriv->fan = priv;
+	therm->fan = priv;
 	if (!priv)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
index bde5ceaeb70a..340f37a299dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
@@ -24,13 +24,12 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/fan.h>
 #include <subdev/gpio.h>
 
-struct nvkm_fanpwm_priv {
+struct nvkm_fanpwm {
 	struct nvkm_fan base;
 	struct dcb_gpio_func func;
 };
@@ -38,76 +37,74 @@ struct nvkm_fanpwm_priv {
 static int
 nvkm_fanpwm_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan;
-	struct nvkm_gpio *gpio = nvkm_gpio(therm);
-	int card_type = nv_device(therm)->card_type;
+	struct nvkm_fanpwm *fan = (void *)therm->fan;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_gpio *gpio = device->gpio;
+	int card_type = device->card_type;
 	u32 divs, duty;
 	int ret;
 
-	ret = therm->pwm_get(therm, priv->func.line, &divs, &duty);
+	ret = therm->func->pwm_get(therm, fan->func.line, &divs, &duty);
 	if (ret == 0 && divs) {
 		divs = max(divs, duty);
-		if (card_type <= NV_40 || (priv->func.log[0] & 1))
+		if (card_type <= NV_40 || (fan->func.log[0] & 1))
 			duty = divs - duty;
 		return (duty * 100) / divs;
 	}
 
-	return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100;
+	return nvkm_gpio_get(gpio, 0, fan->func.func, fan->func.line) * 100;
 }
 
 static int
 nvkm_fanpwm_set(struct nvkm_therm *therm, int percent)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan;
-	int card_type = nv_device(therm)->card_type;
+	struct nvkm_fanpwm *fan = (void *)therm->fan;
+	int card_type = therm->subdev.device->card_type;
 	u32 divs, duty;
 	int ret;
 
-	divs = priv->base.perf.pwm_divisor;
-	if (priv->base.bios.pwm_freq) {
+	divs = fan->base.perf.pwm_divisor;
+	if (fan->base.bios.pwm_freq) {
 		divs = 1;
-		if (therm->pwm_clock)
-			divs = therm->pwm_clock(therm, priv->func.line);
-		divs /= priv->base.bios.pwm_freq;
+		if (therm->func->pwm_clock)
+			divs = therm->func->pwm_clock(therm, fan->func.line);
+		divs /= fan->base.bios.pwm_freq;
 	}
 
 	duty = ((divs * percent) + 99) / 100;
-	if (card_type <= NV_40 || (priv->func.log[0] & 1))
+	if (card_type <= NV_40 || (fan->func.log[0] & 1))
 		duty = divs - duty;
 
-	ret = therm->pwm_set(therm, priv->func.line, divs, duty);
+	ret = therm->func->pwm_set(therm, fan->func.line, divs, duty);
 	if (ret == 0)
-		ret = therm->pwm_ctrl(therm, priv->func.line, true);
+		ret = therm->func->pwm_ctrl(therm, fan->func.line, true);
 	return ret;
 }
 
 int
 nvkm_fanpwm_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
 {
-	struct nvkm_device *device = nv_device(therm);
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_bios *bios = nvkm_bios(therm);
-	struct nvkm_fanpwm_priv *priv;
-	struct nvbios_therm_fan fan;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_fanpwm *fan;
+	struct nvbios_therm_fan info = {};
 	u32 divs, duty;
 
-	nvbios_fan_parse(bios, &fan);
+	nvbios_fan_parse(bios, &info);
 
 	if (!nvkm_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
-	    !therm->pwm_ctrl || fan.type == NVBIOS_THERM_FAN_TOGGLE ||
-	     therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
+	    !therm->func->pwm_ctrl || info.type == NVBIOS_THERM_FAN_TOGGLE ||
+	     therm->func->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
 		return -ENODEV;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	tpriv->fan = &priv->base;
-	if (!priv)
+	fan = kzalloc(sizeof(*fan), GFP_KERNEL);
+	therm->fan = &fan->base;
+	if (!fan)
 		return -ENOMEM;
 
-	priv->base.type = "PWM";
-	priv->base.get = nvkm_fanpwm_get;
-	priv->base.set = nvkm_fanpwm_set;
-	priv->func = *func;
+	fan->base.type = "PWM";
+	fan->base.get = nvkm_fanpwm_get;
+	fan->base.set = nvkm_fanpwm_set;
+	fan->func = *func;
 	return 0;
 }
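
nvkm_fanpwm_set() converts a percentage into PWM ticks with a ceiling
division, inverting the result on boards whose PWM line is active-low:
for example, divs = 320 and percent = 33 gives
(320 * 33 + 99) / 100 = 106 of 320 ticks, about 33.1%. The computation
in isolation:

    static u32
    pwm_duty(u32 divs, int percent, bool invert)
    {
            u32 duty = ((divs * percent) + 99) / 100; /* round up */
            return invert ? divs - duty : duty;
    }
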
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 4ce041e81371..59701b7a6597 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -26,7 +26,7 @@
 #include <subdev/gpio.h>
 #include <subdev/timer.h>
 
-struct nvkm_fantog_priv {
+struct nvkm_fantog {
 	struct nvkm_fan base;
 	struct nvkm_alarm alarm;
 	spinlock_t lock;
@@ -36,83 +36,81 @@ struct nvkm_fantog_priv {
 };
 
 static void
-nvkm_fantog_update(struct nvkm_fantog_priv *priv, int percent)
+nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
 {
-	struct nvkm_therm_priv *tpriv = (void *)priv->base.parent;
-	struct nvkm_timer *ptimer = nvkm_timer(tpriv);
-	struct nvkm_gpio *gpio = nvkm_gpio(tpriv);
+	struct nvkm_therm *therm = fan->base.parent;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_timer *tmr = device->timer;
+	struct nvkm_gpio *gpio = device->gpio;
 	unsigned long flags;
 	int duty;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&fan->lock, flags);
 	if (percent < 0)
-		percent = priv->percent;
-	priv->percent = percent;
+		percent = fan->percent;
+	fan->percent = percent;
 
-	duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff);
-	gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
+	duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
 
-	if (list_empty(&priv->alarm.head) && percent != (duty * 100)) {
-		u64 next_change = (percent * priv->period_us) / 100;
+	if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+		u64 next_change = (percent * fan->period_us) / 100;
 		if (!duty)
-			next_change = priv->period_us - next_change;
-		ptimer->alarm(ptimer, next_change * 1000, &priv->alarm);
+			next_change = fan->period_us - next_change;
+		nvkm_timer_alarm(tmr, next_change * 1000, &fan->alarm);
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&fan->lock, flags);
 }
 
 static void
 nvkm_fantog_alarm(struct nvkm_alarm *alarm)
 {
-	struct nvkm_fantog_priv *priv =
-	       container_of(alarm, struct nvkm_fantog_priv, alarm);
-	nvkm_fantog_update(priv, -1);
+	struct nvkm_fantog *fan =
+	       container_of(alarm, struct nvkm_fantog, alarm);
+	nvkm_fantog_update(fan, -1);
 }
 
 static int
 nvkm_fantog_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fantog_priv *priv = (void *)tpriv->fan;
-	return priv->percent;
+	struct nvkm_fantog *fan = (void *)therm->fan;
+	return fan->percent;
 }
 
 static int
 nvkm_fantog_set(struct nvkm_therm *therm, int percent)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fantog_priv *priv = (void *)tpriv->fan;
-	if (therm->pwm_ctrl)
-		therm->pwm_ctrl(therm, priv->func.line, false);
-	nvkm_fantog_update(priv, percent);
+	struct nvkm_fantog *fan = (void *)therm->fan;
+	if (therm->func->pwm_ctrl)
+		therm->func->pwm_ctrl(therm, fan->func.line, false);
+	nvkm_fantog_update(fan, percent);
 	return 0;
 }
 
 int
 nvkm_fantog_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fantog_priv *priv;
+	struct nvkm_fantog *fan;
 	int ret;
 
-	if (therm->pwm_ctrl) {
-		ret = therm->pwm_ctrl(therm, func->line, false);
+	if (therm->func->pwm_ctrl) {
+		ret = therm->func->pwm_ctrl(therm, func->line, false);
 		if (ret)
 			return ret;
 	}
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	tpriv->fan = &priv->base;
-	if (!priv)
+	fan = kzalloc(sizeof(*fan), GFP_KERNEL);
+	therm->fan = &fan->base;
+	if (!fan)
 		return -ENOMEM;
 
-	priv->base.type = "toggle";
-	priv->base.get = nvkm_fantog_get;
-	priv->base.set = nvkm_fantog_set;
-	nvkm_alarm_init(&priv->alarm, nvkm_fantog_alarm);
-	priv->period_us = 100000; /* 10Hz */
-	priv->percent = 100;
-	priv->func = *func;
-	spin_lock_init(&priv->lock);
+	fan->base.type = "toggle";
+	fan->base.get = nvkm_fantog_get;
+	fan->base.set = nvkm_fantog_set;
+	nvkm_alarm_init(&fan->alarm, nvkm_fantog_alarm);
+	fan->period_us = 100000; /* 10Hz */
+	fan->percent = 100;
+	fan->func = *func;
+	spin_lock_init(&fan->lock);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 85b5d0c18c0b..86e81930d8ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -26,17 +26,13 @@
 
 #include <subdev/fuse.h>
 
-struct g84_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 int
 g84_temp_get(struct nvkm_therm *therm)
 {
-	struct nvkm_fuse *fuse = nvkm_fuse(therm);
+	struct nvkm_device *device = therm->subdev.device;
 
-	if (nv_ro32(fuse, 0x1a8) == 1)
-		return nv_rd32(therm, 0x20400);
+	if (nvkm_fuse_read(device->fuse, 0x1a8) == 1)
+		return nvkm_rd32(device, 0x20400);
 	else
 		return -ENODEV;
 }
@@ -44,12 +40,12 @@ g84_temp_get(struct nvkm_therm *therm)
 void
 g84_sensor_setup(struct nvkm_therm *therm)
 {
-	struct nvkm_fuse *fuse = nvkm_fuse(therm);
+	struct nvkm_device *device = therm->subdev.device;
 
 	/* enable temperature reading for cards with insane defaults */
-	if (nv_ro32(fuse, 0x1a8) == 1) {
-		nv_mask(therm, 0x20008, 0x80008000, 0x80000000);
-		nv_mask(therm, 0x2000c, 0x80000003, 0x00000000);
+	if (nvkm_fuse_read(device->fuse, 0x1a8) == 1) {
+		nvkm_mask(device, 0x20008, 0x80008000, 0x80000000);
+		nvkm_mask(device, 0x2000c, 0x80000003, 0x00000000);
 		mdelay(20); /* wait for the temperature to stabilize */
 	}
 }
@@ -57,36 +53,40 @@ g84_sensor_setup(struct nvkm_therm *therm)
 static void
 g84_therm_program_alarms(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
 
 	/* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
-	nv_wr32(therm, 0x20000, 0x000003ff);
+	nvkm_wr32(device, 0x20000, 0x000003ff);
 
 	/* shutdown: The computer should be shutdown when reached */
-	nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
-	nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+	nvkm_wr32(device, 0x20484, sensor->thrs_shutdown.hysteresis);
+	nvkm_wr32(device, 0x20480, sensor->thrs_shutdown.temp);
 
 	/* THRS_1 : fan boost*/
-	nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+	nvkm_wr32(device, 0x204c4, sensor->thrs_fan_boost.temp);
 
 	/* THRS_2 : critical */
-	nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+	nvkm_wr32(device, 0x204c0, sensor->thrs_critical.temp);
 
 	/* THRS_4 : down clock */
-	nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
-
-	nv_debug(therm,
-		 "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
-		 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
-		 sensor->thrs_down_clock.temp,
-		 sensor->thrs_down_clock.hysteresis,
-		 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
-		 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+	nvkm_wr32(device, 0x20414, sensor->thrs_down_clock.temp);
+	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
+
+	nvkm_debug(subdev,
+		   "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+		   sensor->thrs_fan_boost.temp,
+		   sensor->thrs_fan_boost.hysteresis,
+		   sensor->thrs_down_clock.temp,
+		   sensor->thrs_down_clock.hysteresis,
+		   sensor->thrs_critical.temp,
+		   sensor->thrs_critical.hysteresis,
+		   sensor->thrs_shutdown.temp,
+		   sensor->thrs_shutdown.hysteresis);
 
 }
 
@@ -97,24 +97,25 @@ g84_therm_threshold_hyst_emulation(struct nvkm_therm *therm,
 				   const struct nvbios_therm_threshold *thrs,
 				   enum nvkm_therm_thrs thrs_name)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	enum nvkm_therm_thrs_direction direction;
 	enum nvkm_therm_thrs_state prev_state, new_state;
 	int temp, cur;
 
 	prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
-	temp = nv_rd32(therm, thrs_reg);
+	temp = nvkm_rd32(device, thrs_reg);
 
 	/* program the next threshold */
 	if (temp == thrs->temp) {
-		nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+		nvkm_wr32(device, thrs_reg, thrs->temp - thrs->hysteresis);
 		new_state = NVKM_THERM_THRS_HIGHER;
 	} else {
-		nv_wr32(therm, thrs_reg, thrs->temp);
+		nvkm_wr32(device, thrs_reg, thrs->temp);
 		new_state = NVKM_THERM_THRS_LOWER;
 	}
 
 	/* fix the state (in case someone reprogrammed the alarms) */
-	cur = therm->temp_get(therm);
+	cur = therm->func->temp_get(therm);
 	if (new_state == NVKM_THERM_THRS_LOWER && cur > thrs->temp)
 		new_state = NVKM_THERM_THRS_HIGHER;
 	else if (new_state == NVKM_THERM_THRS_HIGHER &&
@@ -135,17 +136,17 @@ g84_therm_threshold_hyst_emulation(struct nvkm_therm *therm,
 }
 
 static void
-g84_therm_intr(struct nvkm_subdev *subdev)
+g84_therm_intr(struct nvkm_therm *therm)
 {
-	struct nvkm_therm *therm = nvkm_therm(subdev);
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	unsigned long flags;
 	uint32_t intr;
 
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
 
-	intr = nv_rd32(therm, 0x20100) & 0x3ff;
+	intr = nvkm_rd32(device, 0x20100) & 0x3ff;
 
 	/* THRS_4: downclock */
 	if (intr & 0x002) {
@@ -180,87 +181,66 @@ g84_therm_intr(struct nvkm_subdev *subdev)
 	}
 
 	if (intr)
-		nv_error(therm, "unhandled intr 0x%08x\n", intr);
+		nvkm_error(subdev, "intr %08x\n", intr);
 
 	/* ACK everything */
-	nv_wr32(therm, 0x20100, 0xffffffff);
-	nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+	nvkm_wr32(device, 0x20100, 0xffffffff);
+	nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
 
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 }
 
-static int
-g84_therm_init(struct nvkm_object *object)
+void
+g84_therm_fini(struct nvkm_therm *therm)
 {
-	struct g84_therm_priv *priv = (void *)object;
-	int ret;
+	struct nvkm_device *device = therm->subdev.device;
 
-	ret = nvkm_therm_init(&priv->base.base);
-	if (ret)
-		return ret;
+	/* Disable PTherm IRQs */
+	nvkm_wr32(device, 0x20000, 0x00000000);
 
-	g84_sensor_setup(&priv->base.base);
-	return 0;
+	/* ACK all PTherm IRQs */
+	nvkm_wr32(device, 0x20100, 0xffffffff);
+	nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
 }
 
-static int
-g84_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static void
+g84_therm_init(struct nvkm_therm *therm)
 {
-	struct g84_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv50_fan_pwm_get;
-	priv->base.base.pwm_set = nv50_fan_pwm_set;
-	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.sensor.program_alarms = g84_therm_program_alarms;
-	nv_subdev(priv)->intr = g84_therm_intr;
-
-	/* init the thresholds */
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_SHUTDOWN,
-					      NVKM_THERM_THRS_LOWER);
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_FANBOOST,
-					      NVKM_THERM_THRS_LOWER);
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_CRITICAL,
-					      NVKM_THERM_THRS_LOWER);
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_DOWNCLOCK,
-					      NVKM_THERM_THRS_LOWER);
-
-	return nvkm_therm_preinit(&priv->base.base);
+	g84_sensor_setup(therm);
 }
 
+static const struct nvkm_therm_func
+g84_therm = {
+	.init = g84_therm_init,
+	.fini = g84_therm_fini,
+	.intr = g84_therm_intr,
+	.pwm_ctrl = nv50_fan_pwm_ctrl,
+	.pwm_get = nv50_fan_pwm_get,
+	.pwm_set = nv50_fan_pwm_set,
+	.pwm_clock = nv50_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.program_alarms = g84_therm_program_alarms,
+};
+
 int
-g84_therm_fini(struct nvkm_object *object, bool suspend)
+g84_therm_new(struct nvkm_device *device, int index, struct nvkm_therm **ptherm)
 {
-	/* Disable PTherm IRQs */
-	nv_wr32(object, 0x20000, 0x00000000);
+	struct nvkm_therm *therm;
+	int ret;
 
-	/* ACK all PTherm IRQs */
-	nv_wr32(object, 0x20100, 0xffffffff);
-	nv_wr32(object, 0x1100, 0x10000); /* PBUS */
+	ret = nvkm_therm_new_(&g84_therm, device, index, &therm);
+	*ptherm = therm;
+	if (ret)
+		return ret;
 
-	return _nvkm_therm_fini(object, suspend);
+	/* init the thresholds */
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_SHUTDOWN,
+						     NVKM_THERM_THRS_LOWER);
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_FANBOOST,
+						     NVKM_THERM_THRS_LOWER);
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_CRITICAL,
+						     NVKM_THERM_THRS_LOWER);
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_DOWNCLOCK,
+						     NVKM_THERM_THRS_LOWER);
+	return 0;
 }
-
-struct nvkm_oclass
-g84_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = g84_therm_init,
-		.fini = g84_therm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
deleted file mode 100644
index 46b7e656a752..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/device.h>
-
-struct gf110_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
-static int
-pwm_info(struct nvkm_therm *therm, int line)
-{
-	u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
-
-	switch (gpio & 0x000000c0) {
-	case 0x00000000: /* normal mode, possibly pwm forced off by us */
-	case 0x00000040: /* nvio special */
-		switch (gpio & 0x0000001f) {
-		case 0x00: return 2;
-		case 0x19: return 1;
-		case 0x1c: return 0;
-		case 0x1e: return 2;
-		default:
-			break;
-		}
-	default:
-		break;
-	}
-
-	nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio);
-	return -ENODEV;
-}
-
-static int
-gf110_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
-{
-	u32 data = enable ? 0x00000040 : 0x00000000;
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return indx;
-	else if (indx < 2)
-		nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
-	/* nothing to do for indx == 2, it seems hardwired to PTHERM */
-	return 0;
-}
-
-static int
-gf110_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
-{
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return indx;
-	else if (indx < 2) {
-		if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
-			*divs = nv_rd32(therm, 0x00e114 + (indx * 8));
-			*duty = nv_rd32(therm, 0x00e118 + (indx * 8));
-			return 0;
-		}
-	} else if (indx == 2) {
-		*divs = nv_rd32(therm, 0x0200d8) & 0x1fff;
-		*duty = nv_rd32(therm, 0x0200dc) & 0x1fff;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static int
-gf110_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
-{
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return indx;
-	else if (indx < 2) {
-		nv_wr32(therm, 0x00e114 + (indx * 8), divs);
-		nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
-	} else if (indx == 2) {
-		nv_mask(therm, 0x0200d8, 0x1fff, divs); /* keep the high bits */
-		nv_wr32(therm, 0x0200dc, duty | 0x40000000);
-	}
-	return 0;
-}
-
-static int
-gf110_fan_pwm_clock(struct nvkm_therm *therm, int line)
-{
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return 0;
-	else if (indx < 2)
-		return (nv_device(therm)->crystal * 1000) / 20;
-	else
-		return nv_device(therm)->crystal * 1000 / 10;
-}
-
-int
-gf110_therm_init(struct nvkm_object *object)
-{
-	struct gf110_therm_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_therm_init(&priv->base.base);
-	if (ret)
-		return ret;
-
-	/* enable fan tach, count revolutions per-second */
-	nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
-	if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) {
-		nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line);
-		nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
-		nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
-	}
-	nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
-
-	return 0;
-}
-
-static int
-gf110_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gf110_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	g84_sensor_setup(&priv->base.base);
-
-	priv->base.base.pwm_ctrl = gf110_fan_pwm_ctrl;
-	priv->base.base.pwm_get = gf110_fan_pwm_get;
-	priv->base.base.pwm_set = gf110_fan_pwm_set;
-	priv->base.base.pwm_clock = gf110_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.base.fan_sense = gt215_therm_fan_sense;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-struct nvkm_oclass
-gf110_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0xd0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf110_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = gf110_therm_init,
-		.fini = g84_therm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
new file mode 100644
index 000000000000..06dcfd6ee966
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static int
+pwm_info(struct nvkm_therm *therm, int line)
+{
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 gpio = nvkm_rd32(device, 0x00d610 + (line * 0x04));
+
+	switch (gpio & 0x000000c0) {
+	case 0x00000000: /* normal mode, possibly pwm forced off by us */
+	case 0x00000040: /* nvio special */
+		switch (gpio & 0x0000001f) {
+		case 0x00: return 2;
+		case 0x19: return 1;
+		case 0x1c: return 0;
+		case 0x1e: return 2;
+		default:
+			break;
+		}
+	default:
+		break;
+	}
+
+	nvkm_error(subdev, "GPIO %d unknown PWM: %08x\n", line, gpio);
+	return -ENODEV;
+}
+
+static int
+gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	u32 data = enable ? 0x00000040 : 0x00000000;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+	else if (indx < 2)
+		nvkm_mask(device, 0x00d610 + (line * 0x04), 0x000000c0, data);
+	/* nothing to do for indx == 2, it seems hardwired to PTHERM */
+	return 0;
+}
+
+static int
+gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+	else if (indx < 2) {
+		if (nvkm_rd32(device, 0x00d610 + (line * 0x04)) & 0x00000040) {
+			*divs = nvkm_rd32(device, 0x00e114 + (indx * 8));
+			*duty = nvkm_rd32(device, 0x00e118 + (indx * 8));
+			return 0;
+		}
+	} else if (indx == 2) {
+		*divs = nvkm_rd32(device, 0x0200d8) & 0x1fff;
+		*duty = nvkm_rd32(device, 0x0200dc) & 0x1fff;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int
+gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+	else if (indx < 2) {
+		nvkm_wr32(device, 0x00e114 + (indx * 8), divs);
+		nvkm_wr32(device, 0x00e118 + (indx * 8), duty | 0x80000000);
+	} else if (indx == 2) {
+		nvkm_mask(device, 0x0200d8, 0x1fff, divs); /* keep the high bits */
+		nvkm_wr32(device, 0x0200dc, duty | 0x40000000);
+	}
+	return 0;
+}
+
+static int
+gf119_fan_pwm_clock(struct nvkm_therm *therm, int line)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return 0;
+	else if (indx < 2)
+		return (device->crystal * 1000) / 20;
+	else
+		return device->crystal * 1000 / 10;
+}
+
+void
+gf119_therm_init(struct nvkm_therm *therm)
+{
+	struct nvkm_device *device = therm->subdev.device;
+
+	g84_sensor_setup(therm);
+
+	/* enable fan tach, count revolutions per-second */
+	nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
+	if (therm->fan->tach.func != DCB_GPIO_UNUSED) {
+		nvkm_mask(device, 0x00d79c, 0x000000ff, therm->fan->tach.line);
+		nvkm_wr32(device, 0x00e724, device->crystal * 1000);
+		nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
+	}
+	nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
+}
+
+static const struct nvkm_therm_func
+gf119_therm = {
+	.init = gf119_therm_init,
+	.fini = g84_therm_fini,
+	.pwm_ctrl = gf119_fan_pwm_ctrl,
+	.pwm_get = gf119_fan_pwm_get,
+	.pwm_set = gf119_fan_pwm_set,
+	.pwm_clock = gf119_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.fan_sense = gt215_therm_fan_sense,
+	.program_alarms = nvkm_therm_program_alarms_polling,
+};
+
+int
+gf119_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&gf119_therm, device, index, ptherm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
index 2fd110f09878..86848ece4d89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
@@ -23,12 +23,6 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
-struct gm107_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 static int
 gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 {
@@ -39,55 +33,43 @@ gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 static int
 gm107_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 {
-	*divs = nv_rd32(therm, 0x10eb20) & 0x1fff;
-	*duty = nv_rd32(therm, 0x10eb24) & 0x1fff;
+	struct nvkm_device *device = therm->subdev.device;
+	*divs = nvkm_rd32(device, 0x10eb20) & 0x1fff;
+	*duty = nvkm_rd32(device, 0x10eb24) & 0x1fff;
 	return 0;
 }
 
 static int
 gm107_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
 {
-	nv_mask(therm, 0x10eb10, 0x1fff, divs); /* keep the high bits */
-	nv_wr32(therm, 0x10eb14, duty | 0x80000000);
+	struct nvkm_device *device = therm->subdev.device;
+	nvkm_mask(device, 0x10eb10, 0x1fff, divs); /* keep the high bits */
+	nvkm_wr32(device, 0x10eb14, duty | 0x80000000);
 	return 0;
 }
 
 static int
 gm107_fan_pwm_clock(struct nvkm_therm *therm, int line)
 {
-	return nv_device(therm)->crystal * 1000;
+	return therm->subdev.device->crystal * 1000;
 }
 
-static int
-gm107_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gm107_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_therm_func
+gm107_therm = {
+	.init = gf119_therm_init,
+	.fini = g84_therm_fini,
+	.pwm_ctrl = gm107_fan_pwm_ctrl,
+	.pwm_get = gm107_fan_pwm_get,
+	.pwm_set = gm107_fan_pwm_set,
+	.pwm_clock = gm107_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.fan_sense = gt215_therm_fan_sense,
+	.program_alarms = nvkm_therm_program_alarms_polling,
+};
 
-	priv->base.base.pwm_ctrl = gm107_fan_pwm_ctrl;
-	priv->base.base.pwm_get = gm107_fan_pwm_get;
-	priv->base.base.pwm_set = gm107_fan_pwm_set;
-	priv->base.base.pwm_clock = gm107_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.base.fan_sense = gt215_therm_fan_sense;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	return nvkm_therm_preinit(&priv->base.base);
+int
+gm107_therm_new(struct nvkm_device *device, int index,
+		struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&gm107_therm, device, index, ptherm);
 }
-
-struct nvkm_oclass
-gm107_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x117),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = gf110_therm_init,
-		.fini = g84_therm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index e99be20332f2..c08097f2aff5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -23,78 +23,53 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <subdev/gpio.h>
 
-struct gt215_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 int
 gt215_therm_fan_sense(struct nvkm_therm *therm)
 {
-	u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
-	u32 ctrl = nv_rd32(therm, 0x00e720);
+	struct nvkm_device *device = therm->subdev.device;
+	u32 tach = nvkm_rd32(device, 0x00e728) & 0x0000ffff;
+	u32 ctrl = nvkm_rd32(device, 0x00e720);
 	if (ctrl & 0x00000001)
 		return tach * 60 / 2;
 	return -ENODEV;
 }
 
-static int
-gt215_therm_init(struct nvkm_object *object)
+static void
+gt215_therm_init(struct nvkm_therm *therm)
 {
-	struct gt215_therm_priv *priv = (void *)object;
-	struct dcb_gpio_func *tach = &priv->base.fan->tach;
-	int ret;
-
-	ret = nvkm_therm_init(&priv->base.base);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = therm->subdev.device;
+	struct dcb_gpio_func *tach = &therm->fan->tach;
 
-	g84_sensor_setup(&priv->base.base);
+	g84_sensor_setup(therm);
 
 	/* enable fan tach, count revolutions per-second */
-	nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+	nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
 	if (tach->func != DCB_GPIO_UNUSED) {
-		nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
-		nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16);
-		nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+		nvkm_wr32(device, 0x00e724, device->crystal * 1000);
+		nvkm_mask(device, 0x00e720, 0x001f0000, tach->line << 16);
+		nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
 	}
-	nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
-
-	return 0;
+	nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
 }
 
-static int
-gt215_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gt215_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+static const struct nvkm_therm_func
+gt215_therm = {
+	.init = gt215_therm_init,
+	.fini = g84_therm_fini,
+	.pwm_ctrl = nv50_fan_pwm_ctrl,
+	.pwm_get = nv50_fan_pwm_get,
+	.pwm_set = nv50_fan_pwm_set,
+	.pwm_clock = nv50_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.fan_sense = gt215_therm_fan_sense,
+	.program_alarms = nvkm_therm_program_alarms_polling,
+};
 
-	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv50_fan_pwm_get;
-	priv->base.base.pwm_set = nv50_fan_pwm_set;
-	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.base.fan_sense = gt215_therm_fan_sense;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	return nvkm_therm_preinit(&priv->base.base);
+int
+gt215_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&gt215_therm, device, index, ptherm);
 }
-
-struct nvkm_oclass
-gt215_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = gt215_therm_init,
-		.fini = g84_therm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index 09fc4605e853..6e0ddc1bb583 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -27,16 +27,16 @@
 #include <subdev/i2c.h>
 
 static bool
-probe_monitoring_device(struct nvkm_i2c_port *i2c,
+probe_monitoring_device(struct nvkm_i2c_bus *bus,
 			struct i2c_board_info *info, void *data)
 {
-	struct nvkm_therm_priv *priv = data;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_therm *therm = data;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	struct i2c_client *client;
 
 	request_module("%s%s", I2C_MODULE_PREFIX, info->type);
 
-	client = i2c_new_device(&i2c->adapter, info);
+	client = i2c_new_device(&bus->i2c, info);
 	if (!client)
 		return false;
 
@@ -46,15 +46,15 @@ probe_monitoring_device(struct nvkm_i2c_port *i2c,
 		return false;
 	}
 
-	nv_info(priv,
-		"Found an %s at address 0x%x (controlled by lm_sensors, "
-		"temp offset %+i C)\n",
-		info->type, info->addr, sensor->offset_constant);
-	priv->ic = client;
+	nvkm_debug(&therm->subdev,
+		   "Found an %s at address 0x%x (controlled by lm_sensors, "
+		   "temp offset %+i C)\n",
+		   info->type, info->addr, sensor->offset_constant);
+	therm->ic = client;
 	return true;
 }
 
-static struct nvkm_i2c_board_info
+static struct nvkm_i2c_bus_probe
 nv_board_infos[] = {
 	{ { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
 	{ { I2C_BOARD_INFO("w83781d", 0x2d) }, 0  },
@@ -82,38 +82,43 @@ nv_board_infos[] = {
 void
 nvkm_therm_ic_ctor(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_bios *bios = nvkm_bios(therm);
-	struct nvkm_i2c *i2c = nvkm_i2c(therm);
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_i2c *i2c = device->i2c;
+	struct nvkm_i2c_bus *bus;
 	struct nvbios_extdev_func extdev_entry;
 
+	bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+	if (!bus)
+		return;
+
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
-		struct nvkm_i2c_board_info board[] = {
+		struct nvkm_i2c_bus_probe board[] = {
 		  { { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
 		  { }
 		};
 
-		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
-			      board, probe_monitoring_device, therm);
-		if (priv->ic)
+		nvkm_i2c_bus_probe(bus, "monitoring device", board,
+				   probe_monitoring_device, therm);
+		if (therm->ic)
 			return;
 	}
 
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
-		struct nvkm_i2c_board_info board[] = {
+		struct nvkm_i2c_bus_probe board[] = {
 		  { { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
 		  { }
 		};
 
-		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
-			      board, probe_monitoring_device, therm);
-		if (priv->ic)
+		nvkm_i2c_bus_probe(bus, "monitoring device", board,
+				   probe_monitoring_device, therm);
+		if (therm->ic)
 			return;
 	}
 
 	/* The vbios doesn't provide the address of an existing monitoring
 	   device. Let's try our static list.
 	 */
-	i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
-		      nv_board_infos, probe_monitoring_device, therm);
+	nvkm_i2c_bus_probe(bus, "monitoring device", nv_board_infos,
+			   probe_monitoring_device, therm);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
index 8496fffd4688..6326fdc5a48d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
@@ -24,26 +24,17 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
-struct nv40_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
 
 static enum nv40_sensor_style
 nv40_sensor_style(struct nvkm_therm *therm)
 {
-	struct nvkm_device *device = nv_device(therm);
-
-	switch (device->chipset) {
+	switch (therm->subdev.device->chipset) {
 	case 0x43:
 	case 0x44:
 	case 0x4a:
 	case 0x47:
 		return OLD_STYLE;
-
 	case 0x46:
 	case 0x49:
 	case 0x4b:
@@ -61,18 +52,19 @@ nv40_sensor_style(struct nvkm_therm *therm)
 static int
 nv40_sensor_setup(struct nvkm_therm *therm)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	enum nv40_sensor_style style = nv40_sensor_style(therm);
 
 	/* enable ADC readout and disable the ALARM threshold */
 	if (style == NEW_STYLE) {
-		nv_mask(therm, 0x15b8, 0x80000000, 0);
-		nv_wr32(therm, 0x15b0, 0x80003fff);
+		nvkm_mask(device, 0x15b8, 0x80000000, 0);
+		nvkm_wr32(device, 0x15b0, 0x80003fff);
 		mdelay(20); /* wait for the temperature to stabilize */
-		return nv_rd32(therm, 0x15b4) & 0x3fff;
+		return nvkm_rd32(device, 0x15b4) & 0x3fff;
 	} else if (style == OLD_STYLE) {
-		nv_wr32(therm, 0x15b0, 0xff);
+		nvkm_wr32(device, 0x15b0, 0xff);
 		mdelay(20); /* wait for the temperature to stabilize */
-		return nv_rd32(therm, 0x15b4) & 0xff;
+		return nvkm_rd32(device, 0x15b4) & 0xff;
 	} else
 		return -ENODEV;
 }
@@ -80,17 +72,17 @@ nv40_sensor_setup(struct nvkm_therm *therm)
 static int
 nv40_temp_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	enum nv40_sensor_style style = nv40_sensor_style(therm);
 	int core_temp;
 
 	if (style == NEW_STYLE) {
-		nv_wr32(therm, 0x15b0, 0x80003fff);
-		core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
+		nvkm_wr32(device, 0x15b0, 0x80003fff);
+		core_temp = nvkm_rd32(device, 0x15b4) & 0x3fff;
 	} else if (style == OLD_STYLE) {
-		nv_wr32(therm, 0x15b0, 0xff);
-		core_temp = nv_rd32(therm, 0x15b4) & 0xff;
+		nvkm_wr32(device, 0x15b0, 0xff);
+		core_temp = nvkm_rd32(device, 0x15b4) & 0xff;
 	} else
 		return -ENODEV;
 
@@ -113,11 +105,13 @@ nv40_temp_get(struct nvkm_therm *therm)
 static int
 nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 mask = enable ? 0x80000000 : 0x00000000;
-	if      (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask);
-	else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask);
+	if      (line == 2) nvkm_mask(device, 0x0010f0, 0x80000000, mask);
+	else if (line == 9) nvkm_mask(device, 0x0015f4, 0x80000000, mask);
 	else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
 		return -ENODEV;
 	}
 	return 0;
@@ -126,8 +120,10 @@ nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 static int
 nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	if (line == 2) {
-		u32 reg = nv_rd32(therm, 0x0010f0);
+		u32 reg = nvkm_rd32(device, 0x0010f0);
 		if (reg & 0x80000000) {
 			*duty = (reg & 0x7fff0000) >> 16;
 			*divs = (reg & 0x00007fff);
@@ -135,14 +131,14 @@ nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 		}
 	} else
 	if (line == 9) {
-		u32 reg = nv_rd32(therm, 0x0015f4);
+		u32 reg = nvkm_rd32(device, 0x0015f4);
 		if (reg & 0x80000000) {
-			*divs = nv_rd32(therm, 0x0015f8);
+			*divs = nvkm_rd32(device, 0x0015f8);
 			*duty = (reg & 0x7fffffff);
 			return 0;
 		}
 	} else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
 		return -ENODEV;
 	}
 
@@ -152,14 +148,16 @@ nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 static int
 nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	if (line == 2) {
-		nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
+		nvkm_mask(device, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
 	} else
 	if (line == 9) {
-		nv_wr32(therm, 0x0015f8, divs);
-		nv_mask(therm, 0x0015f4, 0x7fffffff, duty);
+		nvkm_wr32(device, 0x0015f8, divs);
+		nvkm_mask(device, 0x0015f4, 0x7fffffff, duty);
 	} else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
 		return -ENODEV;
 	}
 
@@ -167,59 +165,40 @@ nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
 }
 
 void
-nv40_therm_intr(struct nvkm_subdev *subdev)
+nv40_therm_intr(struct nvkm_therm *therm)
 {
-	struct nvkm_therm *therm = nvkm_therm(subdev);
-	uint32_t stat = nv_rd32(therm, 0x1100);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	uint32_t stat = nvkm_rd32(device, 0x1100);
 
 	/* processing */
 
 	/* ack all IRQs */
-	nv_wr32(therm, 0x1100, 0x70000);
+	nvkm_wr32(device, 0x1100, 0x70000);
 
-	nv_error(therm, "THERM received an IRQ: stat = %x\n", stat);
+	nvkm_error(subdev, "THERM received an IRQ: stat = %x\n", stat);
 }
 
-static int
-nv40_therm_ctor(struct nvkm_object *parent,
-		struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static void
+nv40_therm_init(struct nvkm_therm *therm)
 {
-	struct nv40_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv40_fan_pwm_get;
-	priv->base.base.pwm_set = nv40_fan_pwm_set;
-	priv->base.base.temp_get = nv40_temp_get;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	nv_subdev(priv)->intr = nv40_therm_intr;
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-static int
-nv40_therm_init(struct nvkm_object *object)
-{
-	struct nvkm_therm *therm = (void *)object;
-
 	nv40_sensor_setup(therm);
-
-	return _nvkm_therm_init(object);
 }
 
-struct nvkm_oclass
-nv40_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = nv40_therm_init,
-		.fini = _nvkm_therm_fini,
-	},
+static const struct nvkm_therm_func
+nv40_therm = {
+	.init = nv40_therm_init,
+	.intr = nv40_therm_intr,
+	.pwm_ctrl = nv40_fan_pwm_ctrl,
+	.pwm_get = nv40_fan_pwm_get,
+	.pwm_set = nv40_fan_pwm_set,
+	.temp_get = nv40_temp_get,
+	.program_alarms = nvkm_therm_program_alarms_polling,
 };
+
+int
+nv40_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&nv40_therm, device, index, ptherm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
index 1ef59e8922d4..9b57b433d4cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
@@ -24,15 +24,11 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
-struct nv50_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 static int
 pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+
 	if (*line == 0x04) {
 		*ctrl = 0x00e100;
 		*line = 4;
@@ -48,7 +44,7 @@ pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
 		*line = 0;
 		*indx = 0;
 	} else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", *line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", *line);
 		return -ENODEV;
 	}
 
@@ -58,23 +54,25 @@ pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
 int
 nv50_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	u32 data = enable ? 0x00000001 : 0x00000000;
 	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
 	if (ret == 0)
-		nv_mask(therm, ctrl, 0x00010001 << line, data << line);
+		nvkm_mask(device, ctrl, 0x00010001 << line, data << line);
 	return ret;
 }
 
 int
 nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
 	if (ret)
 		return ret;
 
-	if (nv_rd32(therm, ctrl) & (1 << line)) {
-		*divs = nv_rd32(therm, 0x00e114 + (id * 8));
-		*duty = nv_rd32(therm, 0x00e118 + (id * 8));
+	if (nvkm_rd32(device, ctrl) & (1 << line)) {
+		*divs = nvkm_rd32(device, 0x00e114 + (id * 8));
+		*duty = nvkm_rd32(device, 0x00e118 + (id * 8));
 		return 0;
 	}
 
@@ -84,36 +82,36 @@ nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 int
 nv50_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
 	if (ret)
 		return ret;
 
-	nv_wr32(therm, 0x00e114 + (id * 8), divs);
-	nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
+	nvkm_wr32(device, 0x00e114 + (id * 8), divs);
+	nvkm_wr32(device, 0x00e118 + (id * 8), duty | 0x80000000);
 	return 0;
 }
 
 int
 nv50_fan_pwm_clock(struct nvkm_therm *therm, int line)
 {
-	int chipset = nv_device(therm)->chipset;
-	int crystal = nv_device(therm)->crystal;
+	struct nvkm_device *device = therm->subdev.device;
 	int pwm_clock;
 
 	/* determine the PWM source clock */
-	if (chipset > 0x50 && chipset < 0x94) {
-		u8 pwm_div = nv_rd32(therm, 0x410c);
-		if (nv_rd32(therm, 0xc040) & 0x800000) {
+	if (device->chipset > 0x50 && device->chipset < 0x94) {
+		u8 pwm_div = nvkm_rd32(device, 0x410c);
+		if (nvkm_rd32(device, 0xc040) & 0x800000) {
 			/* Use the HOST clock (100 MHz)
 			* Where does this constant (2.4) come from? */
 			pwm_clock = (100000000 >> pwm_div) * 10 / 24;
 		} else {
 			/* Where does this constant (20) come from? */
-			pwm_clock = (crystal * 1000) >> pwm_div;
+			pwm_clock = (device->crystal * 1000) >> pwm_div;
 			pwm_clock /= 20;
 		}
 	} else {
-		pwm_clock = (crystal * 1000) / 20;
+		pwm_clock = (device->crystal * 1000) / 20;
 	}
 
 	return pwm_clock;
@@ -122,18 +120,19 @@ nv50_fan_pwm_clock(struct nvkm_therm *therm, int line)
 static void
 nv50_sensor_setup(struct nvkm_therm *therm)
 {
-	nv_mask(therm, 0x20010, 0x40000000, 0x0);
+	struct nvkm_device *device = therm->subdev.device;
+	nvkm_mask(device, 0x20010, 0x40000000, 0x0);
 	mdelay(20); /* wait for the temperature to stabilize */
 }
 
 static int
 nv50_temp_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	int core_temp;
 
-	core_temp = nv_rd32(therm, 0x20014) & 0x3fff;
+	core_temp = nvkm_rd32(device, 0x20014) & 0x3fff;
 
 	/* if the slope or the offset is unset, do not use the sensor */
 	if (!sensor->slope_div || !sensor->slope_mult ||
@@ -151,48 +150,27 @@ nv50_temp_get(struct nvkm_therm *therm)
 	return core_temp;
 }
 
-static int
-nv50_therm_ctor(struct nvkm_object *parent,
-		struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct nv50_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv50_fan_pwm_get;
-	priv->base.base.pwm_set = nv50_fan_pwm_set;
-	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-	priv->base.base.temp_get = nv50_temp_get;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	nv_subdev(priv)->intr = nv40_therm_intr;
-
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-static int
-nv50_therm_init(struct nvkm_object *object)
+static void
+nv50_therm_init(struct nvkm_therm *therm)
 {
-	struct nvkm_therm *therm = (void *)object;
-
 	nv50_sensor_setup(therm);
-
-	return _nvkm_therm_init(object);
 }
 
-struct nvkm_oclass
-nv50_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = nv50_therm_init,
-		.fini = _nvkm_therm_fini,
-	},
+static const struct nvkm_therm_func
+nv50_therm = {
+	.init = nv50_therm_init,
+	.intr = nv40_therm_intr,
+	.pwm_ctrl = nv50_fan_pwm_ctrl,
+	.pwm_get = nv50_fan_pwm_get,
+	.pwm_set = nv50_fan_pwm_set,
+	.pwm_clock = nv50_fan_pwm_clock,
+	.temp_get = nv50_temp_get,
+	.program_alarms = nvkm_therm_program_alarms_polling,
 };
+
+int
+nv50_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&nv50_therm, device, index, ptherm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 916a149efe6e..235a5d8daff6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -1,5 +1,6 @@
 #ifndef __NVTHERM_PRIV_H__
 #define __NVTHERM_PRIV_H__
+#define nvkm_therm(p) container_of((p), struct nvkm_therm, subdev)
 /*
  * Copyright 2012 The Nouveau community
  *
@@ -28,8 +29,9 @@
 #include <subdev/bios/extdev.h>
 #include <subdev/bios/gpio.h>
 #include <subdev/bios/perf.h>
-#include <subdev/bios/therm.h>
-#include <subdev/timer.h>
+
+int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
+		    int index, struct nvkm_therm **);
 
 struct nvkm_fan {
 	struct nvkm_therm *parent;
@@ -48,59 +50,6 @@ struct nvkm_fan {
 	struct dcb_gpio_func tach;
 };
 
-enum nvkm_therm_thrs_direction {
-	NVKM_THERM_THRS_FALLING = 0,
-	NVKM_THERM_THRS_RISING = 1
-};
-
-enum nvkm_therm_thrs_state {
-	NVKM_THERM_THRS_LOWER = 0,
-	NVKM_THERM_THRS_HIGHER = 1
-};
-
-enum nvkm_therm_thrs {
-	NVKM_THERM_THRS_FANBOOST = 0,
-	NVKM_THERM_THRS_DOWNCLOCK = 1,
-	NVKM_THERM_THRS_CRITICAL = 2,
-	NVKM_THERM_THRS_SHUTDOWN = 3,
-	NVKM_THERM_THRS_NR
-};
-
-struct nvkm_therm_priv {
-	struct nvkm_therm base;
-
-	/* automatic thermal management */
-	struct nvkm_alarm alarm;
-	spinlock_t lock;
-	struct nvbios_therm_trip_point *last_trip;
-	int mode;
-	int cstate;
-	int suspend;
-
-	/* bios */
-	struct nvbios_therm_sensor bios_sensor;
-
-	/* fan priv */
-	struct nvkm_fan *fan;
-
-	/* alarms priv */
-	struct {
-		spinlock_t alarm_program_lock;
-		struct nvkm_alarm therm_poll_alarm;
-		enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
-		void (*program_alarms)(struct nvkm_therm *);
-	} sensor;
-
-	/* what should be done if the card overheats */
-	struct {
-		void (*downclock)(struct nvkm_therm *, bool active);
-		void (*pause)(struct nvkm_therm *, bool active);
-	} emergency;
-
-	/* ic */
-	struct i2c_client *ic;
-};
-
 int nvkm_therm_fan_mode(struct nvkm_therm *, int mode);
 int nvkm_therm_attr_get(struct nvkm_therm *, enum nvkm_therm_attr_type);
 int nvkm_therm_attr_set(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
@@ -117,8 +66,6 @@ int nvkm_therm_fan_set(struct nvkm_therm *, bool now, int percent);
 int nvkm_therm_fan_user_get(struct nvkm_therm *);
 int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent);
 
-int nvkm_therm_fan_sense(struct nvkm_therm *);
-
 int nvkm_therm_preinit(struct nvkm_therm *);
 
 int  nvkm_therm_sensor_init(struct nvkm_therm *);
@@ -134,18 +81,37 @@ void nvkm_therm_sensor_event(struct nvkm_therm *, enum nvkm_therm_thrs,
 			     enum nvkm_therm_thrs_direction);
 void nvkm_therm_program_alarms_polling(struct nvkm_therm *);
 
-void nv40_therm_intr(struct nvkm_subdev *);
+struct nvkm_therm_func {
+	void (*init)(struct nvkm_therm *);
+	void (*fini)(struct nvkm_therm *);
+	void (*intr)(struct nvkm_therm *);
+
+	int (*pwm_ctrl)(struct nvkm_therm *, int line, bool);
+	int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *);
+	int (*pwm_set)(struct nvkm_therm *, int line, u32, u32);
+	int (*pwm_clock)(struct nvkm_therm *, int line);
+
+	int (*temp_get)(struct nvkm_therm *);
+
+	int (*fan_sense)(struct nvkm_therm *);
+
+	void (*program_alarms)(struct nvkm_therm *);
+};
+
+void nv40_therm_intr(struct nvkm_therm *);
+
 int  nv50_fan_pwm_ctrl(struct nvkm_therm *, int, bool);
 int  nv50_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *);
 int  nv50_fan_pwm_set(struct nvkm_therm *, int, u32, u32);
 int  nv50_fan_pwm_clock(struct nvkm_therm *, int);
+
 int  g84_temp_get(struct nvkm_therm *);
 void g84_sensor_setup(struct nvkm_therm *);
-int  g84_therm_fini(struct nvkm_object *, bool suspend);
+void g84_therm_fini(struct nvkm_therm *);
 
 int gt215_therm_fan_sense(struct nvkm_therm *);
 
-int gf110_therm_init(struct nvkm_object *);
+void gf119_therm_init(struct nvkm_therm *);
 
 int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
 int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index aa13744f3854..b9703c02d8ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -26,29 +26,25 @@
 static void
 nvkm_therm_temp_set_defaults(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
+	therm->bios_sensor.offset_constant = 0;
 
-	priv->bios_sensor.offset_constant = 0;
+	therm->bios_sensor.thrs_fan_boost.temp = 90;
+	therm->bios_sensor.thrs_fan_boost.hysteresis = 3;
 
-	priv->bios_sensor.thrs_fan_boost.temp = 90;
-	priv->bios_sensor.thrs_fan_boost.hysteresis = 3;
+	therm->bios_sensor.thrs_down_clock.temp = 95;
+	therm->bios_sensor.thrs_down_clock.hysteresis = 3;
 
-	priv->bios_sensor.thrs_down_clock.temp = 95;
-	priv->bios_sensor.thrs_down_clock.hysteresis = 3;
+	therm->bios_sensor.thrs_critical.temp = 105;
+	therm->bios_sensor.thrs_critical.hysteresis = 5;
 
-	priv->bios_sensor.thrs_critical.temp = 105;
-	priv->bios_sensor.thrs_critical.hysteresis = 5;
-
-	priv->bios_sensor.thrs_shutdown.temp = 135;
-	priv->bios_sensor.thrs_shutdown.hysteresis = 5; /*not that it matters */
+	therm->bios_sensor.thrs_shutdown.temp = 135;
+	therm->bios_sensor.thrs_shutdown.hysteresis = 5; /* not that it matters */
 }
 
-
 static void
 nvkm_therm_temp_safety_checks(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *s = &priv->bios_sensor;
+	struct nvbios_therm_sensor *s = &therm->bios_sensor;
 
 	/* enforce a minimum hysteresis on thresholds */
 	s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
@@ -63,8 +59,7 @@ nvkm_therm_sensor_set_threshold_state(struct nvkm_therm *therm,
 				      enum nvkm_therm_thrs thrs,
 				      enum nvkm_therm_thrs_state st)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	priv->sensor.alarm_state[thrs] = st;
+	therm->sensor.alarm_state[thrs] = st;
 }
 
 /* must be called with alarm_program_lock taken ! */
@@ -72,8 +67,7 @@ enum nvkm_therm_thrs_state
 nvkm_therm_sensor_get_threshold_state(struct nvkm_therm *therm,
 				      enum nvkm_therm_thrs thrs)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	return priv->sensor.alarm_state[thrs];
+	return therm->sensor.alarm_state[thrs];
 }
 
 static void
@@ -87,22 +81,23 @@ void
 nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
 			enum nvkm_therm_thrs_direction dir)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
+	struct nvkm_subdev *subdev = &therm->subdev;
 	bool active;
 	const char *thresolds[] = {
 		"fanboost", "downclock", "critical", "shutdown"
 	};
-	int temperature = therm->temp_get(therm);
+	int temperature = therm->func->temp_get(therm);
 
 	if (thrs < 0 || thrs > 3)
 		return;
 
 	if (dir == NVKM_THERM_THRS_FALLING)
-		nv_info(therm, "temperature (%i C) went below the '%s' threshold\n",
-			temperature, thresolds[thrs]);
+		nvkm_info(subdev,
+			  "temperature (%i C) went below the '%s' threshold\n",
+			  temperature, thresolds[thrs]);
 	else
-		nv_info(therm, "temperature (%i C) hit the '%s' threshold\n",
-			temperature, thresolds[thrs]);
+		nvkm_info(subdev, "temperature (%i C) hit the '%s' threshold\n",
+			  temperature, thresolds[thrs]);
 
 	active = (dir == NVKM_THERM_THRS_RISING);
 	switch (thrs) {
@@ -113,12 +108,12 @@ nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
 		}
 		break;
 	case NVKM_THERM_THRS_DOWNCLOCK:
-		if (priv->emergency.downclock)
-			priv->emergency.downclock(therm, active);
+		if (therm->emergency.downclock)
+			therm->emergency.downclock(therm, active);
 		break;
 	case NVKM_THERM_THRS_CRITICAL:
-		if (priv->emergency.pause)
-			priv->emergency.pause(therm, active);
+		if (therm->emergency.pause)
+			therm->emergency.pause(therm, active);
 		break;
 	case NVKM_THERM_THRS_SHUTDOWN:
 		if (active) {
@@ -145,7 +140,7 @@ nvkm_therm_threshold_hyst_polling(struct nvkm_therm *therm,
 {
 	enum nvkm_therm_thrs_direction direction;
 	enum nvkm_therm_thrs_state prev_state, new_state;
-	int temp = therm->temp_get(therm);
+	int temp = therm->func->temp_get(therm);
 
 	prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
 
@@ -166,19 +161,19 @@ nvkm_therm_threshold_hyst_polling(struct nvkm_therm *therm,
 static void
 alarm_timer_callback(struct nvkm_alarm *alarm)
 {
-	struct nvkm_therm_priv *priv =
-	container_of(alarm, struct nvkm_therm_priv, sensor.therm_poll_alarm);
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
-	struct nvkm_timer *ptimer = nvkm_timer(priv);
-	struct nvkm_therm *therm = &priv->base;
+	struct nvkm_therm *therm =
+		container_of(alarm, struct nvkm_therm, sensor.therm_poll_alarm);
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
+	struct nvkm_timer *tmr = therm->subdev.device->timer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
 
 	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
 					  NVKM_THERM_THRS_FANBOOST);
 
-	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
+	nvkm_therm_threshold_hyst_polling(therm,
+					  &sensor->thrs_down_clock,
 					  NVKM_THERM_THRS_DOWNCLOCK);
 
 	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
@@ -187,46 +182,45 @@ alarm_timer_callback(struct nvkm_alarm *alarm)
 	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
 					  NVKM_THERM_THRS_SHUTDOWN);
 
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 
 	/* schedule the next poll in one second */
-	if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
-		ptimer->alarm(ptimer, 1000000000ULL, alarm);
+	if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+		nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
 }
 
 void
 nvkm_therm_program_alarms_polling(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
-
-	nv_debug(therm,
-		 "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
-		 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
-		 sensor->thrs_down_clock.temp,
-		 sensor->thrs_down_clock.hysteresis,
-		 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
-		 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
-
-	alarm_timer_callback(&priv->sensor.therm_poll_alarm);
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
+
+	nvkm_debug(&therm->subdev,
+		   "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+		   sensor->thrs_fan_boost.temp,
+		   sensor->thrs_fan_boost.hysteresis,
+		   sensor->thrs_down_clock.temp,
+		   sensor->thrs_down_clock.hysteresis,
+		   sensor->thrs_critical.temp,
+		   sensor->thrs_critical.hysteresis,
+		   sensor->thrs_shutdown.temp,
+		   sensor->thrs_shutdown.hysteresis);
+
+	alarm_timer_callback(&therm->sensor.therm_poll_alarm);
 }
 
 int
 nvkm_therm_sensor_init(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	priv->sensor.program_alarms(therm);
+	therm->func->program_alarms(therm);
 	return 0;
 }
 
 int
 nvkm_therm_sensor_fini(struct nvkm_therm *therm, bool suspend)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-
+	struct nvkm_timer *tmr = therm->subdev.device->timer;
 	if (suspend)
-		ptimer->alarm_cancel(ptimer, &priv->sensor.therm_poll_alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->sensor.therm_poll_alarm);
 	return 0;
 }
 
@@ -235,24 +229,24 @@ nvkm_therm_sensor_preinit(struct nvkm_therm *therm)
 {
 	const char *sensor_avail = "yes";
 
-	if (therm->temp_get(therm) < 0)
+	if (therm->func->temp_get(therm) < 0)
 		sensor_avail = "no";
 
-	nv_info(therm, "internal sensor: %s\n", sensor_avail);
+	nvkm_debug(&therm->subdev, "internal sensor: %s\n", sensor_avail);
 }
 
 int
 nvkm_therm_sensor_ctor(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_bios *bios = nvkm_bios(therm);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 
-	nvkm_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
+	nvkm_alarm_init(&therm->sensor.therm_poll_alarm, alarm_timer_callback);
 
 	nvkm_therm_temp_set_defaults(therm);
 	if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
-				      &priv->bios_sensor))
-		nv_error(therm, "nvbios_therm_sensor_parse failed\n");
+				      &therm->bios_sensor))
+		nvkm_error(subdev, "nvbios_therm_sensor_parse failed\n");
 	nvkm_therm_temp_safety_checks(therm);
 
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
index d1d38b4ba30a..e436f0ffe3f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
@@ -1,3 +1,5 @@
 nvkm-y += nvkm/subdev/timer/base.o
 nvkm-y += nvkm/subdev/timer/nv04.o
+nvkm-y += nvkm/subdev/timer/nv40.o
+nvkm-y += nvkm/subdev/timer/nv41.o
 nvkm-y += nvkm/subdev/timer/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index d894061ced52..d4dae1f12d62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -21,73 +21,131 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/timer.h>
+#include "priv.h"
 
-bool
-nvkm_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+u64
+nvkm_timer_read(struct nvkm_timer *tmr)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	u64 time0;
-
-	time0 = ptimer->read(ptimer);
-	do {
-		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
-			if ((nv_rd32(obj, addr) & mask) == data)
-				return true;
-		} else {
-			if ((nv_ro32(obj, addr) & mask) == data)
-				return true;
-		}
-	} while (ptimer->read(ptimer) - time0 < nsec);
+	return tmr->func->read(tmr);
+}
+
+void
+nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
+{
+	struct nvkm_alarm *alarm, *atemp;
+	unsigned long flags;
+	LIST_HEAD(exec);
+
+	/* move any due alarms off the pending list */
+	spin_lock_irqsave(&tmr->lock, flags);
+	list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
+		if (alarm->timestamp <= nvkm_timer_read(tmr))
+			list_move_tail(&alarm->head, &exec);
+	}
 
-	return false;
+	/* reschedule interrupt for next alarm time */
+	if (!list_empty(&tmr->alarms)) {
+		alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
+		tmr->func->alarm_init(tmr, alarm->timestamp);
+	} else {
+		tmr->func->alarm_fini(tmr);
+	}
+	spin_unlock_irqrestore(&tmr->lock, flags);
+
+	/* execute any pending alarm handlers */
+	list_for_each_entry_safe(alarm, atemp, &exec, head) {
+		list_del_init(&alarm->head);
+		alarm->func(alarm);
+	}
 }
 
-bool
-nvkm_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+void
+nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	u64 time0;
-
-	time0 = ptimer->read(ptimer);
-	do {
-		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
-			if ((nv_rd32(obj, addr) & mask) != data)
-				return true;
-		} else {
-			if ((nv_ro32(obj, addr) & mask) != data)
-				return true;
+	struct nvkm_alarm *list;
+	unsigned long flags;
+
+	alarm->timestamp = nvkm_timer_read(tmr) + nsec;
+
+	/* append new alarm to list, in soonest-alarm-first order */
+	spin_lock_irqsave(&tmr->lock, flags);
+	if (!nsec) {
+		if (!list_empty(&alarm->head))
+			list_del(&alarm->head);
+	} else {
+		list_for_each_entry(list, &tmr->alarms, head) {
+			if (list->timestamp > alarm->timestamp)
+				break;
 		}
-	} while (ptimer->read(ptimer) - time0 < nsec);
+		list_add_tail(&alarm->head, &list->head);
+	}
+	spin_unlock_irqrestore(&tmr->lock, flags);
 
-	return false;
+	/* process pending alarms */
+	nvkm_timer_alarm_trigger(tmr);
 }
 
-bool
-nvkm_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
+void
+nvkm_timer_alarm_cancel(struct nvkm_timer *tmr, struct nvkm_alarm *alarm)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	u64 time0;
+	unsigned long flags;
+	spin_lock_irqsave(&tmr->lock, flags);
+	list_del_init(&alarm->head);
+	spin_unlock_irqrestore(&tmr->lock, flags);
+}
 
-	time0 = ptimer->read(ptimer);
-	do {
-		if (func(data) == true)
-			return true;
-	} while (ptimer->read(ptimer) - time0 < nsec);
+static void
+nvkm_timer_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_timer *tmr = nvkm_timer(subdev);
+	tmr->func->intr(tmr);
+}
 
-	return false;
+static int
+nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_timer *tmr = nvkm_timer(subdev);
+	tmr->func->alarm_fini(tmr);
+	return 0;
 }
 
-void
-nvkm_timer_alarm(void *obj, u32 nsec, struct nvkm_alarm *alarm)
+static int
+nvkm_timer_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	ptimer->alarm(ptimer, nsec, alarm);
+	struct nvkm_timer *tmr = nvkm_timer(subdev);
+	if (tmr->func->init)
+		tmr->func->init(tmr);
+	tmr->func->time(tmr, ktime_to_ns(ktime_get()));
+	nvkm_timer_alarm_trigger(tmr);
+	return 0;
 }
 
-void
-nvkm_timer_alarm_cancel(void *obj, struct nvkm_alarm *alarm)
+static void *
+nvkm_timer_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	ptimer->alarm_cancel(ptimer, alarm);
+	return nvkm_timer(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_timer = {
+	.dtor = nvkm_timer_dtor,
+	.init = nvkm_timer_init,
+	.fini = nvkm_timer_fini,
+	.intr = nvkm_timer_intr,
+};
+
+int
+nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
+		int index, struct nvkm_timer **ptmr)
+{
+	struct nvkm_timer *tmr;
+
+	if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_timer, device, index, 0, &tmr->subdev);
+	tmr->func = func;
+	INIT_LIST_HEAD(&tmr->alarms);
+	spin_lock_init(&tmr->lock);
+	return 0;
 }
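
Two points are worth noting in the rewritten base code: nvkm_timer_alarm() keeps the pending list sorted soonest-first, so reprogramming the hardware with the head entry after every change is always correct; and an nsec of zero simply unlinks a pending alarm, making it an alternative spelling of nvkm_timer_alarm_cancel(). A short usage sketch (tmr and obj are hypothetical):

	/* fire roughly 5ms from now */
	nvkm_timer_alarm(tmr, 5000000, &obj->alarm);

	/* either call removes it again while still pending */
	nvkm_timer_alarm(tmr, 0, &obj->alarm);
	nvkm_timer_alarm_cancel(tmr, &obj->alarm);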
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
index 80e38063dd9b..9ed5f64912d0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
@@ -21,36 +21,19 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
-static int
-gk20a_timer_init(struct nvkm_object *object)
-{
-	struct nv04_timer_priv *priv = (void *)object;
-	u32 hi = upper_32_bits(priv->suspend_time);
-	u32 lo = lower_32_bits(priv->suspend_time);
-	int ret;
-
-	ret = nvkm_timer_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_debug(priv, "time low        : 0x%08x\n", lo);
-	nv_debug(priv, "time high       : 0x%08x\n", hi);
+static const struct nvkm_timer_func
+gk20a_timer = {
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
+};
 
-	/* restore the time before suspend */
-	nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
-	nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
-	return 0;
+int
+gk20a_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+	return nvkm_timer_new_(&gk20a_timer, device, index, ptmr);
 }
-
-struct nvkm_oclass
-gk20a_timer_oclass = {
-	.handle = NV_SUBDEV(TIMER, 0xff),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_timer_ctor,
-		.dtor = nv04_timer_dtor,
-		.init = gk20a_timer_init,
-		.fini = nv04_timer_fini,
-	}
-};
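
gk20a loses its custom init entirely because the suspend/resume time handling moved into common code: nvkm_timer_init() in base.c now seeds the counter itself through the .time hook,

	tmr->func->time(tmr, ktime_to_ns(ktime_get()));

so per-chip bookkeeping of a suspend_time value is no longer needed; the counter is simply resynchronized to CLOCK_MONOTONIC on every init and resume.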
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 6b7facbe59a2..7b9ce87f0617 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -21,165 +21,92 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+#include "regsnv04.h"
 
-#include <core/device.h>
-
-static u64
-nv04_timer_read(struct nvkm_timer *ptimer)
+void
+nv04_timer_time(struct nvkm_timer *tmr, u64 time)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
-	u32 hi, lo;
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 hi = upper_32_bits(time);
+	u32 lo = lower_32_bits(time);
 
-	do {
-		hi = nv_rd32(priv, NV04_PTIMER_TIME_1);
-		lo = nv_rd32(priv, NV04_PTIMER_TIME_0);
-	} while (hi != nv_rd32(priv, NV04_PTIMER_TIME_1));
+	nvkm_debug(subdev, "time low        : %08x\n", lo);
+	nvkm_debug(subdev, "time high       : %08x\n", hi);
 
-	return ((u64)hi << 32 | lo);
+	nvkm_wr32(device, NV04_PTIMER_TIME_1, hi);
+	nvkm_wr32(device, NV04_PTIMER_TIME_0, lo);
 }
 
-static void
-nv04_timer_alarm_trigger(struct nvkm_timer *ptimer)
+u64
+nv04_timer_read(struct nvkm_timer *tmr)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
-	struct nvkm_alarm *alarm, *atemp;
-	unsigned long flags;
-	LIST_HEAD(exec);
-
-	/* move any due alarms off the pending list */
-	spin_lock_irqsave(&priv->lock, flags);
-	list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) {
-		if (alarm->timestamp <= ptimer->read(ptimer))
-			list_move_tail(&alarm->head, &exec);
-	}
+	struct nvkm_device *device = tmr->subdev.device;
+	u32 hi, lo;
 
-	/* reschedule interrupt for next alarm time */
-	if (!list_empty(&priv->alarms)) {
-		alarm = list_first_entry(&priv->alarms, typeof(*alarm), head);
-		nv_wr32(priv, NV04_PTIMER_ALARM_0, alarm->timestamp);
-		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000001);
-	} else {
-		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	do {
+		hi = nvkm_rd32(device, NV04_PTIMER_TIME_1);
+		lo = nvkm_rd32(device, NV04_PTIMER_TIME_0);
+	} while (hi != nvkm_rd32(device, NV04_PTIMER_TIME_1));
 
-	/* execute any pending alarm handlers */
-	list_for_each_entry_safe(alarm, atemp, &exec, head) {
-		list_del_init(&alarm->head);
-		alarm->func(alarm);
-	}
+	return ((u64)hi << 32 | lo);
 }
 
-static void
-nv04_timer_alarm(struct nvkm_timer *ptimer, u64 time, struct nvkm_alarm *alarm)
+void
+nv04_timer_alarm_fini(struct nvkm_timer *tmr)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
-	struct nvkm_alarm *list;
-	unsigned long flags;
-
-	alarm->timestamp = ptimer->read(ptimer) + time;
-
-	/* append new alarm to list, in soonest-alarm-first order */
-	spin_lock_irqsave(&priv->lock, flags);
-	if (!time) {
-		if (!list_empty(&alarm->head))
-			list_del(&alarm->head);
-	} else {
-		list_for_each_entry(list, &priv->alarms, head) {
-			if (list->timestamp > alarm->timestamp)
-				break;
-		}
-		list_add_tail(&alarm->head, &list->head);
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* process pending alarms */
-	nv04_timer_alarm_trigger(ptimer);
+	struct nvkm_device *device = tmr->subdev.device;
+	nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000000);
 }
 
-static void
-nv04_timer_alarm_cancel(struct nvkm_timer *ptimer, struct nvkm_alarm *alarm)
+void
+nv04_timer_alarm_init(struct nvkm_timer *tmr, u32 time)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
-	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	list_del_init(&alarm->head);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	struct nvkm_device *device = tmr->subdev.device;
+	nvkm_wr32(device, NV04_PTIMER_ALARM_0, time);
+	nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000001);
 }
 
-static void
-nv04_timer_intr(struct nvkm_subdev *subdev)
+void
+nv04_timer_intr(struct nvkm_timer *tmr)
 {
-	struct nv04_timer_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0);
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
 
 	if (stat & 0x00000001) {
-		nv04_timer_alarm_trigger(&priv->base);
-		nv_wr32(priv, NV04_PTIMER_INTR_0, 0x00000001);
+		nvkm_timer_alarm_trigger(tmr);
+		nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat) {
-		nv_error(priv, "unknown stat 0x%08x\n", stat);
-		nv_wr32(priv, NV04_PTIMER_INTR_0, stat);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_wr32(device, NV04_PTIMER_INTR_0, stat);
 	}
 }
 
-int
-nv04_timer_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv04_timer_priv *priv = (void *)object;
-	if (suspend)
-		priv->suspend_time = nv04_timer_read(&priv->base);
-	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
-	return nvkm_timer_fini(&priv->base, suspend);
-}
-
-static int
-nv04_timer_init(struct nvkm_object *object)
+static void
+nv04_timer_init(struct nvkm_timer *tmr)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nv04_timer_priv *priv = (void *)object;
-	u32 m = 1, f, n, d, lo, hi;
-	int ret;
-
-	ret = nvkm_timer_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 f = 0; /*XXX: nvclk */
+	u32 n, d;
 
 	/* aim for 31.25MHz, which gives us nanosecond timestamps */
 	d = 1000000 / 32;
-
-	/* determine base clock for timer source */
-#if 0 /*XXX*/
-	if (device->chipset < 0x40) {
-		n = nvkm_hw_get_clock(device, PLL_CORE);
-	} else
-#endif
-	if (device->chipset <= 0x40) {
-		/*XXX: figure this out */
-		f = -1;
-		n = 0;
-	} else {
-		f = device->crystal;
-		n = f;
-		while (n < (d * 2)) {
-			n += (n / m);
-			m++;
-		}
-
-		nv_wr32(priv, 0x009220, m - 1);
-	}
-
-	if (!n) {
-		nv_warn(priv, "unknown input clock freq\n");
-		if (!nv_rd32(priv, NV04_PTIMER_NUMERATOR) ||
-		    !nv_rd32(priv, NV04_PTIMER_DENOMINATOR)) {
-			nv_wr32(priv, NV04_PTIMER_NUMERATOR, 1);
-			nv_wr32(priv, NV04_PTIMER_DENOMINATOR, 1);
+	n = f;
+
+	if (!f) {
+		n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
+		d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
+		if (!n || !d) {
+			n = 1;
+			d = 1;
 		}
-		return 0;
+		nvkm_warn(subdev, "unknown input clock freq\n");
 	}
 
 	/* reduce ratio to acceptable values */
@@ -198,65 +125,27 @@ nv04_timer_init(struct nvkm_object *object)
 		d >>= 1;
 	}
 
-	/* restore the time before suspend */
-	lo = priv->suspend_time;
-	hi = (priv->suspend_time >> 32);
+	nvkm_debug(subdev, "input frequency : %dHz\n", f);
+	nvkm_debug(subdev, "numerator       : %08x\n", n);
+	nvkm_debug(subdev, "denominator     : %08x\n", d);
+	nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
 
-	nv_debug(priv, "input frequency : %dHz\n", f);
-	nv_debug(priv, "input multiplier: %d\n", m);
-	nv_debug(priv, "numerator       : 0x%08x\n", n);
-	nv_debug(priv, "denominator     : 0x%08x\n", d);
-	nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
-	nv_debug(priv, "time low        : 0x%08x\n", lo);
-	nv_debug(priv, "time high       : 0x%08x\n", hi);
-
-	nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
-	nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
-	nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
-	nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
-	nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
-	return 0;
+	nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+	nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
 }
 
-void
-nv04_timer_dtor(struct nvkm_object *object)
-{
-	struct nv04_timer_priv *priv = (void *)object;
-	return nvkm_timer_destroy(&priv->base);
-}
+static const struct nvkm_timer_func
+nv04_timer = {
+	.init = nv04_timer_init,
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
+};
 
 int
-nv04_timer_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+nv04_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
 {
-	struct nv04_timer_priv *priv;
-	int ret;
-
-	ret = nvkm_timer_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.intr = nv04_timer_intr;
-	priv->base.read = nv04_timer_read;
-	priv->base.alarm = nv04_timer_alarm;
-	priv->base.alarm_cancel = nv04_timer_alarm_cancel;
-	priv->suspend_time = 0;
-
-	INIT_LIST_HEAD(&priv->alarms);
-	spin_lock_init(&priv->lock);
-	return 0;
+	return nvkm_timer_new_(&nv04_timer, device, index, ptmr);
 }
-
-struct nvkm_oclass
-nv04_timer_oclass = {
-	.handle = NV_SUBDEV(TIMER, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_timer_ctor,
-		.dtor = nv04_timer_dtor,
-		.init = nv04_timer_init,
-		.fini = nv04_timer_fini,
-	}
-};
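
The hi/lo/hi loop in nv04_timer_read() is the standard way to read a 64-bit counter exposed as two 32-bit registers: if the low word carries into the high word between the two reads, the second read of the high word differs and the sequence retries. The generic shape, with hypothetical read32()/TIME_* names:

	do {
		hi = read32(TIME_1);
		lo = read32(TIME_0);
	} while (hi != read32(TIME_1));
	time = ((u64)hi << 32) | lo;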
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
deleted file mode 100644
index 89996a9826b1..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __NVKM_TIMER_NV04_H__
-#define __NVKM_TIMER_NV04_H__
-#include "priv.h"
-
-#define NV04_PTIMER_INTR_0      0x009100
-#define NV04_PTIMER_INTR_EN_0   0x009140
-#define NV04_PTIMER_NUMERATOR   0x009200
-#define NV04_PTIMER_DENOMINATOR 0x009210
-#define NV04_PTIMER_TIME_0      0x009400
-#define NV04_PTIMER_TIME_1      0x009410
-#define NV04_PTIMER_ALARM_0     0x009420
-
-struct nv04_timer_priv {
-	struct nvkm_timer base;
-	struct list_head alarms;
-	spinlock_t lock;
-	u64 suspend_time;
-};
-
-int  nv04_timer_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *, u32,
-		     struct nvkm_object **);
-void nv04_timer_dtor(struct nvkm_object *);
-int  nv04_timer_fini(struct nvkm_object *, bool);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
new file mode 100644
index 000000000000..bb99a152f26e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "regsnv04.h"
+
+static void
+nv40_timer_init(struct nvkm_timer *tmr)
+{
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 f = 0; /*XXX: figure this out */
+	u32 n, d;
+
+	/* aim for 31.25MHz, which gives us nanosecond timestamps */
+	d = 1000000 / 32;
+	n = f;
+
+	if (!f) {
+		n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
+		d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
+		if (!n || !d) {
+			n = 1;
+			d = 1;
+		}
+		nvkm_warn(subdev, "unknown input clock freq\n");
+	}
+
+	/* reduce ratio to acceptable values */
+	while (((n % 5) == 0) && ((d % 5) == 0)) {
+		n /= 5;
+		d /= 5;
+	}
+
+	while (((n % 2) == 0) && ((d % 2) == 0)) {
+		n /= 2;
+		d /= 2;
+	}
+
+	while (n > 0xffff || d > 0xffff) {
+		n >>= 1;
+		d >>= 1;
+	}
+
+	nvkm_debug(subdev, "input frequency : %dHz\n", f);
+	nvkm_debug(subdev, "numerator       : %08x\n", n);
+	nvkm_debug(subdev, "denominator     : %08x\n", d);
+	nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
+
+	nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+	nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
+}
+
+static const struct nvkm_timer_func
+nv40_timer = {
+	.init = nv40_timer_init,
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
+};
+
+int
+nv40_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+	return nvkm_timer_new_(&nv40_timer, device, index, ptmr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
new file mode 100644
index 000000000000..3cf9ec1b1b57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "regsnv04.h"
+
+static void
+nv41_timer_init(struct nvkm_timer *tmr)
+{
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 f = device->crystal;
+	u32 m = 1, n, d;
+
+	/* aim for 31.25MHz, which gives us nanosecond timestamps */
+	d = 1000000 / 32;
+	n = f;
+
+	while (n < (d * 2)) {
+		n += (n / m);
+		m++;
+	}
+
+	/* reduce ratio to acceptable values */
+	while (((n % 5) == 0) && ((d % 5) == 0)) {
+		n /= 5;
+		d /= 5;
+	}
+
+	while (((n % 2) == 0) && ((d % 2) == 0)) {
+		n /= 2;
+		d /= 2;
+	}
+
+	while (n > 0xffff || d > 0xffff) {
+		n >>= 1;
+		d >>= 1;
+	}
+
+	nvkm_debug(subdev, "input frequency : %dHz\n", f);
+	nvkm_debug(subdev, "input multiplier: %d\n", m);
+	nvkm_debug(subdev, "numerator       : %08x\n", n);
+	nvkm_debug(subdev, "denominator     : %08x\n", d);
+	nvkm_debug(subdev, "timer frequency : %dHz\n", (f * m) * d / n);
+
+	nvkm_wr32(device, 0x009220, m - 1);
+	nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+	nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
+}
+
+static const struct nvkm_timer_func
+nv41_timer = {
+	.init = nv41_timer_init,
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
+};
+
+int
+nv41_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+	return nvkm_timer_new_(&nv41_timer, device, index, ptmr);
+}
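
nv41 is the variant that actually derives the 31.25MHz timer ratio from the crystal. A worked example, assuming a 27MHz part and that device->crystal is stored in kHz as elsewhere in nouveau, so f = 27000 here:

	/*
	 * d = 1000000 / 32 = 31250, n = f = 27000
	 * multiplier loop: n -> 54000 (m = 2) -> 81000 (m = 3),
	 *   stopping once n >= 2 * d
	 * reduce by 5: 81000/31250 -> 16200/6250 -> 3240/1250 -> 648/250
	 * reduce by 2: 648/250 -> 324/125
	 * result: m - 1 = 2 written to 0x009220, n = 324, d = 125
	 * timer frequency = (27000 * 3) * 125 / 324 = 31250 kHz = 31.25MHz
	 */

At 31.25MHz each tick is 32ns, which is what lets PTIMER present its count in nanosecond units.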
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index 08e29a3da188..f820ca2aeda4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -1,4 +1,26 @@
 #ifndef __NVKM_TIMER_PRIV_H__
 #define __NVKM_TIMER_PRIV_H__
+#define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
 #include <subdev/timer.h>
+
+int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *,
+		    int index, struct nvkm_timer **);
+
+struct nvkm_timer_func {
+	void (*init)(struct nvkm_timer *);
+	void (*intr)(struct nvkm_timer *);
+	u64 (*read)(struct nvkm_timer *);
+	void (*time)(struct nvkm_timer *, u64 time);
+	void (*alarm_init)(struct nvkm_timer *, u32 time);
+	void (*alarm_fini)(struct nvkm_timer *);
+};
+
+void nvkm_timer_alarm_trigger(struct nvkm_timer *);
+
+void nv04_timer_fini(struct nvkm_timer *);
+void nv04_timer_intr(struct nvkm_timer *);
+void nv04_timer_time(struct nvkm_timer *, u64);
+u64 nv04_timer_read(struct nvkm_timer *);
+void nv04_timer_alarm_init(struct nvkm_timer *, u32);
+void nv04_timer_alarm_fini(struct nvkm_timer *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
new file mode 100644
index 000000000000..10bef85b485e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
@@ -0,0 +1,7 @@
+#define NV04_PTIMER_INTR_0      0x009100
+#define NV04_PTIMER_INTR_EN_0   0x009140
+#define NV04_PTIMER_NUMERATOR   0x009200
+#define NV04_PTIMER_DENOMINATOR 0x009210
+#define NV04_PTIMER_TIME_0      0x009400
+#define NV04_PTIMER_TIME_1      0x009410
+#define NV04_PTIMER_ALARM_0     0x009420
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 39f15803f2d4..4752dbd33923 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -21,49 +21,45 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/volt.h>
+#include "priv.h"
+
 #include <subdev/bios.h>
 #include <subdev/bios/vmap.h>
 #include <subdev/bios/volt.h>
 
-static int
+int
 nvkm_volt_get(struct nvkm_volt *volt)
 {
-	if (volt->vid_get) {
-		int ret = volt->vid_get(volt), i;
-		if (ret >= 0) {
-			for (i = 0; i < volt->vid_nr; i++) {
-				if (volt->vid[i].vid == ret)
-					return volt->vid[i].uv;
-			}
-			ret = -EINVAL;
+	int ret = volt->func->vid_get(volt), i;
+	if (ret >= 0) {
+		for (i = 0; i < volt->vid_nr; i++) {
+			if (volt->vid[i].vid == ret)
+				return volt->vid[i].uv;
 		}
-		return ret;
+		ret = -EINVAL;
 	}
-	return -ENODEV;
+	return ret;
 }
 
 static int
 nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
 {
-	if (volt->vid_set) {
-		int i, ret = -EINVAL;
-		for (i = 0; i < volt->vid_nr; i++) {
-			if (volt->vid[i].uv == uv) {
-				ret = volt->vid_set(volt, volt->vid[i].vid);
-				nv_debug(volt, "set %duv: %d\n", uv, ret);
-				break;
-			}
+	struct nvkm_subdev *subdev = &volt->subdev;
+	int i, ret = -EINVAL;
+	for (i = 0; i < volt->vid_nr; i++) {
+		if (volt->vid[i].uv == uv) {
+			ret = volt->func->vid_set(volt, volt->vid[i].vid);
+			nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
+			break;
 		}
-		return ret;
 	}
-	return -ENODEV;
+	return ret;
 }
 
 static int
 nvkm_volt_map(struct nvkm_volt *volt, u8 id)
 {
-	struct nvkm_bios *bios = nvkm_bios(volt);
+	struct nvkm_bios *bios = volt->subdev.device->bios;
 	struct nvbios_vmap_entry info;
 	u8  ver, len;
 	u16 vmap;
@@ -82,10 +78,15 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id)
 	return id ? id * 10000 : -ENODEV;
 }
 
-static int
+int
 nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
 {
-	int ret = nvkm_volt_map(volt, id);
+	int ret;
+
+	if (volt->func->set_id)
+		return volt->func->set_id(volt, id, condition);
+
+	ret = nvkm_volt_map(volt, id);
 	if (ret >= 0) {
 		int prev = nvkm_volt_get(volt);
 		if (!condition || prev < 0 ||
@@ -134,51 +135,41 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
 	}
 }
 
-int
-_nvkm_volt_init(struct nvkm_object *object)
+static int
+nvkm_volt_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_volt *volt = (void *)object;
-	int ret;
-
-	ret = nvkm_subdev_init(&volt->base);
-	if (ret)
-		return ret;
-
-	ret = volt->get(volt);
+	struct nvkm_volt *volt = nvkm_volt(subdev);
+	int ret = nvkm_volt_get(volt);
 	if (ret < 0) {
 		if (ret != -ENODEV)
-			nv_debug(volt, "current voltage unknown\n");
+			nvkm_debug(subdev, "current voltage unknown\n");
 		return 0;
 	}
-
-	nv_info(volt, "GPU voltage: %duv\n", ret);
+	nvkm_debug(subdev, "current voltage: %duv\n", ret);
 	return 0;
 }
 
-void
-_nvkm_volt_dtor(struct nvkm_object *object)
+static void *
+nvkm_volt_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_volt *volt = (void *)object;
-	nvkm_subdev_destroy(&volt->base);
+	return nvkm_volt(subdev);
 }
 
-int
-nvkm_volt_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_volt *volt;
-	int ret, i;
+static const struct nvkm_subdev_func
+nvkm_volt = {
+	.dtor = nvkm_volt_dtor,
+	.init = nvkm_volt_init,
+};
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "VOLT",
-				  "voltage", length, pobject);
-	volt = *pobject;
-	if (ret)
-		return ret;
+void
+nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_volt *volt)
+{
+	struct nvkm_bios *bios = device->bios;
+	int i;
 
-	volt->get = nvkm_volt_get;
-	volt->set = nvkm_volt_set;
-	volt->set_id = nvkm_volt_set_id;
+	nvkm_subdev_ctor(&nvkm_volt, device, index, 0, &volt->subdev);
+	volt->func = func;
 
 	/* Assuming the non-bios device should build the voltage table later */
 	if (bios)
@@ -186,19 +177,18 @@ nvkm_volt_create_(struct nvkm_object *parent, struct nvkm_object *engine,
 
 	if (volt->vid_nr) {
 		for (i = 0; i < volt->vid_nr; i++) {
-			nv_debug(volt, "VID %02x: %duv\n",
-				 volt->vid[i].vid, volt->vid[i].uv);
-		}
-
-		/*XXX: this is an assumption.. there probably exists boards
-		 * out there with i2c-connected voltage controllers too..
-		 */
-		ret = nvkm_voltgpio_init(volt);
-		if (ret == 0) {
-			volt->vid_get = nvkm_voltgpio_get;
-			volt->vid_set = nvkm_voltgpio_set;
+			nvkm_debug(&volt->subdev, "VID %02x: %duv\n",
+				   volt->vid[i].vid, volt->vid[i].uv);
 		}
 	}
+}
 
-	return ret;
+int
+nvkm_volt_new_(const struct nvkm_volt_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_volt **pvolt)
+{
+	if (!(*pvolt = kzalloc(sizeof(**pvolt), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_volt_ctor(func, device, index, *pvolt);
+	return 0;
 }
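
The condition argument to nvkm_volt_set_id() gates the change by direction: positive applies the change only if it raises the voltage, negative only if it lowers it, and zero forces it unconditionally. That lets reclocking code order voltage against frequency safely. A plausible calling pattern (sketch only):

	/* before raising clocks: voltage may only go up */
	nvkm_volt_set_id(volt, vid, +1);
	/* ... program the higher clocks ... */

	/* after lowering clocks: voltage may only come down */
	nvkm_volt_set_id(volt, vid, -1);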
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index 871fd51011db..fd56c6476064 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -19,10 +19,10 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <subdev/volt.h>
-#ifdef __KERNEL__
-#include <nouveau_platform.h>
-#endif
+#define gk20a_volt(p) container_of((p), struct gk20a_volt, base)
+#include "priv.h"
+
+#include <core/tegra.h>
 
 struct cvb_coef {
 	int c0;
@@ -33,7 +33,7 @@ struct cvb_coef {
 	int c5;
 };
 
-struct gk20a_volt_priv {
+struct gk20a_volt {
 	struct nvkm_volt base;
 	struct regulator *vdd;
 };
@@ -101,43 +101,45 @@ gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
 }
 
 static int
-gk20a_volt_vid_get(struct nvkm_volt *volt)
+gk20a_volt_vid_get(struct nvkm_volt *base)
 {
-	struct gk20a_volt_priv *priv = (void *)volt;
+	struct gk20a_volt *volt = gk20a_volt(base);
 	int i, uv;
 
-	uv = regulator_get_voltage(priv->vdd);
+	uv = regulator_get_voltage(volt->vdd);
 
-	for (i = 0; i < volt->vid_nr; i++)
-		if (volt->vid[i].uv >= uv)
+	for (i = 0; i < volt->base.vid_nr; i++)
+		if (volt->base.vid[i].uv >= uv)
 			return i;
 
 	return -EINVAL;
 }
 
 static int
-gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid)
+gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
 {
-	struct gk20a_volt_priv *priv = (void *)volt;
+	struct gk20a_volt *volt = gk20a_volt(base);
+	struct nvkm_subdev *subdev = &volt->base.subdev;
 
-	nv_debug(volt, "set voltage as %duv\n", volt->vid[vid].uv);
-	return regulator_set_voltage(priv->vdd, volt->vid[vid].uv, 1200000);
+	nvkm_debug(subdev, "set voltage as %duv\n", volt->base.vid[vid].uv);
+	return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
 }
 
 static int
-gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
+gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
 {
-	struct gk20a_volt_priv *priv = (void *)volt;
-	int prev_uv = regulator_get_voltage(priv->vdd);
-	int target_uv = volt->vid[id].uv;
+	struct gk20a_volt *volt = gk20a_volt(base);
+	struct nvkm_subdev *subdev = &volt->base.subdev;
+	int prev_uv = regulator_get_voltage(volt->vdd);
+	int target_uv = volt->base.vid[id].uv;
 	int ret;
 
-	nv_debug(volt, "prev=%d, target=%d, condition=%d\n",
-			prev_uv, target_uv, condition);
+	nvkm_debug(subdev, "prev=%d, target=%d, condition=%d\n",
+		   prev_uv, target_uv, condition);
 	if (!condition ||
 		(condition < 0 && target_uv < prev_uv) ||
 		(condition > 0 && target_uv > prev_uv)) {
-		ret = gk20a_volt_vid_set(volt, volt->vid[id].vid);
+		ret = gk20a_volt_vid_set(&volt->base, volt->base.vid[id].vid);
 	} else {
 		ret = 0;
 	}
@@ -145,53 +147,42 @@ gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
 	return ret;
 }
 
-static int
-gk20a_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static const struct nvkm_volt_func
+gk20a_volt = {
+	.vid_get = gk20a_volt_vid_get,
+	.vid_set = gk20a_volt_vid_set,
+	.set_id = gk20a_volt_set_id,
+};
+
+int
+gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 {
-	struct gk20a_volt_priv *priv;
-	struct nvkm_volt *volt;
-	struct nouveau_platform_device *plat;
-	int i, ret, uv;
-
-	ret = nvkm_volt_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	volt = &priv->base;
-
-	plat = nv_device_to_platform(nv_device(parent));
-
-	uv = regulator_get_voltage(plat->gpu->vdd);
-	nv_info(priv, "The default voltage is %duV\n", uv);
-
-	priv->vdd = plat->gpu->vdd;
-	priv->base.vid_get = gk20a_volt_vid_get;
-	priv->base.vid_set = gk20a_volt_vid_set;
-	priv->base.set_id = gk20a_volt_set_id;
-
-	volt->vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
-	nv_debug(priv, "%s - vid_nr = %d\n", __func__, volt->vid_nr);
-	for (i = 0; i < volt->vid_nr; i++) {
-		volt->vid[i].vid = i;
-		volt->vid[i].uv = gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
-					plat->gpu_speedo);
-		nv_debug(priv, "%2d: vid=%d, uv=%d\n", i, volt->vid[i].vid,
-					volt->vid[i].uv);
+	struct nvkm_device_tegra *tdev = device->func->tegra(device);
+	struct gk20a_volt *volt;
+	int i, uv;
+
+	if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
+	*pvolt = &volt->base;
+
+	uv = regulator_get_voltage(tdev->vdd);
+	nvkm_info(&volt->base.subdev, "The default voltage is %duV\n", uv);
+
+	volt->vdd = tdev->vdd;
+
+	volt->base.vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
+	nvkm_debug(&volt->base.subdev, "%s - vid_nr = %d\n", __func__,
+		   volt->base.vid_nr);
+	for (i = 0; i < volt->base.vid_nr; i++) {
+		volt->base.vid[i].vid = i;
+		volt->base.vid[i].uv =
+			gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
+						tdev->gpu_speedo);
+		nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
+			   volt->base.vid[i].vid, volt->base.vid[i].uv);
 	}
 
 	return 0;
 }
-
-struct nvkm_oclass
-gk20a_volt_oclass = {
-	.handle = NV_SUBDEV(VOLT, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_volt_ctor,
-		.dtor = _nvkm_volt_dtor,
-		.init = _nvkm_volt_init,
-		.fini = _nvkm_volt_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
index b778deb32d93..d2bac1d77819 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
@@ -34,13 +34,13 @@ static const u8 tags[] = {
 int
 nvkm_voltgpio_get(struct nvkm_volt *volt)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(volt);
+	struct nvkm_gpio *gpio = volt->subdev.device->gpio;
 	u8 vid = 0;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tags); i++) {
 		if (volt->vid_mask & (1 << i)) {
-			int ret = gpio->get(gpio, 0, tags[i], 0xff);
+			int ret = nvkm_gpio_get(gpio, 0, tags[i], 0xff);
 			if (ret < 0)
 				return ret;
 			vid |= ret << i;
@@ -53,12 +53,12 @@ nvkm_voltgpio_get(struct nvkm_volt *volt)
 int
 nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(volt);
+	struct nvkm_gpio *gpio = volt->subdev.device->gpio;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
 		if (volt->vid_mask & (1 << i)) {
-			int ret = gpio->set(gpio, 0, tags[i], 0xff, vid & 1);
+			int ret = nvkm_gpio_set(gpio, 0, tags[i], 0xff, vid & 1);
 			if (ret < 0)
 				return ret;
 		}
@@ -70,7 +70,8 @@ nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid)
 int
 nvkm_voltgpio_init(struct nvkm_volt *volt)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(volt);
+	struct nvkm_subdev *subdev = &volt->subdev;
+	struct nvkm_gpio *gpio = subdev->device->gpio;
 	struct dcb_gpio_func func;
 	int i;
 
@@ -82,11 +83,11 @@ nvkm_voltgpio_init(struct nvkm_volt *volt)
 	 */
 	for (i = 0; i < ARRAY_SIZE(tags); i++) {
 		if (volt->vid_mask & (1 << i)) {
-			int ret = gpio->find(gpio, 0, tags[i], 0xff, &func);
+			int ret = nvkm_gpio_find(gpio, 0, tags[i], 0xff, &func);
 			if (ret) {
 				if (ret != -ENOENT)
 					return ret;
-				nv_debug(volt, "VID bit %d has no GPIO\n", i);
+				nvkm_debug(subdev, "VID bit %d has no GPIO\n", i);
 				volt->vid_mask &= ~(1 << i);
 			}
 		}
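
The GPIO-based voltage interface treats the VID as a small bitfield spread across DCB GPIO functions: bit i of volt->vid_mask says whether tags[i] is wired up, and each line read contributes one bit at position i. For example (values illustrative only):

	/* vid_mask = 0x07: three GPIO lines supply VID bits 0..2;
	 * lines reading 1, 0, 1 give vid = (1 << 0) | (1 << 2) = 5 */

nvkm_voltgpio_set() is the mirror image, shifting vid right and driving one line per mask bit.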
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
index 0ac5a3f8c9a8..23409387abb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
@@ -21,35 +21,24 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/volt.h>
+#include "priv.h"
 
-struct nv40_volt_priv {
-	struct nvkm_volt base;
+static const struct nvkm_volt_func
+nv40_volt = {
+	.vid_get = nvkm_voltgpio_get,
+	.vid_set = nvkm_voltgpio_set,
 };
 
-static int
-nv40_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv40_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 {
-	struct nv40_volt_priv *priv;
+	struct nvkm_volt *volt;
 	int ret;
 
-	ret = nvkm_volt_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_volt_new_(&nv40_volt, device, index, &volt);
+	*pvolt = volt;
 	if (ret)
 		return ret;
 
-	return 0;
+	return nvkm_voltgpio_init(volt);
 }
-
-struct nvkm_oclass
-nv40_volt_oclass = {
-	.handle = NV_SUBDEV(VOLT, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_volt_ctor,
-		.dtor = _nvkm_volt_dtor,
-		.init = _nvkm_volt_init,
-		.fini = _nvkm_volt_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
new file mode 100644
index 000000000000..394f37c723af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -0,0 +1,20 @@
+#ifndef __NVKM_VOLT_PRIV_H__
+#define __NVKM_VOLT_PRIV_H__
+#define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
+#include <subdev/volt.h>
+
+void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *,
+		    int index, struct nvkm_volt *);
+int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
+		   int index, struct nvkm_volt **);
+
+struct nvkm_volt_func {
+	int (*vid_get)(struct nvkm_volt *);
+	int (*vid_set)(struct nvkm_volt *, u8 vid);
+	int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+};
+
+int nvkm_voltgpio_init(struct nvkm_volt *);
+int nvkm_voltgpio_get(struct nvkm_volt *);
+int nvkm_voltgpio_set(struct nvkm_volt *, u8);
+#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 23d9c928cdc9..9a4ba4f03567 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -388,11 +388,13 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	copy_timings_drm_to_omap(&omap_crtc->timings, mode);
 }
 
-static void omap_crtc_atomic_begin(struct drm_crtc *crtc)
+static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
 }
 
-static void omap_crtc_atomic_flush(struct drm_crtc *crtc)
+static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
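
This omapdrm hunk is the driver-side fallout of a helper API change: the atomic_begin and atomic_flush hooks in drm_crtc_helper_funcs now receive the pre-commit CRTC state, so drivers no longer have to stash it themselves. The new shape, with a hypothetical driver prefix:

	static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_crtc_state *old_crtc_state)
	{
		/* compare old_crtc_state against crtc->state to see what
		 * the commit changed before flushing it to hardware */
	}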
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 720d16bce7e8..b8e4cdec28c3 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -86,11 +86,11 @@ static struct fb_ops omap_fb_ops = {
 	/* Note: to properly handle manual update displays, we wrap the
 	 * basic fbdev ops which write to the framebuffer
 	 */
-	.fb_read = fb_sys_read,
-	.fb_write = fb_sys_write,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_read = drm_fb_helper_sys_read,
+	.fb_write = drm_fb_helper_sys_write,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
@@ -179,10 +179,10 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
 
 	mutex_lock(&dev->struct_mutex);
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
 		dev_err(dev->dev, "failed to allocate fb info\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(fbi);
 		goto fail_unlock;
 	}
 
@@ -190,7 +190,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
 
 	fbdev->fb = fb;
 	helper->fb = fb;
-	helper->fbdev = fbi;
 
 	fbi->par = helper;
 	fbi->flags = FBINFO_DEFAULT;
@@ -198,12 +197,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
 
 	strcpy(fbi->fix.id, MODULE_NAME);
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto fail_unlock;
-	}
-
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
@@ -236,8 +229,9 @@ fail_unlock:
 fail:
 
 	if (ret) {
-		if (fbi)
-			framebuffer_release(fbi);
+
+		drm_fb_helper_release_fbi(helper);
+
 		if (fb) {
 			drm_framebuffer_unregister_private(fb);
 			drm_framebuffer_remove(fb);
@@ -312,17 +306,11 @@ void omap_fbdev_free(struct drm_device *dev)
 	struct omap_drm_private *priv = dev->dev_private;
 	struct drm_fb_helper *helper = priv->fbdev;
 	struct omap_fbdev *fbdev;
-	struct fb_info *fbi;
 
 	DBG();
 
-	fbi = helper->fbdev;
-
-	/* only cleanup framebuffer if it is present */
-	if (fbi) {
-		unregister_framebuffer(fbi);
-		framebuffer_release(fbi);
-	}
+	drm_fb_helper_unregister_fbi(helper);
+	drm_fb_helper_release_fbi(helper);
 
 	drm_fb_helper_fini(helper);
 
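
The fbdev changes here are two related cleanups that several drivers in this series receive (the qxl conversion below follows the same recipe): the raw fb_sys_*/sys_* ops are replaced by drm_fb_helper_sys_* wrappers, and fb_info allocation moves into the helper. As the removed helper->fbdev assignment and the dropped fb_alloc_cmap()/framebuffer_release() calls suggest, the helper now owns the fb_info, cmap and apertures, leaving drivers with a skeleton along these lines:

	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi))
		return PTR_ERR(fbi);
	/* ... fill fbi->par, fix/var, screen_base ... */

	/* error paths and teardown */
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);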
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 6d64c7bb908b..7d4704b1292b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,13 +18,21 @@ config DRM_PANEL_SIMPLE
 	  that it can be automatically turned off when the panel goes into a
 	  low power state.
 
-config DRM_PANEL_LD9040
-	tristate "LD9040 RGB/SPI panel"
+config DRM_PANEL_SAMSUNG_LD9040
+	tristate "Samsung LD9040 RGB/SPI panel"
 	depends on OF && SPI
 	select VIDEOMODE_HELPERS
 
-config DRM_PANEL_S6E8AA0
-	tristate "S6E8AA0 DSI video mode panel"
+config DRM_PANEL_LG_LG4573
+	tristate "LG4573 RGB/SPI panel"
+	depends on OF && SPI
+	select VIDEOMODE_HELPERS
+	help
+	  Say Y here if you want to enable support for LG4573 RGB panel.
+	  To compile this driver as a module, choose M here.
+
+config DRM_PANEL_SAMSUNG_S6E8AA0
+	tristate "Samsung S6E8AA0 DSI video mode panel"
 	depends on OF
 	select DRM_MIPI_DSI
 	select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 4b2a0430804b..d0f016dd7ddb 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
-obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o
-obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
new file mode 100644
index 000000000000..a7b4939cee6d
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2015 Heiko Schocher <hs@denx.de>
+ *
+ * from:
+ * drivers/gpu/drm/panel/panel-ld9040.c
+ * ld9040 AMOLED LCD drm_panel driver.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Derived from drivers/video/backlight/ld9040.c
+ *
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+struct lg4573 {
+	struct drm_panel panel;
+	struct spi_device *spi;
+	struct videomode vm;
+};
+
+static inline struct lg4573 *panel_to_lg4573(struct drm_panel *panel)
+{
+	return container_of(panel, struct lg4573, panel);
+}
+
+static int lg4573_spi_write_u16(struct lg4573 *ctx, u16 data)
+{
+	struct spi_transfer xfer = {
+		.len = 2,
+	};
+	u16 temp = cpu_to_be16(data);
+	struct spi_message msg;
+
+	dev_dbg(ctx->panel.dev, "writing data: %x\n", data);
+	xfer.tx_buf = &temp;
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+
+	return spi_sync(ctx->spi, &msg);
+}
+
+static int lg4573_spi_write_u16_array(struct lg4573 *ctx, const u16 *buffer,
+				      unsigned int count)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < count; i++) {
+		ret = lg4573_spi_write_u16(ctx, buffer[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int lg4573_spi_write_dcs(struct lg4573 *ctx, u8 dcs)
+{
+	return lg4573_spi_write_u16(ctx, (0x70 << 8 | dcs));
+}
+
+static int lg4573_display_on(struct lg4573 *ctx)
+{
+	int ret;
+
+	ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+	if (ret)
+		return ret;
+
+	msleep(5);
+
+	return lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int lg4573_display_off(struct lg4573 *ctx)
+{
+	int ret;
+
+	ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+	if (ret)
+		return ret;
+
+	msleep(120);
+
+	return lg4573_spi_write_dcs(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+}
+
+static int lg4573_display_mode_settings(struct lg4573 *ctx)
+{
+	static const u16 display_mode_settings[] = {
+		0x703A, 0x7270, 0x70B1, 0x7208,
+		0x723B, 0x720F, 0x70B2, 0x7200,
+		0x72C8, 0x70B3, 0x7200, 0x70B4,
+		0x7200, 0x70B5, 0x7242, 0x7210,
+		0x7210, 0x7200, 0x7220, 0x70B6,
+		0x720B, 0x720F, 0x723C, 0x7213,
+		0x7213, 0x72E8, 0x70B7, 0x7246,
+		0x7206, 0x720C, 0x7200, 0x7200,
+	};
+
+	dev_dbg(ctx->panel.dev, "transfer display mode settings\n");
+	return lg4573_spi_write_u16_array(ctx, display_mode_settings,
+					  ARRAY_SIZE(display_mode_settings));
+}
+
+static int lg4573_power_settings(struct lg4573 *ctx)
+{
+	static const u16 power_settings[] = {
+		0x70C0, 0x7201, 0x7211, 0x70C3,
+		0x7207, 0x7203, 0x7204, 0x7204,
+		0x7204, 0x70C4, 0x7212, 0x7224,
+		0x7218, 0x7218, 0x7202, 0x7249,
+		0x70C5, 0x726F, 0x70C6, 0x7241,
+		0x7263,
+	};
+
+	dev_dbg(ctx->panel.dev, "transfer power settings\n");
+	return lg4573_spi_write_u16_array(ctx, power_settings,
+					  ARRAY_SIZE(power_settings));
+}
+
+static int lg4573_gamma_settings(struct lg4573 *ctx)
+{
+	static const u16 gamma_settings[] = {
+		0x70D0, 0x7203, 0x7207, 0x7273,
+		0x7235, 0x7200, 0x7201, 0x7220,
+		0x7200, 0x7203, 0x70D1, 0x7203,
+		0x7207, 0x7273, 0x7235, 0x7200,
+		0x7201, 0x7220, 0x7200, 0x7203,
+		0x70D2, 0x7203, 0x7207, 0x7273,
+		0x7235, 0x7200, 0x7201, 0x7220,
+		0x7200, 0x7203, 0x70D3, 0x7203,
+		0x7207, 0x7273, 0x7235, 0x7200,
+		0x7201, 0x7220, 0x7200, 0x7203,
+		0x70D4, 0x7203, 0x7207, 0x7273,
+		0x7235, 0x7200, 0x7201, 0x7220,
+		0x7200, 0x7203, 0x70D5, 0x7203,
+		0x7207, 0x7273, 0x7235, 0x7200,
+		0x7201, 0x7220, 0x7200, 0x7203,
+	};
+
+	dev_dbg(ctx->panel.dev, "transfer gamma settings\n");
+	return lg4573_spi_write_u16_array(ctx, gamma_settings,
+					  ARRAY_SIZE(gamma_settings));
+}
+
+static int lg4573_init(struct lg4573 *ctx)
+{
+	int ret;
+
+	dev_dbg(ctx->panel.dev, "initializing LCD\n");
+
+	ret = lg4573_display_mode_settings(ctx);
+	if (ret)
+		return ret;
+
+	ret = lg4573_power_settings(ctx);
+	if (ret)
+		return ret;
+
+	return lg4573_gamma_settings(ctx);
+}
+
+static int lg4573_power_on(struct lg4573 *ctx)
+{
+	return lg4573_display_on(ctx);
+}
+
+static int lg4573_disable(struct drm_panel *panel)
+{
+	struct lg4573 *ctx = panel_to_lg4573(panel);
+
+	return lg4573_display_off(ctx);
+}
+
+static int lg4573_enable(struct drm_panel *panel)
+{
+	struct lg4573 *ctx = panel_to_lg4573(panel);
+
+	lg4573_init(ctx);
+
+	return lg4573_power_on(ctx);
+}
+
+static const struct drm_display_mode default_mode = {
+	.clock = 27000,
+	.hdisplay = 480,
+	.hsync_start = 480 + 10,
+	.hsync_end = 480 + 10 + 59,
+	.htotal = 480 + 10 + 59 + 10,
+	.vdisplay = 800,
+	.vsync_start = 800 + 15,
+	.vsync_end = 800 + 15 + 15,
+	.vtotal = 800 + 15 + 15 + 15,
+	.vrefresh = 60,
+};
+
+static int lg4573_get_modes(struct drm_panel *panel)
+{
+	struct drm_connector *connector = panel->connector;
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	if (!mode) {
+		dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+			default_mode.hdisplay, default_mode.vdisplay,
+			default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_probed_add(connector, mode);
+
+	panel->connector->display_info.width_mm = 61;
+	panel->connector->display_info.height_mm = 103;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs lg4573_drm_funcs = {
+	.disable = lg4573_disable,
+	.enable = lg4573_enable,
+	.get_modes = lg4573_get_modes,
+};
+
+static int lg4573_probe(struct spi_device *spi)
+{
+	struct lg4573 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->spi = spi;
+
+	spi_set_drvdata(spi, ctx);
+	spi->bits_per_word = 8;
+
+	ret = spi_setup(spi);
+	if (ret < 0) {
+		dev_err(&spi->dev, "SPI setup failed: %d\n", ret);
+		return ret;
+	}
+
+	drm_panel_init(&ctx->panel);
+	ctx->panel.dev = &spi->dev;
+	ctx->panel.funcs = &lg4573_drm_funcs;
+
+	return drm_panel_add(&ctx->panel);
+}
+
+static int lg4573_remove(struct spi_device *spi)
+{
+	struct lg4573 *ctx = spi_get_drvdata(spi);
+
+	lg4573_display_off(ctx);
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id lg4573_of_match[] = {
+	{ .compatible = "lg,lg4573" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lg4573_of_match);
+
+static struct spi_driver lg4573_driver = {
+	.probe = lg4573_probe,
+	.remove = lg4573_remove,
+	.driver = {
+		.name = "lg4573",
+		.owner = THIS_MODULE,
+		.of_match_table = lg4573_of_match,
+	},
+};
+module_spi_driver(lg4573_driver);
+
+MODULE_AUTHOR("Heiko Schocher <hs@denx.de>");
+MODULE_DESCRIPTION("lg4573 LCD Driver");
+MODULE_LICENSE("GPL v2");
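
The magic numbers in the settings tables follow the framing that lg4573_spi_write_dcs() makes explicit: each 16-bit SPI word carries a one-byte prefix, 0x70 for a DCS command byte and, by the same pattern, 0x72 for a parameter byte (so 0x703A, 0x7270 is command 0x3A with parameter 0x70). A data-byte helper in the same style would look like this (hypothetical, not part of the driver):

	static int lg4573_spi_write_data(struct lg4573 *ctx, u8 data)
	{
		return lg4573_spi_write_u16(ctx, 0x7200 | data);
	}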
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 9c27bded4c09..b202377135e7 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -377,7 +377,7 @@ static struct spi_driver ld9040_driver = {
 	.probe = ld9040_probe,
 	.remove = ld9040_remove,
 	.driver = {
-		.name = "ld9040",
+		.name = "panel-samsung-ld9040",
 		.owner = THIS_MODULE,
 		.of_match_table = ld9040_of_match,
 	},
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 30051108eec4..a188a3959f1a 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -1051,7 +1051,7 @@ static struct mipi_dsi_driver s6e8aa0_driver = {
 	.probe = s6e8aa0_probe,
 	.remove = s6e8aa0_remove,
 	.driver = {
-		.name = "panel_s6e8aa0",
+		.name = "panel-samsung-s6e8aa0",
 		.of_match_table = s6e8aa0_of_match,
 	},
 };
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f94201b6e882..f97b73ec4713 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -713,7 +713,12 @@ static const struct display_timing hannstar_hsd070pww1_timing = {
 	.hactive = { 1280, 1280, 1280 },
 	.hfront_porch = { 1, 1, 10 },
 	.hback_porch = { 1, 1, 10 },
-	.hsync_len = { 52, 158, 661 },
+	/*
+	 * According to the data sheet, the minimum horizontal blanking interval
+	 * is 54 clocks (1 + 52 + 1), but tests with a Nitrogen6X have shown the
+	 * minimum working horizontal blanking interval to be 60 clocks.
+	 */
+	.hsync_len = { 58, 158, 661 },
 	.vactive = { 800, 800, 800 },
 	.vfront_porch = { 1, 1, 10 },
 	.vback_porch = { 1, 1, 10 },
@@ -729,6 +734,7 @@ static const struct panel_desc hannstar_hsd070pww1 = {
 		.width = 151,
 		.height = 94,
 	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
 };
 
 static const struct display_timing hannstar_hsd100pxn1_timing = {
@@ -943,6 +949,60 @@ static const struct panel_desc lg_lp129qe = {
 	},
 };
 
+static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
+	.clock = 10870,
+	.hdisplay = 480,
+	.hsync_start = 480 + 2,
+	.hsync_end = 480 + 2 + 41,
+	.htotal = 480 + 2 + 41 + 2,
+	.vdisplay = 272,
+	.vsync_start = 272 + 2,
+	.vsync_end = 272 + 2 + 4,
+	.vtotal = 272 + 2 + 4 + 2,
+	.vrefresh = 74,
+};
+
+static const struct panel_desc nec_nl4827hc19_05b = {
+	.modes = &nec_nl4827hc19_05b_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 95,
+		.height = 54,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24
+};
+
+static const struct display_timing okaya_rs800480t_7x0gp_timing = {
+	.pixelclock = { 30000000, 30000000, 40000000 },
+	.hactive = { 800, 800, 800 },
+	.hfront_porch = { 40, 40, 40 },
+	.hback_porch = { 40, 40, 40 },
+	.hsync_len = { 1, 48, 48 },
+	.vactive = { 480, 480, 480 },
+	.vfront_porch = { 13, 13, 13 },
+	.vback_porch = { 29, 29, 29 },
+	.vsync_len = { 3, 3, 3 },
+	.flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc okaya_rs800480t_7x0gp = {
+	.timings = &okaya_rs800480t_7x0gp_timing,
+	.num_timings = 1,
+	.bpc = 6,
+	.size = {
+		.width = 154,
+		.height = 87,
+	},
+	.delay = {
+		.prepare = 41,
+		.enable = 50,
+		.unprepare = 41,
+		.disable = 50,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
 static const struct drm_display_mode ortustech_com43h4m85ulc_mode  = {
 	.clock = 25000,
 	.hdisplay = 480,
@@ -1113,6 +1173,12 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "lg,lp129qe",
 		.data = &lg_lp129qe,
 	}, {
+		.compatible = "nec,nl4827hc19-05b",
+		.data = &nec_nl4827hc19_05b,
+	}, {
+		.compatible = "okaya,rs800480t-7x0gp",
+		.data = &okaya_rs800480t_7x0gp,
+	}, {
 		.compatible = "ortustech,com43h4m85ulc",
 		.data = &ortustech_com43h4m85ulc,
 	}, {
@@ -1169,6 +1235,34 @@ struct panel_desc_dsi {
 	unsigned int lanes;
 };
 
+static const struct drm_display_mode auo_b080uan01_mode = {
+	.clock = 154500,
+	.hdisplay = 1200,
+	.hsync_start = 1200 + 62,
+	.hsync_end = 1200 + 62 + 4,
+	.htotal = 1200 + 62 + 4 + 62,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 9,
+	.vsync_end = 1920 + 9 + 2,
+	.vtotal = 1920 + 9 + 2 + 8,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc_dsi auo_b080uan01 = {
+	.desc = {
+		.modes = &auo_b080uan01_mode,
+		.num_modes = 1,
+		.bpc = 8,
+		.size = {
+			.width = 108,
+			.height = 272,
+		},
+	},
+	.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
+	.format = MIPI_DSI_FMT_RGB888,
+	.lanes = 4,
+};
+
 static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
 	.clock = 71000,
 	.hdisplay = 800,
@@ -1256,6 +1350,9 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
 
 static const struct of_device_id dsi_of_match[] = {
 	{
+		.compatible = "auo,b080uan01",
+		.data = &auo_b080uan01
+	}, {
 		.compatible = "lg,ld070wx3-sl01",
 		.data = &lg_ld070wx3_sl01
 	}, {
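
The three panel additions above follow panel-simple's standard recipe: a fixed drm_display_mode (or a display_timing whose entries are { min, typical, max } ranges), a panel_desc giving bpc, physical size and bus format, and a matching of_device_id entry. A condensed hypothetical example of the same recipe:

	static const struct drm_display_mode acme_ap0700_mode = {
		.clock = 33300,
		.hdisplay = 800,
		.hsync_start = 800 + 210,
		.hsync_end = 800 + 210 + 30,
		.htotal = 800 + 210 + 30 + 16,
		.vdisplay = 480,
		.vsync_start = 480 + 22,
		.vsync_end = 480 + 22 + 13,
		.vtotal = 480 + 22 + 13 + 10,
		.vrefresh = 60,
	};

	static const struct panel_desc acme_ap0700 = {
		.modes = &acme_ap0700_mode,
		.num_modes = 1,
		.bpc = 6,
		.size = {
			.width = 152,
			.height = 91,
		},
		.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
	};

	/* plus an entry in platform_of_match[]: */
	{ .compatible = "acme,ap0700", .data = &acme_ap0700 },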
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 6b6e57e8c2d6..41c422fee31a 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -197,7 +197,7 @@ static void qxl_fb_fillrect(struct fb_info *info,
 {
 	struct qxl_fbdev *qfbdev = info->par;
 
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -207,7 +207,7 @@ static void qxl_fb_copyarea(struct fb_info *info,
 {
 	struct qxl_fbdev *qfbdev = info->par;
 
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -217,7 +217,7 @@ static void qxl_fb_imageblit(struct fb_info *info,
 {
 	struct qxl_fbdev *qfbdev = info->par;
 
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -345,7 +345,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
 	struct qxl_bo *qbo = NULL;
-	struct device *device = &qdev->pdev->dev;
 	int ret;
 	int size;
 	int bpp = sizes->surface_bpp;
@@ -374,9 +373,9 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 		 shadow);
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 
-	info = framebuffer_alloc(0, device);
-	if (info == NULL) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unref;
 	}
 
@@ -388,7 +387,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 
 	/* setup helper with fb data */
 	qfbdev->helper.fb = fb;
-	qfbdev->helper.fbdev = info;
+
 	qfbdev->shadow = shadow;
 	strcpy(info->fix.id, "qxldrmfb");
 
@@ -410,11 +409,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 			       sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
 	info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = qdev->vram_size;
 
@@ -423,13 +417,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 
 	if (info->screen_base == NULL) {
 		ret = -ENOSPC;
-		goto out_unref;
-	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	info->fbdefio = &qxl_defio;
@@ -441,6 +429,8 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 	DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(&qfbdev->helper);
 out_unref:
 	if (qbo) {
 		ret = qxl_bo_reserve(qbo, false);
@@ -479,15 +469,11 @@ static int qxl_fb_find_or_create_single(
 
 static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
 {
-	struct fb_info *info;
 	struct qxl_framebuffer *qfb = &qfbdev->qfb;
 
-	if (qfbdev->helper.fbdev) {
-		info = qfbdev->helper.fbdev;
+	drm_fb_helper_unregister_fbi(&qfbdev->helper);
+	drm_fb_helper_release_fbi(&qfbdev->helper);
 
-		unregister_framebuffer(info);
-		framebuffer_release(info);
-	}
 	if (qfb->obj) {
 		qxlfb_destroy_pinned_object(qfb->obj);
 		qfb->obj = NULL;
@@ -557,7 +543,7 @@ void qxl_fbdev_fini(struct qxl_device *qdev)
 
 void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
 {
-	fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state);
+	drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
 }
 
 bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
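
The qxl conversion above follows the generic fbdev-helper pattern:
drm_fb_helper_alloc_fbi() replaces the framebuffer_alloc()/fb_alloc_cmap()
pair (and hooks the fb_info up to the helper itself), while teardown becomes
an unregister/release pair. A minimal sketch of the resulting shape, with all
driver-specific setup elided:

#include <drm/drm_fb_helper.h>

/* Sketch only: the error handling and fb_info setup a real driver
 * needs are omitted. */
static int example_fbdev_create(struct drm_fb_helper *helper)
{
	struct fb_info *info;

	info = drm_fb_helper_alloc_fbi(helper);  /* also sets helper->fbdev */
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* ... fill info->fix/info->var, map screen_base, apertures ... */
	return 0;
}

static void example_fbdev_destroy(struct drm_fb_helper *helper)
{
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);  /* frees the cmap and fb_info */
}
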
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6d6f33de48f4..b28370e014c6 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -272,7 +272,6 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
 		return;
 	dev_err(qdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
-		mutex_lock(&qdev->ddev->struct_mutex);
 		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -280,8 +279,7 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
 		list_del_init(&bo->list);
 		mutex_unlock(&qdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&qdev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
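
The same struct_mutex removal recurs for radeon further down. The point in
both cases is that drm_gem_object_unreference_unlocked() acquires whatever
locking the final unref needs internally, so callers no longer wrap it in
dev->struct_mutex:

#include <drm/drmP.h>

/* Sketch: dropping the last GEM reference without holding
 * dev->struct_mutex, as the patched force-delete paths now do. */
static void example_drop_ref(struct drm_gem_object *obj)
{
	/* before: mutex_lock(&dev->struct_mutex);
	 *         drm_gem_object_unreference(obj);
	 *         mutex_unlock(&dev->struct_mutex); */
	drm_gem_object_unreference_unlocked(obj);
}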
 
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index f81e0d7d0232..9cd49c584263 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -171,8 +171,9 @@ radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
 		return -E2BIG;
 
 	tx_buf[0] = msg->address & 0xff;
-	tx_buf[1] = msg->address >> 8;
-	tx_buf[2] = msg->request << 4;
+	tx_buf[1] = (msg->address >> 8) & 0xff;
+	tx_buf[2] = (msg->request << 4) |
+		((msg->address >> 16) & 0xf);
 	tx_buf[3] = msg->size ? (msg->size - 1) : 0;
 
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
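
DPCD addresses on the DisplayPort AUX channel are 20 bits wide; the fix above
stops truncating them by carrying bits 19:16 in the low nibble of the request
byte. A standalone illustration of the header layout (the address and request
values are examples only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t address = 0x68000;  /* an HDCP register; needs >16 bits */
	uint8_t request = 0x9;       /* DP_AUX_NATIVE_READ */
	uint8_t tx_buf[3];

	tx_buf[0] = address & 0xff;
	tx_buf[1] = (address >> 8) & 0xff;
	tx_buf[2] = (request << 4) | ((address >> 16) & 0xf);

	/* prints "00 80 96"; the old packing would have sent "00 80 90",
	 * silently addressing DPCD 0x8000 instead. */
	printf("%02x %02x %02x\n", tx_buf[0], tx_buf[1], tx_buf[2]);
	return 0;
}
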
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 44480c1b9738..752072771388 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -76,16 +76,35 @@ static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
 
 struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
 {
-	int i;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	struct radeon_encoder_atom_dig *dig;
+	struct r600_audio_pin *pin = NULL;
+	int i, pin_count;
 
 	dce6_afmt_get_connected_pins(rdev);
 
 	for (i = 0; i < rdev->audio.num_pins; i++) {
-		if (rdev->audio.pin[i].connected)
-			return &rdev->audio.pin[i];
+		if (rdev->audio.pin[i].connected) {
+			pin = &rdev->audio.pin[i];
+			pin_count = 0;
+
+			list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+				if (radeon_encoder_is_digital(encoder)) {
+					radeon_encoder = to_radeon_encoder(encoder);
+					dig = radeon_encoder->enc_priv;
+					if (dig->pin == pin)
+						pin_count++;
+				}
+			}
+
+			if (pin_count == 0)
+				return pin;
+		}
 	}
-	DRM_ERROR("No connected audio pins found!\n");
-	return NULL;
+	if (!pin)
+		DRM_ERROR("No connected audio pins found!\n");
+	return pin;
 }
 
 void dce6_afmt_select_pin(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fbc8d88d6e5d..2c02e99b5f95 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -522,13 +522,15 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
 		return err;
 	}
 
-	if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
-		if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
-			frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
-		else
-			frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
-	} else {
-		frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+	if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
+		if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
+			if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
+				frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+			else
+				frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
+		} else {
+			frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+		}
 	}
 
 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
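
The restructuring above leaves the AVI infoframe's quantization range
untouched whenever the encoder's output CSC is in bypass. Condensed into a
small decision helper (illustrative, not the driver's code):

#include <stdbool.h>

enum quant_range { RANGE_UNTOUCHED, RANGE_DEFAULT, RANGE_LIMITED, RANGE_FULL };

static enum quant_range pick_range(bool csc_bypass, bool sink_selectable,
				   bool csc_tv_rgb)
{
	if (csc_bypass)
		return RANGE_UNTOUCHED;  /* keep whatever the frame has */
	if (!sink_selectable)
		return RANGE_DEFAULT;
	return csc_tv_rgb ? RANGE_LIMITED : RANGE_FULL;
}
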
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c097d3a82bda..a9b01bcf7d0a 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3387,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
 	    rdev->pdev->subsystem_device == 0x30ae)
 		return;
 
+	/* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x280a)
+		return;
+
 	/* DYN CLK 1 */
 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
 	if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 94b21ae70ef7..5a2cafb4f1bc 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -95,6 +95,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
 			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 			} else if (radeon_dp_needs_link_train(radeon_connector)) {
+				/* Don't try to start link training before we
+				 * have the dpcd */
+				if (!radeon_dp_getdpcd(radeon_connector))
+					return;
+
 				/* set it to OFF so that drm_helper_connector_dpms()
 				 * won't return immediately since the current state
 				 * is ON at this point.
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index fcbd60bb0349..3b0c229d7dcd 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -116,8 +116,8 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
 	       AUX_SW_WR_BYTES(bytes));
 
 	/* write the data header into the registers */
-	/* request, addres, msg size */
-	byte = (msg->request << 4);
+	/* request, address, msg size */
+	byte = (msg->request << 4) | ((msg->address >> 16) & 0xf);
 	WREG32(AUX_SW_DATA + aux_offset[instance],
 	       AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
 
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 257b10be5cda..5e09c061847f 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -246,9 +246,10 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
 	kfree(radeon_connector);
 }
 
-static void radeon_connector_dpms(struct drm_connector *connector, int mode)
+static int radeon_connector_dpms(struct drm_connector *connector, int mode)
 {
 	DRM_DEBUG_KMS("\n");
+	return 0;
 }
 
 static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
@@ -284,11 +285,10 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
 
 	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
 	drm_mode_connector_set_path_property(connector, pathprop);
-	drm_reinit_primary_mode_group(dev);
 
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	radeon_fb_add_connector(rdev, connector);
-	mutex_unlock(&dev->mode_config.mutex);
+	drm_modeset_unlock_all(dev);
 
 	drm_connector_register(connector);
 	return connector;
@@ -303,14 +303,12 @@ static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 
 	drm_connector_unregister(connector);
 	/* need to nuke the connector */
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	/* dpms off */
 	radeon_fb_remove_connector(rdev, connector);
 
 	drm_connector_cleanup(connector);
-	mutex_unlock(&dev->mode_config.mutex);
-	drm_reinit_primary_mode_group(dev);
-
+	drm_modeset_unlock_all(dev);
 
 	kfree(connector);
 	DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index aeb676708e60..7214858ffcea 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -82,9 +82,9 @@ static struct fb_ops radeonfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = radeon_fb_helper_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -227,7 +227,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
 	struct radeon_bo *rbo = NULL;
-	struct device *device = &rdev->pdev->dev;
 	int ret;
 	unsigned long tmp;
 
@@ -250,9 +249,9 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 	rbo = gem_to_radeon_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
-	info = framebuffer_alloc(0, device);
-	if (info == NULL) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unref;
 	}
 
@@ -262,14 +261,13 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	fb = &rfbdev->rfb.base;
 
 	/* setup helper */
 	rfbdev->helper.fb = fb;
-	rfbdev->helper.fbdev = info;
 
 	memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
 
@@ -289,11 +287,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
 	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = rdev->mc.aper_size;
 
@@ -301,13 +294,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 
 	if (info->screen_base == NULL) {
 		ret = -ENOSPC;
-		goto out_unref;
-	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
@@ -319,6 +306,8 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_unref:
 	if (rbo) {
 
@@ -339,17 +328,10 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev)
 
 static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
 {
-	struct fb_info *info;
 	struct radeon_framebuffer *rfb = &rfbdev->rfb;
 
-	if (rfbdev->helper.fbdev) {
-		info = rfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&rfbdev->helper);
+	drm_fb_helper_release_fbi(&rfbdev->helper);
 
 	if (rfb->obj) {
 		radeonfb_destroy_pinned_object(rfb->obj);
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index e476c331f3fa..9a4d69e59401 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -845,7 +845,8 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 		hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
 		break;
 
-	case KGD_ENGINE_SDMA:
+	case KGD_ENGINE_SDMA1:
+	case KGD_ENGINE_SDMA2:
 		hdr = (const union radeon_firmware_header *)
 							rdev->sdma_fw->data;
 		break;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 676362769b8d..d3024883b844 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -419,7 +419,6 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
 	}
 	dev_err(rdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
-		mutex_lock(&rdev->ddev->struct_mutex);
 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -427,8 +426,7 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->rdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&rdev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index c1ba83a8dd8c..05751f3f8444 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -253,7 +253,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
 		return;
 
-	mutex_lock(&rdev->ddev->struct_mutex);
 	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
 
@@ -268,7 +267,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 			/* needs a GPU reset dont reset here */
 			mutex_unlock(&rdev->ring_lock);
 			up_write(&rdev->pm.mclk_lock);
-			mutex_unlock(&rdev->ddev->struct_mutex);
 			return;
 		}
 	}
@@ -304,7 +302,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 
 	mutex_unlock(&rdev->ring_lock);
 	up_write(&rdev->pm.mclk_lock);
-	mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
 static void radeon_pm_print_states(struct radeon_device *rdev)
@@ -1062,7 +1059,6 @@ force:
 		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
 	}
 
-	mutex_lock(&rdev->ddev->struct_mutex);
 	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
 
@@ -1113,7 +1109,6 @@ force:
 done:
 	mutex_unlock(&rdev->ring_lock);
 	up_write(&rdev->pm.mclk_lock);
-	mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 65d6ba6621ac..48cb19949ca3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -496,7 +496,8 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
+				      struct drm_crtc_state *old_crtc_state)
 {
 	struct drm_pending_vblank_event *event = crtc->state->event;
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -512,7 +513,8 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
 	}
 }
 
-static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
+				      struct drm_crtc_state *old_crtc_state)
 {
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 5b0dc0f6fd94..f261512bb4a0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -37,9 +37,9 @@ static int rockchip_fbdev_mmap(struct fb_info *info,
 static struct fb_ops rockchip_drm_fbdev_ops = {
 	.owner		= THIS_MODULE,
 	.fb_mmap	= rockchip_fbdev_mmap,
-	.fb_fillrect	= cfb_fillrect,
-	.fb_copyarea	= cfb_copyarea,
-	.fb_imageblit	= cfb_imageblit,
+	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
+	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
+	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
 	.fb_blank	= drm_fb_helper_blank,
@@ -77,10 +77,10 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
 
 	private->fbdev_bo = &rk_obj->base;
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
-		dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
-		ret = -ENOMEM;
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
+		dev_err(dev->dev, "Failed to create framebuffer info.\n");
+		ret = PTR_ERR(fbi);
 		goto err_rockchip_gem_free_object;
 	}
 
@@ -89,21 +89,13 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
 	if (IS_ERR(helper->fb)) {
 		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
 		ret = PTR_ERR(helper->fb);
-		goto err_framebuffer_release;
+		goto err_release_fbi;
 	}
 
-	helper->fbdev = fbi;
-
 	fbi->par = helper;
 	fbi->flags = FBINFO_FLAG_DEFAULT;
 	fbi->fbops = &rockchip_drm_fbdev_ops;
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		dev_err(dev->dev, "Failed to allocate color map.\n");
-		goto err_drm_framebuffer_unref;
-	}
-
 	fb = helper->fb;
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -124,10 +116,8 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
 
 	return 0;
 
-err_drm_framebuffer_unref:
-	drm_framebuffer_unreference(helper->fb);
-err_framebuffer_release:
-	framebuffer_release(fbi);
+err_release_fbi:
+	drm_fb_helper_release_fbi(helper);
 err_rockchip_gem_free_object:
 	rockchip_gem_free_object(&rk_obj->base);
 	return ret;
@@ -190,21 +180,8 @@ void rockchip_drm_fbdev_fini(struct drm_device *dev)
 
 	helper = &private->fbdev_helper;
 
-	if (helper->fbdev) {
-		struct fb_info *info;
-		int ret;
-
-		info = helper->fbdev;
-		ret = unregister_framebuffer(info);
-		if (ret < 0)
-			DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
-				      ret);
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(helper);
+	drm_fb_helper_release_fbi(helper);
 
 	if (helper->fb)
 		drm_framebuffer_unreference(helper->fb);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index eba5f8a52fbd..a6d9104f7f15 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -200,13 +200,10 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
 	struct drm_gem_object *obj;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
-
 	obj = drm_gem_object_lookup(dev, file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
-		ret = -EINVAL;
-		goto unlock;
+		return -EINVAL;
 	}
 
 	ret = drm_gem_create_mmap_offset(obj);
@@ -217,10 +214,9 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
 	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
 
 out:
-	drm_gem_object_unreference(obj);
-unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	drm_gem_object_unreference_unlocked(obj);
+
+	return 0;
 }
 
 /*
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 34b78e736532..5d8ae5e49c44 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -50,6 +50,8 @@
 
 #define VOP_WIN_SET(x, win, name, v) \
 		REG_SET(x, win->base, win->phy->name, v, RELAXED)
+#define VOP_SCL_SET(x, win, name, v) \
+		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
 #define VOP_CTRL_SET(x, name, v) \
 		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
 
@@ -164,7 +166,37 @@ struct vop_ctrl {
 	struct vop_reg vpost_st_end;
 };
 
+struct vop_scl_regs {
+	struct vop_reg cbcr_vsd_mode;
+	struct vop_reg cbcr_vsu_mode;
+	struct vop_reg cbcr_hsd_mode;
+	struct vop_reg cbcr_ver_scl_mode;
+	struct vop_reg cbcr_hor_scl_mode;
+	struct vop_reg yrgb_vsd_mode;
+	struct vop_reg yrgb_vsu_mode;
+	struct vop_reg yrgb_hsd_mode;
+	struct vop_reg yrgb_ver_scl_mode;
+	struct vop_reg yrgb_hor_scl_mode;
+	struct vop_reg line_load_mode;
+	struct vop_reg cbcr_axi_gather_num;
+	struct vop_reg yrgb_axi_gather_num;
+	struct vop_reg vsd_cbcr_gt2;
+	struct vop_reg vsd_cbcr_gt4;
+	struct vop_reg vsd_yrgb_gt2;
+	struct vop_reg vsd_yrgb_gt4;
+	struct vop_reg bic_coe_sel;
+	struct vop_reg cbcr_axi_gather_en;
+	struct vop_reg yrgb_axi_gather_en;
+
+	struct vop_reg lb_mode;
+	struct vop_reg scale_yrgb_x;
+	struct vop_reg scale_yrgb_y;
+	struct vop_reg scale_cbcr_x;
+	struct vop_reg scale_cbcr_y;
+};
+
 struct vop_win_phy {
+	const struct vop_scl_regs *scl;
 	const uint32_t *data_formats;
 	uint32_t nformats;
 
@@ -222,7 +254,36 @@ static const uint32_t formats_234[] = {
 	DRM_FORMAT_BGR565,
 };
 
+static const struct vop_scl_regs win_full_scl = {
+	.cbcr_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 31),
+	.cbcr_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 30),
+	.cbcr_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 28),
+	.cbcr_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 26),
+	.cbcr_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 24),
+	.yrgb_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 23),
+	.yrgb_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 22),
+	.yrgb_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 20),
+	.yrgb_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 18),
+	.yrgb_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 16),
+	.line_load_mode = VOP_REG(WIN0_CTRL1, 0x1, 15),
+	.cbcr_axi_gather_num = VOP_REG(WIN0_CTRL1, 0x7, 12),
+	.yrgb_axi_gather_num = VOP_REG(WIN0_CTRL1, 0xf, 8),
+	.vsd_cbcr_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 7),
+	.vsd_cbcr_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 6),
+	.vsd_yrgb_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 5),
+	.vsd_yrgb_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 4),
+	.bic_coe_sel = VOP_REG(WIN0_CTRL1, 0x3, 2),
+	.cbcr_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 1),
+	.yrgb_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 0),
+	.lb_mode = VOP_REG(WIN0_CTRL0, 0x7, 5),
+	.scale_yrgb_x = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+	.scale_yrgb_y = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+	.scale_cbcr_x = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+	.scale_cbcr_y = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
 static const struct vop_win_phy win01_data = {
+	.scl = &win_full_scl,
 	.data_formats = formats_01,
 	.nformats = ARRAY_SIZE(formats_01),
 	.enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
@@ -279,6 +340,12 @@ static const struct vop_reg_data vop_init_reg_table[] = {
 	{DSP_CTRL0, 0x00000000},
 	{WIN0_CTRL0, 0x00000080},
 	{WIN1_CTRL0, 0x00000080},
+	/* TODO: Win2/3 support a multi-area function, but we haven't found
+	 * a suitable way to use it yet, so just use them like the other
+	 * windows, with only area 0 enabled.
+	 */
+	{WIN2_CTRL0, 0x00000010},
+	{WIN3_CTRL0, 0x00000010},
 };
 
 /*
@@ -393,6 +460,18 @@ static enum vop_data_format vop_convert_format(uint32_t format)
 	}
 }
 
+static bool is_yuv_support(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV24:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static bool is_alpha_support(uint32_t format)
 {
 	switch (format) {
@@ -404,6 +483,126 @@ static bool is_alpha_support(uint32_t format)
 	}
 }
 
+static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
+				  uint32_t dst, bool is_horizontal,
+				  int vsu_mode, int *vskiplines)
+{
+	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;
+
+	if (is_horizontal) {
+		if (mode == SCALE_UP)
+			val = GET_SCL_FT_BIC(src, dst);
+		else if (mode == SCALE_DOWN)
+			val = GET_SCL_FT_BILI_DN(src, dst);
+	} else {
+		if (mode == SCALE_UP) {
+			if (vsu_mode == SCALE_UP_BIL)
+				val = GET_SCL_FT_BILI_UP(src, dst);
+			else
+				val = GET_SCL_FT_BIC(src, dst);
+		} else if (mode == SCALE_DOWN) {
+			if (vskiplines) {
+				*vskiplines = scl_get_vskiplines(src, dst);
+				val = scl_get_bili_dn_vskip(src, dst,
+							    *vskiplines);
+			} else {
+				val = GET_SCL_FT_BILI_DN(src, dst);
+			}
+		}
+	}
+
+	return val;
+}
+
+static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
+			     uint32_t src_w, uint32_t src_h, uint32_t dst_w,
+			     uint32_t dst_h, uint32_t pixel_format)
+{
+	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
+	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
+	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
+	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
+	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
+	bool is_yuv = is_yuv_support(pixel_format);
+	uint16_t cbcr_src_w = src_w / hsub;
+	uint16_t cbcr_src_h = src_h / vsub;
+	uint16_t vsu_mode;
+	uint16_t lb_mode;
+	uint32_t val;
+	int vskiplines;
+
+	if (dst_w > 3840) {
+		DRM_ERROR("Maximum destination width (3840) exceeded\n");
+		return;
+	}
+
+	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
+	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
+
+	if (is_yuv) {
+		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
+		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
+		if (cbcr_hor_scl_mode == SCALE_DOWN)
+			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
+		else
+			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
+	} else {
+		if (yrgb_hor_scl_mode == SCALE_DOWN)
+			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
+		else
+			lb_mode = scl_vop_cal_lb_mode(src_w, false);
+	}
+
+	VOP_SCL_SET(vop, win, lb_mode, lb_mode);
+	if (lb_mode == LB_RGB_3840X2) {
+		if (yrgb_ver_scl_mode != SCALE_NONE) {
+			DRM_ERROR("ERROR : not allow yrgb ver scale\n");
+			return;
+		}
+		if (cbcr_ver_scl_mode != SCALE_NONE) {
+			DRM_ERROR("ERROR : not allow cbcr ver scale\n");
+			return;
+		}
+		vsu_mode = SCALE_UP_BIL;
+	} else if (lb_mode == LB_RGB_2560X4) {
+		vsu_mode = SCALE_UP_BIL;
+	} else {
+		vsu_mode = SCALE_UP_BIC;
+	}
+
+	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
+				true, 0, NULL);
+	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
+	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
+				false, vsu_mode, &vskiplines);
+	VOP_SCL_SET(vop, win, scale_yrgb_y, val);
+
+	VOP_SCL_SET(vop, win, vsd_yrgb_gt4, vskiplines == 4);
+	VOP_SCL_SET(vop, win, vsd_yrgb_gt2, vskiplines == 2);
+
+	VOP_SCL_SET(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
+	VOP_SCL_SET(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
+	VOP_SCL_SET(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
+	VOP_SCL_SET(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
+	VOP_SCL_SET(vop, win, yrgb_vsu_mode, vsu_mode);
+	if (is_yuv) {
+		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
+					dst_w, true, 0, NULL);
+		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
+		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
+					dst_h, false, vsu_mode, &vskiplines);
+		VOP_SCL_SET(vop, win, scale_cbcr_y, val);
+
+		VOP_SCL_SET(vop, win, vsd_cbcr_gt4, vskiplines == 4);
+		VOP_SCL_SET(vop, win, vsd_cbcr_gt2, vskiplines == 2);
+		VOP_SCL_SET(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
+		VOP_SCL_SET(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
+		VOP_SCL_SET(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
+		VOP_SCL_SET(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
+		VOP_SCL_SET(vop, win, cbcr_vsu_mode, vsu_mode);
+	}
+}
+
 static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
 {
 	unsigned long flags;
@@ -478,6 +677,7 @@ static void vop_enable(struct drm_crtc *crtc)
 		goto err_disable_aclk;
 	}
 
+	memcpy(vop->regs, vop->regsbak, vop->len);
 	/*
 	 * At here, vop clock & iommu is enable, R/W vop regs would be safe.
 	 */
@@ -598,17 +798,22 @@ static int vop_update_plane_event(struct drm_plane *plane,
 	struct vop *vop = to_vop(crtc);
 	struct drm_gem_object *obj;
 	struct rockchip_gem_object *rk_obj;
+	struct drm_gem_object *uv_obj;
+	struct rockchip_gem_object *rk_uv_obj;
 	unsigned long offset;
 	unsigned int actual_w;
 	unsigned int actual_h;
 	unsigned int dsp_stx;
 	unsigned int dsp_sty;
 	unsigned int y_vir_stride;
+	unsigned int uv_vir_stride = 0;
 	dma_addr_t yrgb_mst;
+	dma_addr_t uv_mst = 0;
 	enum vop_data_format format;
 	uint32_t val;
 	bool is_alpha;
 	bool rb_swap;
+	bool is_yuv;
 	bool visible;
 	int ret;
 	struct drm_rect dest = {
@@ -629,11 +834,15 @@ static int vop_update_plane_event(struct drm_plane *plane,
 		.y2 = crtc->mode.vdisplay,
 	};
 	bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
+	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
+					DRM_PLANE_HELPER_NO_SCALING;
+	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
+					DRM_PLANE_HELPER_NO_SCALING;
 
 	ret = drm_plane_helper_check_update(plane, crtc, fb,
 					    &src, &dest, &clip,
-					    DRM_PLANE_HELPER_NO_SCALING,
-					    DRM_PLANE_HELPER_NO_SCALING,
+					    min_scale,
+					    max_scale,
 					    can_position, false, &visible);
 	if (ret)
 		return ret;
@@ -643,6 +852,8 @@ static int vop_update_plane_event(struct drm_plane *plane,
 
 	is_alpha = is_alpha_support(fb->pixel_format);
 	rb_swap = has_rb_swapped(fb->pixel_format);
+	is_yuv = is_yuv_support(fb->pixel_format);
+
 	format = vop_convert_format(fb->pixel_format);
 	if (format < 0)
 		return format;
@@ -655,19 +866,46 @@ static int vop_update_plane_event(struct drm_plane *plane,
 
 	rk_obj = to_rockchip_obj(obj);
 
+	if (is_yuv) {
+		/*
+		 * Src.x1 can be odd when do clip, but yuv plane start point
+		 * need align with 2 pixel.
+		 */
+		val = (src.x1 >> 16) % 2;
+		src.x1 += val << 16;
+		src.x2 += val << 16;
+	}
+
 	actual_w = (src.x2 - src.x1) >> 16;
 	actual_h = (src.y2 - src.y1) >> 16;
-	crtc_x = max(0, crtc_x);
-	crtc_y = max(0, crtc_y);
 
-	dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
-	dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
+	dsp_stx = dest.x1 + crtc->mode.htotal - crtc->mode.hsync_start;
+	dsp_sty = dest.y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
 
-	offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
+	offset = (src.x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
 	offset += (src.y1 >> 16) * fb->pitches[0];
-	yrgb_mst = rk_obj->dma_addr + offset;
 
-	y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
+	yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
+	y_vir_stride = fb->pitches[0] >> 2;
+
+	if (is_yuv) {
+		int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+		int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+		int bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+
+		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+		if (!uv_obj) {
+			DRM_ERROR("fail to get uv object from framebuffer\n");
+			return -EINVAL;
+		}
+		rk_uv_obj = to_rockchip_obj(uv_obj);
+		uv_vir_stride = fb->pitches[1] >> 2;
+
+		offset = (src.x1 >> 16) * bpp / hsub;
+		offset += (src.y1 >> 16) * fb->pitches[1] / vsub;
+
+		uv_mst = rk_uv_obj->dma_addr + offset + fb->offsets[1];
+	}
 
 	/*
 	 * If this plane update changes the plane's framebuffer, (or more
@@ -704,9 +942,22 @@ static int vop_update_plane_event(struct drm_plane *plane,
 	VOP_WIN_SET(vop, win, format, format);
 	VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
 	VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
+	if (is_yuv) {
+		VOP_WIN_SET(vop, win, uv_vir, uv_vir_stride);
+		VOP_WIN_SET(vop, win, uv_mst, uv_mst);
+	}
+
+	if (win->phy->scl)
+		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
+				    dest.x2 - dest.x1, dest.y2 - dest.y1,
+				    fb->pixel_format);
+
 	val = (actual_h - 1) << 16;
 	val |= (actual_w - 1) & 0xffff;
 	VOP_WIN_SET(vop, win, act_info, val);
+
+	val = (dest.y2 - dest.y1 - 1) << 16;
+	val |= (dest.x2 - dest.x1 - 1) & 0xffff;
 	VOP_WIN_SET(vop, win, dsp_info, val);
 	val = (dsp_sty - 1) << 16;
 	val |= (dsp_stx - 1) & 0xffff;
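
For the YUV path just added, the UV start address depends on the chroma
subsampling factors. Recomputing the offsets for an assumed NV12 layout
(hsub = vsub = 2, two bytes per interleaved CbCr sample) in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t src_x = 64, src_y = 32;        /* assumed clip origin */
	uint32_t pitch0 = 1920, pitch1 = 1920;  /* NV12: same pitch */
	int hsub = 2, vsub = 2, uv_cpp = 2;

	uint32_t y_off = src_x * 1 + src_y * pitch0;  /* 1 byte/px luma */
	uint32_t uv_off = src_x * uv_cpp / hsub + src_y * pitch1 / vsub;

	printf("y_off=%u uv_off=%u\n", y_off, uv_off);
	return 0;  /* y_off=61504 uv_off=30784 */
}
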
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 63e9b3a084c5..a2d4ddb896fa 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -198,4 +198,92 @@ enum factor_mode {
 	ALPHA_SRC_GLOBAL,
 };
 
+enum scale_mode {
+	SCALE_NONE = 0x0,
+	SCALE_UP   = 0x1,
+	SCALE_DOWN = 0x2
+};
+
+enum lb_mode {
+	LB_YUV_3840X5 = 0x0,
+	LB_YUV_2560X8 = 0x1,
+	LB_RGB_3840X2 = 0x2,
+	LB_RGB_2560X4 = 0x3,
+	LB_RGB_1920X5 = 0x4,
+	LB_RGB_1280X8 = 0x5
+};
+
+enum scale_up_mode {
+	SCALE_UP_BIL = 0x0,
+	SCALE_UP_BIC = 0x1
+};
+
+enum scale_down_mode {
+	SCALE_DOWN_BIL = 0x0,
+	SCALE_DOWN_AVG = 0x1
+};
+
+#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
+#define SCL_FT_DEFAULT_FIXPOINT_SHIFT	12
+#define SCL_MAX_VSKIPLINES		4
+#define MIN_SCL_FT_AFTER_VSKIP		1
+
+static inline uint16_t scl_cal_scale(int src, int dst, int shift)
+{
+	return ((src * 2 - 3) << (shift - 1)) / (dst - 1);
+}
+
+#define GET_SCL_FT_BILI_DN(src, dst)	scl_cal_scale(src, dst, 12)
+#define GET_SCL_FT_BILI_UP(src, dst)	scl_cal_scale(src, dst, 16)
+#define GET_SCL_FT_BIC(src, dst)	scl_cal_scale(src, dst, 16)
+
+static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
+					     int vskiplines)
+{
+	int act_height;
+
+	act_height = (src_h + vskiplines - 1) / vskiplines;
+
+	return GET_SCL_FT_BILI_DN(act_height, dst_h);
+}
+
+static inline enum scale_mode scl_get_scl_mode(int src, int dst)
+{
+	if (src < dst)
+		return SCALE_UP;
+	else if (src > dst)
+		return SCALE_DOWN;
+
+	return SCALE_NONE;
+}
+
+static inline int scl_get_vskiplines(uint32_t srch, uint32_t dsth)
+{
+	uint32_t vskiplines;
+
+	for (vskiplines = SCL_MAX_VSKIPLINES; vskiplines > 1; vskiplines /= 2)
+		if (srch >= vskiplines * dsth * MIN_SCL_FT_AFTER_VSKIP)
+			break;
+
+	return vskiplines;
+}
+
+static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
+{
+	int lb_mode;
+
+	if (width > 2560)
+		lb_mode = LB_RGB_3840X2;
+	else if (width > 1920)
+		lb_mode = LB_RGB_2560X4;
+	else if (!is_yuv)
+		lb_mode = LB_RGB_1920X5;
+	else if (width > 1280)
+		lb_mode = LB_YUV_3840X5;
+	else
+		lb_mode = LB_YUV_2560X8;
+
+	return lb_mode;
+}
+
 #endif /* _ROCKCHIP_DRM_VOP_H */
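
The fixed-point helpers above are easiest to read with numbers plugged in.
A userspace transcription of two of the inlines (with the
MIN_SCL_FT_AFTER_VSKIP factor of 1 folded in), illustrative only:

#include <stdint.h>
#include <stdio.h>

static uint16_t scl_cal_scale(int src, int dst, int shift)
{
	return ((src * 2 - 3) << (shift - 1)) / (dst - 1);
}

static int scl_get_vskiplines(uint32_t srch, uint32_t dsth)
{
	uint32_t vskiplines;

	for (vskiplines = 4; vskiplines > 1; vskiplines /= 2)
		if (srch >= vskiplines * dsth)
			break;

	return vskiplines;
}

int main(void)
{
	/* 1920 -> 1080 bilinear down-scale, 12 fractional bits:
	 * (3837 << 11) / 1079 = 7283, roughly 16/9 in 1/4096 units. */
	printf("ft = %u\n", scl_cal_scale(1920, 1080, 12));

	/* 2160 -> 480: four source lines can be skipped per output line,
	 * leaving a 540 -> 480 factor that stays above 1. */
	printf("vskip = %d\n", scl_get_vskiplines(2160, 480));
	return 0;
}
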
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 859ccb658601..e9272b0a8592 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -248,7 +248,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
 	lcdc_write(sdev, LDDDSR, value);
 
 	/* Setup planes. */
-	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+	drm_for_each_legacy_plane(plane, dev) {
 		if (plane->crtc == crtc)
 			shmob_drm_plane_setup(plane);
 	}
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f0f1e4ee2d92..e27490b492a5 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -1,12 +1,11 @@
 sticompositor-y := \
-	sti_layer.o \
 	sti_mixer.o \
 	sti_gdp.o \
 	sti_vid.o \
 	sti_cursor.o \
 	sti_compositor.o \
-	sti_drm_crtc.o \
-	sti_drm_plane.o
+	sti_crtc.o \
+	sti_plane.o
 
 stihdmi-y := sti_hdmi.o \
 	sti_hdmi_tx3g0c55phy.o \
@@ -24,4 +23,4 @@ obj-$(CONFIG_DRM_STI) = \
 	sticompositor.o \
 	sti_hqvdp.o \
 	stidvo.o \
-	sti_drm_drv.o
+	sti_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 43215d3020fb..c652627b1bca 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -14,10 +14,12 @@
 #include <drm/drmP.h>
 
 #include "sti_compositor.h"
-#include "sti_drm_crtc.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_plane.h"
+#include "sti_crtc.h"
+#include "sti_cursor.h"
+#include "sti_drv.h"
 #include "sti_gdp.h"
+#include "sti_plane.h"
+#include "sti_vid.h"
 #include "sti_vtg.h"
 
 /*
@@ -31,7 +33,7 @@ struct sti_compositor_data stih407_compositor_data = {
 			{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
 			{STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
 			{STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
-			{STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
+			{STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700},
 			{STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
 			{STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
 	},
@@ -53,14 +55,29 @@ struct sti_compositor_data stih416_compositor_data = {
 	},
 };
 
-static int sti_compositor_init_subdev(struct sti_compositor *compo,
-		struct sti_compositor_subdev_descriptor *desc,
-		unsigned int array_size)
+static int sti_compositor_bind(struct device *dev,
+			       struct device *master,
+			       void *data)
 {
-	unsigned int i, mixer_id = 0, layer_id = 0;
+	struct sti_compositor *compo = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;
+	struct sti_private *dev_priv = drm_dev->dev_private;
+	struct drm_plane *cursor = NULL;
+	struct drm_plane *primary = NULL;
+	struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
+	unsigned int array_size = compo->data.nb_subdev;
+
+	dev_priv->compo = compo;
 
+	/* Register mixer subdev and video subdev first */
 	for (i = 0; i < array_size; i++) {
 		switch (desc[i].type) {
+		case STI_VID_SUBDEV:
+			compo->vid[vid_id++] =
+			    sti_vid_create(compo->dev, desc[i].id,
+					   compo->regs + desc[i].offset);
+			break;
 		case STI_MIXER_MAIN_SUBDEV:
 		case STI_MIXER_AUX_SUBDEV:
 			compo->mixer[mixer_id++] =
@@ -68,83 +85,68 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
 					     compo->regs + desc[i].offset);
 			break;
 		case STI_GPD_SUBDEV:
-		case STI_VID_SUBDEV:
 		case STI_CURSOR_SUBDEV:
-			compo->layer[layer_id++] =
-			    sti_layer_create(compo->dev, desc[i].id,
-					     compo->regs + desc[i].offset);
+			/* Nothing to do, wait for the second round */
 			break;
 		default:
 			DRM_ERROR("Unknow subdev compoment type\n");
 			return 1;
 		}
-
 	}
-	compo->nb_mixers = mixer_id;
-	compo->nb_layers = layer_id;
-
-	return 0;
-}
-
-static int sti_compositor_bind(struct device *dev, struct device *master,
-	void *data)
-{
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
-	unsigned int i, crtc = 0, plane = 0;
-	struct sti_drm_private *dev_priv = drm_dev->dev_private;
-	struct drm_plane *cursor = NULL;
-	struct drm_plane *primary = NULL;
 
-	dev_priv->compo = compo;
-
-	for (i = 0; i < compo->nb_layers; i++) {
-		if (compo->layer[i]) {
-			enum sti_layer_desc desc = compo->layer[i]->desc;
-			enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
-			enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
+	/* Register the other subdevs, create crtc and planes */
+	for (i = 0; i < array_size; i++) {
+		enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
 
-			if (crtc < compo->nb_mixers)
-				plane_type = DRM_PLANE_TYPE_PRIMARY;
+		if (crtc_id < mixer_id)
+			plane_type = DRM_PLANE_TYPE_PRIMARY;
 
-			switch (type) {
-			case STI_CUR:
-				cursor = sti_drm_plane_init(drm_dev,
-						compo->layer[i],
-						1, DRM_PLANE_TYPE_CURSOR);
-				break;
-			case STI_GDP:
-			case STI_VID:
-				primary = sti_drm_plane_init(drm_dev,
-						compo->layer[i],
-						(1 << compo->nb_mixers) - 1,
-						plane_type);
-				plane++;
+		switch (desc[i].type) {
+		case STI_MIXER_MAIN_SUBDEV:
+		case STI_MIXER_AUX_SUBDEV:
+		case STI_VID_SUBDEV:
+			/* Nothing to do, already done during the first round */
+			break;
+		case STI_CURSOR_SUBDEV:
+			cursor = sti_cursor_create(drm_dev, compo->dev,
+						   desc[i].id,
+						   compo->regs + desc[i].offset,
+						   1);
+			if (!cursor) {
+				DRM_ERROR("Can't create CURSOR plane\n");
 				break;
-			case STI_BCK:
-			case STI_VDP:
+			}
+			break;
+		case STI_GPD_SUBDEV:
+			primary = sti_gdp_create(drm_dev, compo->dev,
+						 desc[i].id,
+						 compo->regs + desc[i].offset,
+						 (1 << mixer_id) - 1,
+						 plane_type);
+			if (!primary) {
+				DRM_ERROR("Can't create GDP plane\n");
 				break;
 			}
+			break;
+		default:
+			DRM_ERROR("Unknown subdev compoment type\n");
+			return 1;
+		}
 
-			/* The first planes are reserved for primary planes*/
-			if (crtc < compo->nb_mixers && primary) {
-				sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
-						primary, cursor);
-				crtc++;
-				cursor = NULL;
-				primary = NULL;
-			}
+		/* The first planes are reserved for primary planes */
+		if (crtc_id < mixer_id && primary) {
+			sti_crtc_init(drm_dev, compo->mixer[crtc_id],
+				      primary, cursor);
+			crtc_id++;
+			cursor = NULL;
+			primary = NULL;
 		}
 	}
 
-	drm_vblank_init(drm_dev, crtc);
+	drm_vblank_init(drm_dev, crtc_id);
 	/* Allow usage of vblank without having to call drm_irq_install */
 	drm_dev->irq_enabled = 1;
 
-	DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
-			 crtc, plane);
-	DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
-
 	return 0;
 }
 
@@ -179,7 +181,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
 	struct device_node *vtg_np;
 	struct sti_compositor *compo;
 	struct resource *res;
-	int err;
 
 	compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
 	if (!compo) {
@@ -187,7 +188,7 @@ static int sti_compositor_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 	compo->dev = dev;
-	compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb;
+	compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
 
 	/* populate data structure depending on compatibility */
 	BUG_ON(!of_match_node(compositor_of_match, np)->data);
@@ -251,12 +252,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
 	if (vtg_np)
 		compo->vtg_aux = of_vtg_find(vtg_np);
 
-	/* Initialize compositor subdevices */
-	err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
-					 compo->data.nb_subdev);
-	if (err)
-		return err;
-
 	platform_set_drvdata(pdev, compo);
 
 	return component_add(&pdev->dev, &sti_compositor_ops);
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 019eb44c62cc..1a4a73dab11e 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -12,13 +12,13 @@
 #include <linux/clk.h>
 #include <linux/kernel.h>
 
-#include "sti_layer.h"
 #include "sti_mixer.h"
+#include "sti_plane.h"
 
 #define WAIT_NEXT_VSYNC_MS      50 /*ms*/
 
-#define STI_MAX_LAYER 8
 #define STI_MAX_MIXER 2
+#define STI_MAX_VID   1
 
 enum sti_compositor_subdev_type {
 	STI_MIXER_MAIN_SUBDEV,
@@ -59,11 +59,9 @@ struct sti_compositor_data {
  * @rst_main: reset control of the main path
  * @rst_aux: reset control of the aux path
  * @mixer: array of mixers
+ * @vid: array of vids
  * @vtg_main: vtg for main data path
  * @vtg_aux: vtg for auxillary data path
- * @layer: array of layers
- * @nb_mixers: number of mixers for this compositor
- * @nb_layers: number of layers (GDP,VID,...) for this compositor
  * @vtg_vblank_nb: callback for VTG VSYNC notification
  */
 struct sti_compositor {
@@ -77,11 +75,9 @@ struct sti_compositor {
 	struct reset_control *rst_main;
 	struct reset_control *rst_aux;
 	struct sti_mixer *mixer[STI_MAX_MIXER];
+	struct sti_vid *vid[STI_MAX_VID];
 	struct sti_vtg *vtg_main;
 	struct sti_vtg *vtg_aux;
-	struct sti_layer *layer[STI_MAX_LAYER];
-	int nb_mixers;
-	int nb_layers;
 	struct notifier_block vtg_vblank_nb;
 };
 
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 6b641c5a2ec7..018ffc970e96 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -15,22 +15,20 @@
 #include <drm/drm_plane_helper.h>
 
 #include "sti_compositor.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
+#include "sti_drv.h"
+#include "sti_vid.h"
 #include "sti_vtg.h"
 
-static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	DRM_DEBUG_KMS("\n");
-}
-
-static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
+static void sti_crtc_enable(struct drm_crtc *crtc)
 {
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
 	struct device *dev = mixer->dev;
 	struct sti_compositor *compo = dev_get_drvdata(dev);
 
-	mixer->enabled = true;
+	DRM_DEBUG_DRIVER("\n");
+
+	mixer->status = STI_MIXER_READY;
 
 	/* Prepare and enable the compo IP clock */
 	if (mixer->id == STI_MIXER_MAIN) {
@@ -41,45 +39,28 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
 			DRM_INFO("Failed to prepare/enable compo_aux clk\n");
 	}
 
-	sti_mixer_clear_all_layers(mixer);
+	drm_crtc_vblank_on(crtc);
 }
 
-static void sti_drm_crtc_commit(struct drm_crtc *crtc)
+static void sti_crtc_disabling(struct drm_crtc *crtc)
 {
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	struct sti_layer *layer;
-
-	if ((!mixer || !compo)) {
-		DRM_ERROR("Can not find mixer or compositor)\n");
-		return;
-	}
 
-	/* get GDP which is reserved to the CRTC FB */
-	layer = to_sti_layer(crtc->primary);
-	if (layer)
-		sti_layer_commit(layer);
-	else
-		DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
-
-	/* Enable layer on mixer */
-	if (sti_mixer_set_layer_status(mixer, layer, true))
-		DRM_ERROR("Can not enable layer at mixer\n");
+	DRM_DEBUG_DRIVER("\n");
 
-	drm_crtc_vblank_on(crtc);
+	mixer->status = STI_MIXER_DISABLING;
 }
 
-static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-				    const struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode)
+static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
 {
 	/* accept the provided drm_display_mode, do not fix it up */
 	return true;
 }
 
 static int
-sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
+sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
 {
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
 	struct device *dev = mixer->dev;
@@ -122,22 +103,19 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
 
 	res = sti_mixer_active_video_area(mixer, &crtc->mode);
 	if (res) {
-		DRM_ERROR("Can not set active video area\n");
+		DRM_ERROR("Can't set active video area\n");
 		return -EINVAL;
 	}
 
 	return res;
 }
 
-static void sti_drm_crtc_disable(struct drm_crtc *crtc)
+static void sti_crtc_disable(struct drm_crtc *crtc)
 {
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
 	struct device *dev = mixer->dev;
 	struct sti_compositor *compo = dev_get_drvdata(dev);
 
-	if (!mixer->enabled)
-		return;
-
 	DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
 
 	/* Disable Background */
@@ -154,17 +132,18 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
 		clk_disable_unprepare(compo->clk_compo_aux);
 	}
 
-	mixer->enabled = false;
+	mixer->status = STI_MIXER_DISABLED;
 }
 
 static void
-sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
-	sti_drm_crtc_prepare(crtc);
-	sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
+	sti_crtc_enable(crtc);
+	sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
 }
 
-static void sti_drm_atomic_begin(struct drm_crtc *crtc)
+static void sti_crtc_atomic_begin(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
 {
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
 
@@ -178,46 +157,109 @@ static void sti_drm_atomic_begin(struct drm_crtc *crtc)
 	}
 }
 
-static void sti_drm_atomic_flush(struct drm_crtc *crtc)
+static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
 {
+	struct drm_device *drm_dev = crtc->dev;
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
+	struct drm_plane *p;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	/* perform plane actions */
+	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+		struct sti_plane *plane = to_sti_plane(p);
+
+		switch (plane->status) {
+		case STI_PLANE_UPDATED:
+			/* update the planes tagged as updated */
+			DRM_DEBUG_DRIVER("update plane %s\n",
+					 sti_plane_to_str(plane));
+
+			if (sti_mixer_set_plane_depth(mixer, plane)) {
+				DRM_ERROR("Cannot set plane %s depth\n",
+					  sti_plane_to_str(plane));
+				break;
+			}
+
+			if (sti_mixer_set_plane_status(mixer, plane, true)) {
+				DRM_ERROR("Cannot enable plane %s at mixer\n",
+					  sti_plane_to_str(plane));
+				break;
+			}
+
+			/* if plane is HQVDP_0 then commit the vid[0] */
+			if (plane->desc == STI_HQVDP_0)
+				sti_vid_commit(compo->vid[0], p->state);
+
+			plane->status = STI_PLANE_READY;
+
+			break;
+		case STI_PLANE_DISABLING:
+			/* disabling sequence for planes tagged as disabling */
+			DRM_DEBUG_DRIVER("disable plane %s from mixer\n",
+					 sti_plane_to_str(plane));
+
+			if (sti_mixer_set_plane_status(mixer, plane, false)) {
+				DRM_ERROR("Cannot disable plane %s at mixer\n",
+					  sti_plane_to_str(plane));
+				continue;
+			}
+
+			if (plane->desc == STI_CURSOR)
+				/* mark the plane as disabled */
+				plane->status = STI_PLANE_DISABLED;
+			else
+				/* mark the plane for flushing */
+				plane->status = STI_PLANE_FLUSHING;
+
+			/* if plane is HQVDP_0 then disable the vid[0] */
+			if (plane->desc == STI_HQVDP_0)
+				sti_vid_disable(compo->vid[0]);
+
+			break;
+		default:
+			/* Other status cases are not handled */
+			break;
+		}
+	}
 }
 
 static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
-	.dpms = sti_drm_crtc_dpms,
-	.prepare = sti_drm_crtc_prepare,
-	.commit = sti_drm_crtc_commit,
-	.mode_fixup = sti_drm_crtc_mode_fixup,
+	.enable = sti_crtc_enable,
+	.disable = sti_crtc_disabling,
+	.mode_fixup = sti_crtc_mode_fixup,
 	.mode_set = drm_helper_crtc_mode_set,
-	.mode_set_nofb = sti_drm_crtc_mode_set_nofb,
+	.mode_set_nofb = sti_crtc_mode_set_nofb,
 	.mode_set_base = drm_helper_crtc_mode_set_base,
-	.disable = sti_drm_crtc_disable,
-	.atomic_begin = sti_drm_atomic_begin,
-	.atomic_flush = sti_drm_atomic_flush,
+	.atomic_begin = sti_crtc_atomic_begin,
+	.atomic_flush = sti_crtc_atomic_flush,
 };
 
-static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
+static void sti_crtc_destroy(struct drm_crtc *crtc)
 {
 	DRM_DEBUG_KMS("\n");
 	drm_crtc_cleanup(crtc);
 }
 
-static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
-				     struct drm_property *property,
-				     uint64_t val)
+static int sti_crtc_set_property(struct drm_crtc *crtc,
+				 struct drm_property *property,
+				 uint64_t val)
 {
 	DRM_DEBUG_KMS("\n");
 	return 0;
 }
 
-int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
-			   unsigned long event, void *data)
+int sti_crtc_vblank_cb(struct notifier_block *nb,
+		       unsigned long event, void *data)
 {
 	struct drm_device *drm_dev;
 	struct sti_compositor *compo =
 		container_of(nb, struct sti_compositor, vtg_vblank_nb);
 	int *crtc = data;
 	unsigned long flags;
-	struct sti_drm_private *priv;
+	struct sti_private *priv;
 
 	drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
 	priv = drm_dev->dev_private;
@@ -233,21 +275,38 @@ int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
 	spin_lock_irqsave(&drm_dev->event_lock, flags);
 	if (compo->mixer[*crtc]->pending_event) {
 		drm_send_vblank_event(drm_dev, -1,
-				compo->mixer[*crtc]->pending_event);
+				      compo->mixer[*crtc]->pending_event);
 		drm_vblank_put(drm_dev, *crtc);
 		compo->mixer[*crtc]->pending_event = NULL;
 	}
 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
 
+	if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) {
+		struct drm_plane *p;
+
+		/* Disable mixer only if all overlay planes (GDP and VDP)
+		 * are disabled */
+		list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+			struct sti_plane *plane = to_sti_plane(p);
+
+			if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP)
+				if (plane->status != STI_PLANE_DISABLED)
+					return 0;
+		}
+		sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc);
+	}
+
 	return 0;
 }
 
-int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
 {
-	struct sti_drm_private *dev_priv = dev->dev_private;
+	struct sti_private *dev_priv = dev->dev_private;
 	struct sti_compositor *compo = dev_priv->compo;
 	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
 
+	DRM_DEBUG_DRIVER("\n");
+
 	if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
 			compo->vtg_main : compo->vtg_aux,
 			vtg_vblank_nb, crtc)) {
@@ -257,11 +316,11 @@ int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
 
 	return 0;
 }
-EXPORT_SYMBOL(sti_drm_crtc_enable_vblank);
+EXPORT_SYMBOL(sti_crtc_enable_vblank);
 
-void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
 {
-	struct sti_drm_private *priv = dev->dev_private;
+	struct sti_private *priv = drm_dev->dev_private;
 	struct sti_compositor *compo = priv->compo;
 	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
 
@@ -273,23 +332,23 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
 
 	/* free the resources of the pending requests */
 	if (compo->mixer[crtc]->pending_event) {
-		drm_vblank_put(dev, crtc);
+		drm_vblank_put(drm_dev, crtc);
 		compo->mixer[crtc]->pending_event = NULL;
 	}
 }
-EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
+EXPORT_SYMBOL(sti_crtc_disable_vblank);
 
 static struct drm_crtc_funcs sti_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
-	.destroy = sti_drm_crtc_destroy,
-	.set_property = sti_drm_crtc_set_property,
+	.destroy = sti_crtc_destroy,
+	.set_property = sti_crtc_set_property,
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
-bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
+bool sti_crtc_is_main(struct drm_crtc *crtc)
 {
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
 
@@ -298,18 +357,18 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
 
 	return false;
 }
-EXPORT_SYMBOL(sti_drm_crtc_is_main);
+EXPORT_SYMBOL(sti_crtc_is_main);
 
-int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
-		struct drm_plane *primary, struct drm_plane *cursor)
+int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+		  struct drm_plane *primary, struct drm_plane *cursor)
 {
 	struct drm_crtc *crtc = &mixer->drm_crtc;
 	int res;
 
 	res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
-			&sti_crtc_funcs);
+					&sti_crtc_funcs);
 	if (res) {
-		DRM_ERROR("Can not initialze CRTC\n");
+		DRM_ERROR("Can't initialze CRTC\n");
 		return -EINVAL;
 	}
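
Taken together, atomic_flush and the vblank callback above implement a small
per-plane state machine: UPDATED planes become READY once enabled on the
mixer, DISABLING planes become FLUSHING (or DISABLED immediately, for the
cursor) once pulled from it, and the mixer itself is only switched off from
the vblank handler after every overlay plane has reached DISABLED. Condensed
(status names taken from the patch; the enum declaration here is a sketch,
everything else is elided):

enum sti_plane_status {
	STI_PLANE_READY,
	STI_PLANE_UPDATED,
	STI_PLANE_DISABLING,
	STI_PLANE_FLUSHING,
	STI_PLANE_DISABLED,
};

/* Sketch of the transitions driven in sti_crtc_atomic_flush(). */
static enum sti_plane_status flush_transition(enum sti_plane_status status,
					      int is_cursor)
{
	switch (status) {
	case STI_PLANE_UPDATED:
		return STI_PLANE_READY;      /* enabled on the mixer */
	case STI_PLANE_DISABLING:
		/* the cursor has no flush step to wait for */
		return is_cursor ? STI_PLANE_DISABLED : STI_PLANE_FLUSHING;
	default:
		return status;               /* other states untouched */
	}
}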
 
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
new file mode 100644
index 000000000000..51963e6ddbe7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_CRTC_H_
+#define _STI_CRTC_H_
+
+#include <drm/drmP.h>
+
+struct sti_mixer;
+
+int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+		  struct drm_plane *primary, struct drm_plane *cursor);
+int sti_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void sti_crtc_disable_vblank(struct drm_device *dev, int crtc);
+int sti_crtc_vblank_cb(struct notifier_block *nb,
+		       unsigned long event, void *data);
+bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
+
+#endif
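
/*
 * User-space model of the vblank plumbing declared in sti_crtc.h above:
 * the CRTC registers a callback with the VTG (video timing generator)
 * via sti_vtg_register_client(), and the VTG then invokes every client
 * at each vsync.  Everything below is invented for illustration; only
 * the register/notify shape matches the driver.
 */
#include <stdio.h>

#define MAX_CLIENTS 4

typedef int (*notifier_fn)(unsigned long event, void *data);

static notifier_fn clients[MAX_CLIENTS];
static int nb_clients;

static int vtg_register_client(notifier_fn fn)
{
	if (nb_clients == MAX_CLIENTS)
		return -1;
	clients[nb_clients++] = fn;
	return 0;
}

static void vtg_vsync(void)
{
	int i;

	for (i = 0; i < nb_clients; i++)
		clients[i](0, NULL);	/* 0 stands in for a VSYNC event id */
}

static int crtc_vblank_cb(unsigned long event, void *data)
{
	printf("vblank event %lu\n", event);
	return 0;
}

int main(void)
{
	vtg_register_client(crtc_vblank_cb);
	vtg_vsync();	/* in the driver this is driven by the display IRQ */
	return 0;
}
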
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 010eaee60bf7..dd1032195051 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -7,8 +7,14 @@
  */
 #include <drm/drmP.h>
 
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "sti_compositor.h"
 #include "sti_cursor.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vtg.h"
 
 /* Registers */
@@ -42,15 +48,19 @@ struct dma_pixmap {
 /**
  * STI Cursor structure
  *
- * @layer:      layer structure
- * @width:      cursor width
- * @height:     cursor height
- * @clut:       color look up table
- * @clut_paddr: color look up table physical address
- * @pixmap:     pixmap dma buffer (clut8-format cursor)
+ * @plane:        sti_plane structure
+ * @dev:          driver device
+ * @regs:         cursor registers
+ * @width:        cursor width
+ * @height:       cursor height
+ * @clut:         color look up table
+ * @clut_paddr:   color look up table physical address
+ * @pixmap:       pixmap dma buffer (clut8-format cursor)
  */
 struct sti_cursor {
-	struct sti_layer layer;
+	struct sti_plane plane;
+	struct device *dev;
+	void __iomem *regs;
 	unsigned int width;
 	unsigned int height;
 	unsigned short *clut;
@@ -62,22 +72,10 @@ static const uint32_t cursor_supported_formats[] = {
 	DRM_FORMAT_ARGB8888,
 };
 
-#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer)
-
-static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
-{
-	return cursor_supported_formats;
-}
-
-static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
-{
-	return ARRAY_SIZE(cursor_supported_formats);
-}
+#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
 
-static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
+static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
 {
-	struct sti_cursor *cursor = to_sti_cursor(layer);
-	u32 *src = layer->vaddr;
 	u8  *dst = cursor->pixmap.base;
 	unsigned int i, j;
 	u32 a, r, g, b;
@@ -96,127 +94,155 @@ static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
 	}
 }
 
-static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
+static void sti_cursor_init(struct sti_cursor *cursor)
 {
-	struct sti_cursor *cursor = to_sti_cursor(layer);
-	struct drm_display_mode *mode = layer->mode;
+	unsigned short *base = cursor->clut;
+	unsigned int a, r, g, b;
+
+	/* Assign CLUT values, ARGB4444 format */
+	for (a = 0; a < 4; a++)
+		for (r = 0; r < 4; r++)
+			for (g = 0; g < 4; g++)
+				for (b = 0; b < 4; b++)
+					*base++ = (a * 5) << 12 |
+						  (r * 5) << 8 |
+						  (g * 5) << 4 |
+						  (b * 5);
+}
+
+static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
+				     struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_cursor *cursor = to_sti_cursor(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &crtc->mode;
+	int dst_x = state->crtc_x;
+	int dst_y = state->crtc_y;
+	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src_x and src_y are in 16.16 fixed-point format */
+	int src_w = state->src_w >> 16;
+	int src_h = state->src_h >> 16;
+	bool first_prepare = plane->status == STI_PLANE_DISABLED;
+	struct drm_gem_cma_object *cma_obj;
 	u32 y, x;
 	u32 val;
 
-	DRM_DEBUG_DRIVER("\n");
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
 
-	dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+	dev_dbg(cursor->dev, "%s %s\n", __func__,
+		sti_plane_to_str(plane));
 
-	if (layer->src_w < STI_CURS_MIN_SIZE ||
-	    layer->src_h < STI_CURS_MIN_SIZE ||
-	    layer->src_w > STI_CURS_MAX_SIZE ||
-	    layer->src_h > STI_CURS_MAX_SIZE) {
+	if (src_w < STI_CURS_MIN_SIZE ||
+	    src_h < STI_CURS_MIN_SIZE ||
+	    src_w > STI_CURS_MAX_SIZE ||
+	    src_h > STI_CURS_MAX_SIZE) {
 		DRM_ERROR("Invalid cursor size (%dx%d)\n",
-				layer->src_w, layer->src_h);
-		return -EINVAL;
+				src_w, src_h);
+		return;
 	}
 
 	/* If the cursor size has changed, re-allocate the pixmap */
 	if (!cursor->pixmap.base ||
-	    (cursor->width != layer->src_w) ||
-	    (cursor->height != layer->src_h)) {
-		cursor->width = layer->src_w;
-		cursor->height = layer->src_h;
+	    (cursor->width != src_w) ||
+	    (cursor->height != src_h)) {
+		cursor->width = src_w;
+		cursor->height = src_h;
 
 		if (cursor->pixmap.base)
-			dma_free_writecombine(layer->dev,
+			dma_free_writecombine(cursor->dev,
 					      cursor->pixmap.size,
 					      cursor->pixmap.base,
 					      cursor->pixmap.paddr);
 
 		cursor->pixmap.size = cursor->width * cursor->height;
 
-		cursor->pixmap.base = dma_alloc_writecombine(layer->dev,
+		cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
 							cursor->pixmap.size,
 							&cursor->pixmap.paddr,
 							GFP_KERNEL | GFP_DMA);
 		if (!cursor->pixmap.base) {
 			DRM_ERROR("Failed to allocate memory for pixmap\n");
-			return -ENOMEM;
+			return;
 		}
 	}
 
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+	if (!cma_obj) {
+		DRM_ERROR("Can't get CMA GEM object for fb\n");
+		return;
+	}
+
 	/* Convert ARGB8888 to CLUT8 */
-	sti_cursor_argb8888_to_clut8(layer);
+	sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
 
 	/* AWS and AWE depend on the mode */
 	y = sti_vtg_get_line_number(*mode, 0);
 	x = sti_vtg_get_pixel_number(*mode, 0);
 	val = y << 16 | x;
-	writel(val, layer->regs + CUR_AWS);
+	writel(val, cursor->regs + CUR_AWS);
 	y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
 	x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
 	val = y << 16 | x;
-	writel(val, layer->regs + CUR_AWE);
+	writel(val, cursor->regs + CUR_AWE);
 
 	if (first_prepare) {
 		/* Set and fetch CLUT */
-		writel(cursor->clut_paddr, layer->regs + CUR_CML);
-		writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL);
+		writel(cursor->clut_paddr, cursor->regs + CUR_CML);
+		writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
 	}
 
-	return 0;
-}
-
-static int sti_cursor_commit_layer(struct sti_layer *layer)
-{
-	struct sti_cursor *cursor = to_sti_cursor(layer);
-	struct drm_display_mode *mode = layer->mode;
-	u32 ydo, xdo;
-
-	dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
 	/* Set memory location, size, and position */
-	writel(cursor->pixmap.paddr, layer->regs + CUR_PML);
-	writel(cursor->width, layer->regs + CUR_PMP);
-	writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE);
+	writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
+	writel(cursor->width, cursor->regs + CUR_PMP);
+	writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
 
-	ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
-	xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y);
-	writel((ydo << 16) | xdo, layer->regs + CUR_VPO);
+	y = sti_vtg_get_line_number(*mode, dst_y);
+	x = sti_vtg_get_pixel_number(*mode, dst_x);
+	writel((y << 16) | x, cursor->regs + CUR_VPO);
 
-	return 0;
+	plane->status = STI_PLANE_UPDATED;
 }
 
-static int sti_cursor_disable_layer(struct sti_layer *layer)
+static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
+				      struct drm_plane_state *oldstate)
 {
-	return 0;
-}
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
 
-static void sti_cursor_init(struct sti_layer *layer)
-{
-	struct sti_cursor *cursor = to_sti_cursor(layer);
-	unsigned short *base = cursor->clut;
-	unsigned int a, r, g, b;
+	if (!drm_plane->crtc) {
+		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+				 drm_plane->base.id);
+		return;
+	}
 
-	/* Assign CLUT values, ARGB444 format */
-	for (a = 0; a < 4; a++)
-		for (r = 0; r < 4; r++)
-			for (g = 0; g < 4; g++)
-				for (b = 0; b < 4; b++)
-					*base++ = (a * 5) << 12 |
-						  (r * 5) << 8 |
-						  (g * 5) << 4 |
-						  (b * 5);
+	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->base.id, sti_plane_to_str(plane));
+
+	plane->status = STI_PLANE_DISABLING;
 }
 
-static const struct sti_layer_funcs cursor_ops = {
-	.get_formats = sti_cursor_get_formats,
-	.get_nb_formats = sti_cursor_get_nb_formats,
-	.init = sti_cursor_init,
-	.prepare = sti_cursor_prepare_layer,
-	.commit = sti_cursor_commit_layer,
-	.disable = sti_cursor_disable_layer,
+static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
+	.atomic_update = sti_cursor_atomic_update,
+	.atomic_disable = sti_cursor_atomic_disable,
 };
 
-struct sti_layer *sti_cursor_create(struct device *dev)
+struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
+				    struct device *dev, int desc,
+				    void __iomem *baseaddr,
+				    unsigned int possible_crtcs)
 {
 	struct sti_cursor *cursor;
+	size_t size;
+	int res;
 
 	cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
 	if (!cursor) {
@@ -225,18 +251,43 @@ struct sti_layer *sti_cursor_create(struct device *dev)
 	}
 
 	/* Allocate clut buffer */
-	cursor->clut = dma_alloc_writecombine(dev,
-			0x100 * sizeof(unsigned short),
-			&cursor->clut_paddr,
-			GFP_KERNEL | GFP_DMA);
+	size = 0x100 * sizeof(unsigned short);
+	cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
+					      GFP_KERNEL | GFP_DMA);
 
 	if (!cursor->clut) {
 		DRM_ERROR("Failed to allocate memory for cursor clut\n");
-		devm_kfree(dev, cursor);
-		return NULL;
+		goto err_clut;
+	}
+
+	cursor->dev = dev;
+	cursor->regs = baseaddr;
+	cursor->plane.desc = desc;
+	cursor->plane.status = STI_PLANE_DISABLED;
+
+	sti_cursor_init(cursor);
+
+	res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
+				       possible_crtcs,
+				       &sti_plane_helpers_funcs,
+				       cursor_supported_formats,
+				       ARRAY_SIZE(cursor_supported_formats),
+				       DRM_PLANE_TYPE_CURSOR);
+	if (res) {
+		DRM_ERROR("Failed to initialize universal plane\n");
+		goto err_plane;
 	}
 
-	cursor->layer.ops = &cursor_ops;
+	drm_plane_helper_add(&cursor->plane.drm_plane,
+			     &sti_cursor_helpers_funcs);
+
+	sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
+
+	return &cursor->plane.drm_plane;
 
-	return (struct sti_layer *)cursor;
+err_plane:
+	dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
+err_clut:
+	devm_kfree(dev, cursor);
+	return NULL;
 }
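
/*
 * Runnable sketch of the cursor colour handling above.  The CLUT entry
 * layout comes straight from sti_cursor_init(): each 2-bit channel value
 * (0..3) is multiplied by 5, mapping it onto 0, 5, 10, 15 and thus
 * spanning the full 4-bit ARGB4444 range.  The per-pixel extraction in
 * argb8888_to_clut8() keeps the two most significant bits of each
 * channel; its exact shifts are an assumption, since the body of
 * sti_cursor_argb8888_to_clut8() is not part of this hunk.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t argb8888_to_clut8(uint32_t px)
{
	uint8_t a = (px >> 30) & 3;	/* top 2 bits of each channel */
	uint8_t r = (px >> 22) & 3;
	uint8_t g = (px >> 14) & 3;
	uint8_t b = (px >> 6) & 3;

	return a << 6 | r << 4 | g << 2 | b;
}

static uint16_t clut_entry(uint8_t a, uint8_t r, uint8_t g, uint8_t b)
{
	return (a * 5) << 12 | (r * 5) << 8 | (g * 5) << 4 | (b * 5);
}

int main(void)
{
	uint32_t px = 0xFFFF0000;	/* opaque red */
	uint8_t idx = argb8888_to_clut8(px);

	/* Prints "index 0xf0 -> clut 0xff00": alpha=15, red=15 in ARGB4444 */
	printf("index %#x -> clut %#06x\n", idx,
	       clut_entry(idx >> 6 & 3, idx >> 4 & 3, idx >> 2 & 3, idx & 3));
	return 0;
}
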
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
index 3c9827404f27..2ee5c10e8b33 100644
--- a/drivers/gpu/drm/sti/sti_cursor.h
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -7,6 +7,9 @@
 #ifndef _STI_CURSOR_H_
 #define _STI_CURSOR_H_
 
-struct sti_layer *sti_cursor_create(struct device *dev);
+struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
+				    struct device *dev, int desc,
+				    void __iomem *baseaddr,
+				    unsigned int possible_crtcs);
 
 #endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
deleted file mode 100644
index caca8b14f017..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_crtc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_CRTC_H_
-#define _STI_DRM_CRTC_H_
-
-#include <drm/drmP.h>
-
-struct sti_mixer;
-
-int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
-		struct drm_plane *primary, struct drm_plane *cursor);
-int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
-void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
-int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
-		unsigned long event, void *data);
-bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
deleted file mode 100644
index 64d4ed43dda3..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_plane.h"
-#include "sti_vtg.h"
-
-enum sti_layer_desc sti_layer_default_zorder[] = {
-	STI_GDP_0,
-	STI_VID_0,
-	STI_GDP_1,
-	STI_VID_1,
-	STI_GDP_2,
-	STI_GDP_3,
-};
-
-/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
-
-static int
-sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-		     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-		     unsigned int crtc_w, unsigned int crtc_h,
-		     uint32_t src_x, uint32_t src_y,
-		     uint32_t src_w, uint32_t src_h)
-{
-	struct sti_layer *layer = to_sti_layer(plane);
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	int res;
-
-	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
-		      crtc->base.id, sti_mixer_to_str(mixer),
-		      plane->base.id, sti_layer_to_str(layer));
-	DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
-
-	res = sti_mixer_set_layer_depth(mixer, layer);
-	if (res) {
-		DRM_ERROR("Can not set layer depth\n");
-		return res;
-	}
-
-	/* src_x are in 16.16 format. */
-	res = sti_layer_prepare(layer, crtc, fb,
-			&crtc->mode, mixer->id,
-			crtc_x, crtc_y, crtc_w, crtc_h,
-			src_x >> 16, src_y >> 16,
-			src_w >> 16, src_h >> 16);
-	if (res) {
-		DRM_ERROR("Layer prepare failed\n");
-		return res;
-	}
-
-	res = sti_layer_commit(layer);
-	if (res) {
-		DRM_ERROR("Layer commit failed\n");
-		return res;
-	}
-
-	res = sti_mixer_set_layer_status(mixer, layer, true);
-	if (res) {
-		DRM_ERROR("Can not enable layer at mixer\n");
-		return res;
-	}
-
-	return 0;
-}
-
-static int sti_drm_disable_plane(struct drm_plane *plane)
-{
-	struct sti_layer *layer;
-	struct sti_mixer *mixer;
-	int lay_res, mix_res;
-
-	if (!plane->crtc) {
-		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
-		return 0;
-	}
-	layer = to_sti_layer(plane);
-	mixer = to_sti_mixer(plane->crtc);
-
-	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-			plane->crtc->base.id, sti_mixer_to_str(mixer),
-			plane->base.id, sti_layer_to_str(layer));
-
-	/* Disable layer at mixer level */
-	mix_res = sti_mixer_set_layer_status(mixer, layer, false);
-	if (mix_res)
-		DRM_ERROR("Can not disable layer at mixer\n");
-
-	/* Wait a while to be sure that a Vsync event is received */
-	msleep(WAIT_NEXT_VSYNC_MS);
-
-	/* Then disable layer itself */
-	lay_res = sti_layer_disable(layer);
-	if (lay_res)
-		DRM_ERROR("Layer disable failed\n");
-
-	if (lay_res || mix_res)
-		return -EINVAL;
-
-	return 0;
-}
-
-static void sti_drm_plane_destroy(struct drm_plane *plane)
-{
-	DRM_DEBUG_DRIVER("\n");
-
-	drm_plane_helper_disable(plane);
-	drm_plane_cleanup(plane);
-}
-
-static int sti_drm_plane_set_property(struct drm_plane *plane,
-				      struct drm_property *property,
-				      uint64_t val)
-{
-	struct drm_device *dev = plane->dev;
-	struct sti_drm_private *private = dev->dev_private;
-	struct sti_layer *layer = to_sti_layer(plane);
-
-	DRM_DEBUG_DRIVER("\n");
-
-	if (property == private->plane_zorder_property) {
-		layer->zorder = val;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static struct drm_plane_funcs sti_drm_plane_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = sti_drm_plane_destroy,
-	.set_property = sti_drm_plane_set_property,
-	.reset = drm_atomic_helper_plane_reset,
-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-static int sti_drm_plane_prepare_fb(struct drm_plane *plane,
-				  struct drm_framebuffer *fb,
-				  const struct drm_plane_state *new_state)
-{
-	return 0;
-}
-
-static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
-				   struct drm_framebuffer *fb,
-				   const struct drm_plane_state *old_fb)
-{
-}
-
-static int sti_drm_plane_atomic_check(struct drm_plane *plane,
-				      struct drm_plane_state *state)
-{
-	return 0;
-}
-
-static void sti_drm_plane_atomic_update(struct drm_plane *plane,
-					struct drm_plane_state *oldstate)
-{
-	struct drm_plane_state *state = plane->state;
-
-	sti_drm_update_plane(plane, state->crtc, state->fb,
-			    state->crtc_x, state->crtc_y,
-			    state->crtc_w, state->crtc_h,
-			    state->src_x, state->src_y,
-			    state->src_w, state->src_h);
-}
-
-static void sti_drm_plane_atomic_disable(struct drm_plane *plane,
-					 struct drm_plane_state *oldstate)
-{
-	sti_drm_disable_plane(plane);
-}
-
-static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
-	.prepare_fb = sti_drm_plane_prepare_fb,
-	.cleanup_fb = sti_drm_plane_cleanup_fb,
-	.atomic_check = sti_drm_plane_atomic_check,
-	.atomic_update = sti_drm_plane_atomic_update,
-	.atomic_disable = sti_drm_plane_atomic_disable,
-};
-
-static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
-						 uint64_t default_val)
-{
-	struct drm_device *dev = plane->dev;
-	struct sti_drm_private *private = dev->dev_private;
-	struct drm_property *prop;
-	struct sti_layer *layer = to_sti_layer(plane);
-
-	prop = private->plane_zorder_property;
-	if (!prop) {
-		prop = drm_property_create_range(dev, 0, "zpos", 0,
-						 GAM_MIXER_NB_DEPTH_LEVEL - 1);
-		if (!prop)
-			return;
-
-		private->plane_zorder_property = prop;
-	}
-
-	drm_object_attach_property(&plane->base, prop, default_val);
-	layer->zorder = default_val;
-}
-
-struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
-				     struct sti_layer *layer,
-				     unsigned int possible_crtcs,
-				     enum drm_plane_type type)
-{
-	int err, i;
-	uint64_t default_zorder = 0;
-
-	err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
-			     &sti_drm_plane_funcs,
-			     sti_layer_get_formats(layer),
-			     sti_layer_get_nb_formats(layer), type);
-	if (err) {
-		DRM_ERROR("Failed to initialize plane\n");
-		return NULL;
-	}
-
-	drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs);
-
-	for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
-		if (sti_layer_default_zorder[i] == layer->desc)
-			break;
-
-	default_zorder = i + 1;
-
-	if (type == DRM_PLANE_TYPE_OVERLAY)
-		sti_drm_plane_attach_zorder_property(&layer->plane,
-				default_zorder);
-
-	DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
-			 layer->plane.base.id,
-			 sti_layer_to_str(layer), default_zorder);
-
-	return &layer->plane;
-}
-EXPORT_SYMBOL(sti_drm_plane_init);
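
/*
 * Stand-alone restatement of the default z-order computation deleted
 * above (it presumably moves into the new sti_plane.c, which is not part
 * of this excerpt): a plane's default depth is its position in the fixed
 * stacking array plus one, depth 0 being the background.  The enum
 * values are placeholders.
 */
#include <stdio.h>

enum desc { GDP_0, VID_0, GDP_1, VID_1, GDP_2, GDP_3 };

static const enum desc default_zorder[] = {
	GDP_0, VID_0, GDP_1, VID_1, GDP_2, GDP_3,
};

static unsigned int default_zpos(enum desc d)
{
	unsigned int i;

	for (i = 0; i < sizeof(default_zorder) / sizeof(default_zorder[0]); i++)
		if (default_zorder[i] == d)
			break;
	return i + 1;	/* the background occupies depth 0 */
}

int main(void)
{
	printf("GDP_2 default zpos: %u\n", default_zpos(GDP_2));	/* 5 */
	return 0;
}
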
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
deleted file mode 100644
index 4f191839f2a7..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_PLANE_H_
-#define _STI_DRM_PLANE_H_
-
-#include <drm/drmP.h>
-
-struct sti_layer;
-
-struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
-		struct sti_layer *layer,
-		unsigned int possible_crtcs,
-		enum drm_plane_type type);
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 59d558b400b3..6f4af6a8ba1b 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -18,8 +18,8 @@
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 
-#include "sti_drm_drv.h"
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
+#include "sti_drv.h"
 
 #define DRIVER_NAME	"sti"
 #define DRIVER_DESC	"STMicroelectronics SoC DRM"
@@ -30,15 +30,15 @@
 #define STI_MAX_FB_HEIGHT	4096
 #define STI_MAX_FB_WIDTH	4096
 
-static void sti_drm_atomic_schedule(struct sti_drm_private *private,
-				  struct drm_atomic_state *state)
+static void sti_atomic_schedule(struct sti_private *private,
+				struct drm_atomic_state *state)
 {
 	private->commit.state = state;
 	schedule_work(&private->commit.work);
 }
 
-static void sti_drm_atomic_complete(struct sti_drm_private *private,
-				  struct drm_atomic_state *state)
+static void sti_atomic_complete(struct sti_private *private,
+				struct drm_atomic_state *state)
 {
 	struct drm_device *drm = private->drm_dev;
 
@@ -68,18 +68,18 @@ static void sti_drm_atomic_complete(struct sti_drm_private *private,
 	drm_atomic_state_free(state);
 }
 
-static void sti_drm_atomic_work(struct work_struct *work)
+static void sti_atomic_work(struct work_struct *work)
 {
-	struct sti_drm_private *private = container_of(work,
-			struct sti_drm_private, commit.work);
+	struct sti_private *private = container_of(work,
+			struct sti_private, commit.work);
 
-	sti_drm_atomic_complete(private, private->commit.state);
+	sti_atomic_complete(private, private->commit.state);
 }
 
-static int sti_drm_atomic_commit(struct drm_device *drm,
-			       struct drm_atomic_state *state, bool async)
+static int sti_atomic_commit(struct drm_device *drm,
+			     struct drm_atomic_state *state, bool async)
 {
-	struct sti_drm_private *private = drm->dev_private;
+	struct sti_private *private = drm->dev_private;
 	int err;
 
 	err = drm_atomic_helper_prepare_planes(drm, state);
@@ -99,21 +99,21 @@ static int sti_drm_atomic_commit(struct drm_device *drm,
 	drm_atomic_helper_swap_state(drm, state);
 
 	if (async)
-		sti_drm_atomic_schedule(private, state);
+		sti_atomic_schedule(private, state);
 	else
-		sti_drm_atomic_complete(private, state);
+		sti_atomic_complete(private, state);
 
 	mutex_unlock(&private->commit.lock);
 	return 0;
 }
 
-static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
+static struct drm_mode_config_funcs sti_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = sti_drm_atomic_commit,
+	.atomic_commit = sti_atomic_commit,
 };
 
-static void sti_drm_mode_config_init(struct drm_device *dev)
+static void sti_mode_config_init(struct drm_device *dev)
 {
 	dev->mode_config.min_width = 0;
 	dev->mode_config.min_height = 0;
@@ -126,15 +126,15 @@ static void sti_drm_mode_config_init(struct drm_device *dev)
 	dev->mode_config.max_width = STI_MAX_FB_HEIGHT;
 	dev->mode_config.max_height = STI_MAX_FB_WIDTH;
 
-	dev->mode_config.funcs = &sti_drm_mode_config_funcs;
+	dev->mode_config.funcs = &sti_mode_config_funcs;
 }
 
-static int sti_drm_load(struct drm_device *dev, unsigned long flags)
+static int sti_load(struct drm_device *dev, unsigned long flags)
 {
-	struct sti_drm_private *private;
+	struct sti_private *private;
 	int ret;
 
-	private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
 	if (!private) {
 		DRM_ERROR("Failed to allocate private\n");
 		return -ENOMEM;
@@ -143,12 +143,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
 	private->drm_dev = dev;
 
 	mutex_init(&private->commit.lock);
-	INIT_WORK(&private->commit.work, sti_drm_atomic_work);
+	INIT_WORK(&private->commit.work, sti_atomic_work);
 
 	drm_mode_config_init(dev);
 	drm_kms_helper_poll_init(dev);
 
-	sti_drm_mode_config_init(dev);
+	sti_mode_config_init(dev);
 
 	ret = component_bind_all(dev->dev, dev);
 	if (ret) {
@@ -162,13 +162,13 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
 
 #ifdef CONFIG_DRM_STI_FBDEV
 	drm_fbdev_cma_init(dev, 32,
-		   dev->mode_config.num_crtc,
-		   dev->mode_config.num_connector);
+			   dev->mode_config.num_crtc,
+			   dev->mode_config.num_connector);
 #endif
 	return 0;
 }
 
-static const struct file_operations sti_drm_driver_fops = {
+static const struct file_operations sti_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.mmap = drm_gem_cma_mmap,
@@ -181,33 +181,33 @@ static const struct file_operations sti_drm_driver_fops = {
 	.release = drm_release,
 };
 
-static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
-						struct drm_gem_object *obj,
-						int flags)
+static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
+					    struct drm_gem_object *obj,
+					    int flags)
 {
 	/* we want to be able to write in mmapped buffer */
 	flags |= O_RDWR;
 	return drm_gem_prime_export(dev, obj, flags);
 }
 
-static struct drm_driver sti_drm_driver = {
+static struct drm_driver sti_driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
 	    DRIVER_GEM | DRIVER_PRIME,
-	.load = sti_drm_load,
+	.load = sti_load,
 	.gem_free_object = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
 	.dumb_create = drm_gem_cma_dumb_create,
 	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
-	.fops = &sti_drm_driver_fops,
+	.fops = &sti_driver_fops,
 
 	.get_vblank_counter = drm_vblank_count,
-	.enable_vblank = sti_drm_crtc_enable_vblank,
-	.disable_vblank = sti_drm_crtc_disable_vblank,
+	.enable_vblank = sti_crtc_enable_vblank,
+	.disable_vblank = sti_crtc_disable_vblank,
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export = sti_drm_gem_prime_export,
+	.gem_prime_export = sti_gem_prime_export,
 	.gem_prime_import = drm_gem_prime_import,
 	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
 	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
@@ -227,30 +227,32 @@ static int compare_of(struct device *dev, void *data)
 	return dev->of_node == data;
 }
 
-static int sti_drm_bind(struct device *dev)
+static int sti_bind(struct device *dev)
 {
-	return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
+	return drm_platform_init(&sti_driver, to_platform_device(dev));
 }
 
-static void sti_drm_unbind(struct device *dev)
+static void sti_unbind(struct device *dev)
 {
 	drm_put_dev(dev_get_drvdata(dev));
 }
 
-static const struct component_master_ops sti_drm_ops = {
-	.bind = sti_drm_bind,
-	.unbind = sti_drm_unbind,
+static const struct component_master_ops sti_ops = {
+	.bind = sti_bind,
+	.unbind = sti_unbind,
 };
 
-static int sti_drm_master_probe(struct platform_device *pdev)
+static int sti_platform_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct device_node *node = dev->parent->of_node;
+	struct device_node *node = dev->of_node;
 	struct device_node *child_np;
 	struct component_match *match = NULL;
 
 	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
 
+	of_platform_populate(node, NULL, NULL, dev);
+
 	child_np = of_get_next_available_child(node, NULL);
 
 	while (child_np) {
@@ -259,68 +261,33 @@ static int sti_drm_master_probe(struct platform_device *pdev)
 		child_np = of_get_next_available_child(node, child_np);
 	}
 
-	return component_master_add_with_match(dev, &sti_drm_ops, match);
-}
-
-static int sti_drm_master_remove(struct platform_device *pdev)
-{
-	component_master_del(&pdev->dev, &sti_drm_ops);
-	return 0;
+	return component_master_add_with_match(dev, &sti_ops, match);
 }
 
-static struct platform_driver sti_drm_master_driver = {
-	.probe = sti_drm_master_probe,
-	.remove = sti_drm_master_remove,
-	.driver = {
-		.name = DRIVER_NAME "__master",
-	},
-};
-
-static int sti_drm_platform_probe(struct platform_device *pdev)
+static int sti_platform_remove(struct platform_device *pdev)
 {
-	struct device *dev = &pdev->dev;
-	struct device_node *node = dev->of_node;
-	struct platform_device *master;
-
-	of_platform_populate(node, NULL, NULL, dev);
-
-	platform_driver_register(&sti_drm_master_driver);
-	master = platform_device_register_resndata(dev,
-			DRIVER_NAME "__master", -1,
-			NULL, 0, NULL, 0);
-	if (IS_ERR(master))
-               return PTR_ERR(master);
-
-	platform_set_drvdata(pdev, master);
-	return 0;
-}
-
-static int sti_drm_platform_remove(struct platform_device *pdev)
-{
-	struct platform_device *master = platform_get_drvdata(pdev);
-
+	component_master_del(&pdev->dev, &sti_ops);
 	of_platform_depopulate(&pdev->dev);
-	platform_device_unregister(master);
-	platform_driver_unregister(&sti_drm_master_driver);
+
 	return 0;
 }
 
-static const struct of_device_id sti_drm_dt_ids[] = {
+static const struct of_device_id sti_dt_ids[] = {
 	{ .compatible = "st,sti-display-subsystem", },
 	{ /* end node */ },
 };
-MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);
+MODULE_DEVICE_TABLE(of, sti_dt_ids);
 
-static struct platform_driver sti_drm_platform_driver = {
-	.probe = sti_drm_platform_probe,
-	.remove = sti_drm_platform_remove,
+static struct platform_driver sti_platform_driver = {
+	.probe = sti_platform_probe,
+	.remove = sti_platform_remove,
 	.driver = {
 		.name = DRIVER_NAME,
-		.of_match_table = sti_drm_dt_ids,
+		.of_match_table = sti_dt_ids,
 	},
 };
 
-module_platform_driver(sti_drm_platform_driver);
+module_platform_driver(sti_platform_driver);
 
 MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
 MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
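
/*
 * User-space model of the commit path renamed above: sti_atomic_commit()
 * either hands the swapped state to a worker (schedule_work() in the
 * driver) or applies it inline.  This sketch substitutes a detached
 * pthread for the kernel workqueue and invents all the names; only the
 * dispatch shape matches.  Build with -lpthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct state { int id; };

static void complete(struct state *s)
{
	printf("applying state %d\n", s->id);
}

static void *work(void *arg)
{
	complete(arg);
	return NULL;
}

static void commit(struct state *s, bool async)
{
	if (async) {
		pthread_t t;

		pthread_create(&t, NULL, work, s);	/* ~schedule_work() */
		pthread_detach(t);
	} else {
		complete(s);				/* apply inline */
	}
}

int main(void)
{
	struct state s = { .id = 1 };

	commit(&s, true);
	sleep(1);	/* give the worker a chance to run before exiting */
	return 0;
}
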
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index c413aa3ff402..9372f69e1859 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -4,8 +4,8 @@
  * License terms:  GNU General Public License (GPL), version 2
  */
 
-#ifndef _STI_DRM_DRV_H_
-#define _STI_DRM_DRV_H_
+#ifndef _STI_DRV_H_
+#define _STI_DRV_H_
 
 #include <drm/drmP.h>
 
@@ -20,7 +20,7 @@ struct sti_tvout;
  * @plane_zorder_property: z-order property for CRTC planes
  * @drm_dev:               drm device
  */
-struct sti_drm_private {
+struct sti_private {
 	struct sti_compositor *compo;
 	struct drm_property *plane_zorder_property;
 	struct drm_device *drm_dev;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 087906fd8846..9365670427ad 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -9,9 +9,12 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
 #include "sti_compositor.h"
 #include "sti_gdp.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vtg.h"
 
 #define ALPHASWITCH     BIT(6)
@@ -26,7 +29,7 @@
 #define GDP_XBGR8888    (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
 #define GDP_ARGB8565    0x04
 #define GDP_ARGB8888    0x05
-#define GDP_ABGR8888	(GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
+#define GDP_ABGR8888    (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
 #define GDP_ARGB1555    0x06
 #define GDP_ARGB4444    0x07
 #define GDP_CLUT8       0x0B
@@ -53,8 +56,8 @@
 #define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
 #define GAM_GDP_SIZE_MAX        0x7FF
 
-#define GDP_NODE_NB_BANK	2
-#define GDP_NODE_PER_FIELD	2
+#define GDP_NODE_NB_BANK        2
+#define GDP_NODE_PER_FIELD      2
 
 struct sti_gdp_node {
 	u32 gam_gdp_ctl;
@@ -85,16 +88,20 @@ struct sti_gdp_node_list {
 /**
  * STI GDP structure
  *
- * @layer:		layer structure
+ * @plane:              sti_plane structure
+ * @dev:                driver device
+ * @regs:               gdp registers
  * @clk_pix:            pixel clock for the current gdp
  * @clk_main_parent:    gdp parent clock if main path used
  * @clk_aux_parent:     gdp parent clock if aux path used
  * @vtg_field_nb:       callback for VTG FIELD (top or bottom) notification
  * @is_curr_top:        true if the current node processed is the top field
- * @node_list:		array of node list
+ * @node_list:          array of node lists
  */
 struct sti_gdp {
-	struct sti_layer layer;
+	struct sti_plane plane;
+	struct device *dev;
+	void __iomem *regs;
 	struct clk *clk_pix;
 	struct clk *clk_main_parent;
 	struct clk *clk_aux_parent;
@@ -103,7 +110,7 @@ struct sti_gdp {
 	struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
 };
 
-#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
+#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
 
 static const uint32_t gdp_supported_formats[] = {
 	DRM_FORMAT_XRGB8888,
@@ -120,16 +127,6 @@ static const uint32_t gdp_supported_formats[] = {
 	DRM_FORMAT_C8,
 };
 
-static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
-{
-	return gdp_supported_formats;
-}
-
-static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
-{
-	return ARRAY_SIZE(gdp_supported_formats);
-}
-
 static int sti_gdp_fourcc2format(int fourcc)
 {
 	switch (fourcc) {
@@ -175,20 +172,19 @@ static int sti_gdp_get_alpharange(int format)
 
 /**
  * sti_gdp_get_free_nodes
- * @layer: gdp layer
+ * @gdp: gdp pointer
  *
  * Look for a GDP node list that is not currently read by the HW.
  *
  * RETURNS:
  * Pointer to the free GDP node list
  */
-static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
+static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
 {
 	int hw_nvn;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
 	unsigned int i;
 
-	hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
 	if (!hw_nvn)
 		goto end;
 
@@ -199,7 +195,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
 
 	/* in hazardous cases restart with the first node */
 	DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
-			sti_layer_to_str(layer), hw_nvn);
+			sti_plane_to_str(&gdp->plane), hw_nvn);
 
 end:
 	return &gdp->node_list[0];
@@ -207,7 +203,7 @@ end:
 
 /**
  * sti_gdp_get_current_nodes
- * @layer: GDP layer
+ * @gdp: gdp pointer
  *
  * Look for GDP nodes that are currently read by the HW.
  *
@@ -215,13 +211,12 @@ end:
  * Pointer to the current GDP node list
  */
 static
-struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
+struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
 {
 	int hw_nvn;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
 	unsigned int i;
 
-	hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
 	if (!hw_nvn)
 		goto end;
 
@@ -232,205 +227,25 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
 
 end:
 	DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
-				hw_nvn, sti_layer_to_str(layer));
+				hw_nvn, sti_plane_to_str(&gdp->plane));
 
 	return NULL;
 }
 
 /**
- * sti_gdp_prepare_layer
- * @lay: gdp layer
- * @first_prepare: true if it is the first time this function is called
- *
- * Update the free GDP node list according to the layer properties.
- *
- * RETURNS:
- * 0 on success.
- */
-static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
-{
-	struct sti_gdp_node_list *list;
-	struct sti_gdp_node *top_field, *btm_field;
-	struct drm_display_mode *mode = layer->mode;
-	struct device *dev = layer->dev;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	int format;
-	unsigned int depth, bpp;
-	int rate = mode->clock * 1000;
-	int res;
-	u32 ydo, xdo, yds, xds;
-
-	list = sti_gdp_get_free_nodes(layer);
-	top_field = list->top_field;
-	btm_field = list->btm_field;
-
-	dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
-			sti_layer_to_str(layer), top_field, btm_field);
-
-	/* Build the top field from layer params */
-	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
-	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
-	format = sti_gdp_fourcc2format(layer->format);
-	if (format == -1) {
-		DRM_ERROR("Format not supported by GDP %.4s\n",
-			  (char *)&layer->format);
-		return 1;
-	}
-	top_field->gam_gdp_ctl |= format;
-	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
-	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
-
-	/* pixel memory location */
-	drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
-	top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
-	top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
-	top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
-
-	/* input parameters */
-	top_field->gam_gdp_pmp = layer->pitches[0];
-	top_field->gam_gdp_size =
-	    clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
-	    clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
-
-	/* output parameters */
-	ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
-	yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
-	xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
-	xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
-	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
-	top_field->gam_gdp_vps = (yds << 16) | xds;
-
-	/* Same content and chained together */
-	memcpy(btm_field, top_field, sizeof(*btm_field));
-	top_field->gam_gdp_nvn = list->btm_field_paddr;
-	btm_field->gam_gdp_nvn = list->top_field_paddr;
-
-	/* Interlaced mode */
-	if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
-		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
-		    layer->pitches[0];
-
-	if (first_prepare) {
-		/* Register gdp callback */
-		if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
-				compo->vtg_main : compo->vtg_aux,
-				&gdp->vtg_field_nb, layer->mixer_id)) {
-			DRM_ERROR("Cannot register VTG notifier\n");
-			return 1;
-		}
-
-		/* Set and enable gdp clock */
-		if (gdp->clk_pix) {
-			struct clk *clkp;
-			/* According to the mixer used, the gdp pixel clock
-			 * should have a different parent clock. */
-			if (layer->mixer_id == STI_MIXER_MAIN)
-				clkp = gdp->clk_main_parent;
-			else
-				clkp = gdp->clk_aux_parent;
-
-			if (clkp)
-				clk_set_parent(gdp->clk_pix, clkp);
-
-			res = clk_set_rate(gdp->clk_pix, rate);
-			if (res < 0) {
-				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
-						rate);
-				return 1;
-			}
-
-			if (clk_prepare_enable(gdp->clk_pix)) {
-				DRM_ERROR("Failed to prepare/enable gdp\n");
-				return 1;
-			}
-		}
-	}
-
-	return 0;
-}
-
-/**
- * sti_gdp_commit_layer
- * @lay: gdp layer
- *
- * Update the NVN field of the 'right' field of the current GDP node (being
- * used by the HW) with the address of the updated ('free') top field GDP node.
- * - In interlaced mode the 'right' field is the bottom field as we update
- *   frames starting from their top field
- * - In progressive mode, we update both bottom and top fields which are
- *   equal nodes.
- * At the next VSYNC, the updated node list will be used by the HW.
- *
- * RETURNS:
- * 0 on success.
- */
-static int sti_gdp_commit_layer(struct sti_layer *layer)
-{
-	struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
-	struct sti_gdp_node *updated_top_node = updated_list->top_field;
-	struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	u32 dma_updated_top = updated_list->top_field_paddr;
-	u32 dma_updated_btm = updated_list->btm_field_paddr;
-	struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
-
-	dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
-			sti_layer_to_str(layer),
-			updated_top_node, updated_btm_node);
-	dev_dbg(layer->dev, "Current NVN:0x%X\n",
-		readl(layer->regs + GAM_GDP_NVN_OFFSET));
-	dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
-		(unsigned long)layer->paddr,
-		readl(layer->regs + GAM_GDP_PML_OFFSET));
-
-	if (curr_list == NULL) {
-		/* First update or invalid node should directly write in the
-		 * hw register */
-		DRM_DEBUG_DRIVER("%s first update (or invalid node)",
-				sti_layer_to_str(layer));
-
-		writel(gdp->is_curr_top == true ?
-				dma_updated_btm : dma_updated_top,
-				layer->regs + GAM_GDP_NVN_OFFSET);
-		return 0;
-	}
-
-	if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		if (gdp->is_curr_top == true) {
-			/* Do not update in the middle of the frame, but
-			 * postpone the update after the bottom field has
-			 * been displayed */
-			curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
-		} else {
-			/* Direct update to avoid one frame delay */
-			writel(dma_updated_top,
-				layer->regs + GAM_GDP_NVN_OFFSET);
-		}
-	} else {
-		/* Direct update for progressive to avoid one frame delay */
-		writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
-	}
-
-	return 0;
-}
-
-/**
- * sti_gdp_disable_layer
- * @lay: gdp layer
+ * sti_gdp_disable
+ * @gdp: gdp pointer
  *
  * Disable a GDP.
- *
- * RETURNS:
- * 0 on success.
  */
-static int sti_gdp_disable_layer(struct sti_layer *layer)
+static void sti_gdp_disable(struct sti_gdp *gdp)
 {
+	struct drm_plane *drm_plane = &gdp->plane.drm_plane;
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
 	unsigned int i;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	struct sti_compositor *compo = dev_get_drvdata(layer->dev);
 
-	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
 
 	/* Set the nodes as 'to be ignored on mixer' */
 	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
@@ -438,14 +253,14 @@ static int sti_gdp_disable_layer(struct sti_layer *layer)
 		gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
 	}
 
-	if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
+	if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
 			compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
 		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
 
 	if (gdp->clk_pix)
 		clk_disable_unprepare(gdp->clk_pix);
 
-	return 0;
+	gdp->plane.status = STI_PLANE_DISABLED;
 }
 
 /**
@@ -464,6 +279,14 @@ int sti_gdp_field_cb(struct notifier_block *nb,
 {
 	struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
 
+	if (gdp->plane.status == STI_PLANE_FLUSHING) {
+		/* disabling must be synchronized with the vsync event */
+		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
+				 sti_plane_to_str(&gdp->plane));
+
+		sti_gdp_disable(gdp);
+	}
+
 	switch (event) {
 	case VTG_TOP_FIELD_EVENT:
 		gdp->is_curr_top = true;
@@ -479,10 +302,9 @@ int sti_gdp_field_cb(struct notifier_block *nb,
 	return 0;
 }
 
-static void sti_gdp_init(struct sti_layer *layer)
+static void sti_gdp_init(struct sti_gdp *gdp)
 {
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	struct device_node *np = layer->dev->of_node;
+	struct device_node *np = gdp->dev->of_node;
 	dma_addr_t dma_addr;
 	void *base;
 	unsigned int i, size;
@@ -490,8 +312,8 @@ static void sti_gdp_init(struct sti_layer *layer)
 	/* Allocate all the nodes within a single memory page */
 	size = sizeof(struct sti_gdp_node) *
 	    GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
-	base = dma_alloc_writecombine(layer->dev,
-			size, &dma_addr, GFP_KERNEL | GFP_DMA);
+	base = dma_alloc_writecombine(gdp->dev,
+				      size, &dma_addr, GFP_KERNEL | GFP_DMA);
 
 	if (!base) {
 		DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -526,7 +348,7 @@ static void sti_gdp_init(struct sti_layer *layer)
 		/* GDP of STiH407 chip have its own pixel clock */
 		char *clk_name;
 
-		switch (layer->desc) {
+		switch (gdp->plane.desc) {
 		case STI_GDP_0:
 			clk_name = "pix_gdp1";
 			break;
@@ -544,32 +366,249 @@ static void sti_gdp_init(struct sti_layer *layer)
 			return;
 		}
 
-		gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
+		gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
 		if (IS_ERR(gdp->clk_pix))
 			DRM_ERROR("Cannot get %s clock\n", clk_name);
 
-		gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
+		gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
 		if (IS_ERR(gdp->clk_main_parent))
 			DRM_ERROR("Cannot get main_parent clock\n");
 
-		gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
+		gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
 		if (IS_ERR(gdp->clk_aux_parent))
 			DRM_ERROR("Cannot get aux_parent clock\n");
 	}
 }
 
-static const struct sti_layer_funcs gdp_ops = {
-	.get_formats = sti_gdp_get_formats,
-	.get_nb_formats = sti_gdp_get_nb_formats,
-	.init = sti_gdp_init,
-	.prepare = sti_gdp_prepare_layer,
-	.commit = sti_gdp_commit_layer,
-	.disable = sti_gdp_disable_layer,
+static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
+				  struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_gdp *gdp = to_sti_gdp(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
+	struct drm_framebuffer *fb = state->fb;
+	bool first_prepare = plane->status == STI_PLANE_DISABLED;
+	struct sti_mixer *mixer;
+	struct drm_display_mode *mode;
+	int dst_x, dst_y, dst_w, dst_h;
+	int src_x, src_y, src_w, src_h;
+	struct drm_gem_cma_object *cma_obj;
+	struct sti_gdp_node_list *list;
+	struct sti_gdp_node_list *curr_list;
+	struct sti_gdp_node *top_field, *btm_field;
+	u32 dma_updated_top;
+	u32 dma_updated_btm;
+	int format;
+	unsigned int depth, bpp;
+	u32 ydo, xdo, yds, xds;
+	int res;
+
+	/* Handle the case where crtc is NULL (plane being disabled) */
+	if (!crtc)
+		return;
+
+	mixer = to_sti_mixer(crtc);
+	mode = &crtc->mode;
+	dst_x = state->crtc_x;
+	dst_y = state->crtc_y;
+	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src_x and src_y are in 16.16 fixed-point format */
+	src_x = state->src_x >> 16;
+	src_y = state->src_y >> 16;
+	src_w = state->src_w >> 16;
+	src_h = state->src_h >> 16;
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+		      sti_plane_to_str(plane),
+		      dst_w, dst_h, dst_x, dst_y,
+		      src_w, src_h, src_x, src_y);
+
+	list = sti_gdp_get_free_nodes(gdp);
+	top_field = list->top_field;
+	btm_field = list->btm_field;
+
+	dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
+		sti_plane_to_str(plane), top_field, btm_field);
+
+	/* build the top field */
+	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
+	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
+	format = sti_gdp_fourcc2format(fb->pixel_format);
+	if (format == -1) {
+		DRM_ERROR("Format not supported by GDP %.4s\n",
+			  (char *)&fb->pixel_format);
+		return;
+	}
+	top_field->gam_gdp_ctl |= format;
+	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
+	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
+
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+	if (!cma_obj) {
+		DRM_ERROR("Can't get CMA GEM object for fb\n");
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+			 (char *)&fb->pixel_format,
+			 (unsigned long)cma_obj->paddr);
+
+	/* pixel memory location */
+	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+	top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
+	top_field->gam_gdp_pml += src_x * (bpp >> 3);
+	top_field->gam_gdp_pml += src_y * fb->pitches[0];
+
+	/* input parameters */
+	top_field->gam_gdp_pmp = fb->pitches[0];
+	top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
+				  clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
+
+	/* output parameters */
+	ydo = sti_vtg_get_line_number(*mode, dst_y);
+	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
+	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
+	top_field->gam_gdp_vps = (yds << 16) | xds;
+
+	/* Same content and chained together */
+	memcpy(btm_field, top_field, sizeof(*btm_field));
+	top_field->gam_gdp_nvn = list->btm_field_paddr;
+	btm_field->gam_gdp_nvn = list->top_field_paddr;
+
+	/* Interlaced mode */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
+					 fb->pitches[0];
+
+	if (first_prepare) {
+		/* Register gdp callback */
+		if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
+				compo->vtg_main : compo->vtg_aux,
+				&gdp->vtg_field_nb, mixer->id)) {
+			DRM_ERROR("Cannot register VTG notifier\n");
+			return;
+		}
+
+		/* Set and enable gdp clock */
+		if (gdp->clk_pix) {
+			struct clk *clkp;
+			int rate = mode->clock * 1000;
+
+			/* Depending on the mixer in use, the gdp pixel clock
+			 * takes a different parent clock. */
+			if (mixer->id == STI_MIXER_MAIN)
+				clkp = gdp->clk_main_parent;
+			else
+				clkp = gdp->clk_aux_parent;
+
+			if (clkp)
+				clk_set_parent(gdp->clk_pix, clkp);
+
+			res = clk_set_rate(gdp->clk_pix, rate);
+			if (res < 0) {
+				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+					  rate);
+				return;
+			}
+
+			if (clk_prepare_enable(gdp->clk_pix)) {
+				DRM_ERROR("Failed to prepare/enable gdp\n");
+				return;
+			}
+		}
+	}
+
+	/* Update the NVN field of the 'right' field of the current GDP node
+	 * (being used by the HW) with the address of the updated ('free') top
+	 * field GDP node.
+	 * - In interlaced mode the 'right' field is the bottom field as we
+	 *   update frames starting from their top field
+	 * - In progressive mode, we update both bottom and top fields which
+	 *   are equal nodes.
+	 * At the next VSYNC, the updated node list will be used by the HW.
+	 */
+	curr_list = sti_gdp_get_current_nodes(gdp);
+	dma_updated_top = list->top_field_paddr;
+	dma_updated_btm = list->btm_field_paddr;
+
+	dev_dbg(gdp->dev, "Current NVN:0x%X\n",
+		readl(gdp->regs + GAM_GDP_NVN_OFFSET));
+	dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
+		(unsigned long)cma_obj->paddr,
+		readl(gdp->regs + GAM_GDP_PML_OFFSET));
+
+	if (!curr_list) {
+		/* First update, or invalid current node: write directly to
+		 * the HW register */
+		DRM_DEBUG_DRIVER("%s first update (or invalid node)",
+				 sti_plane_to_str(plane));
+
+		writel(gdp->is_curr_top ?
+				dma_updated_btm : dma_updated_top,
+				gdp->regs + GAM_GDP_NVN_OFFSET);
+		goto end;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (gdp->is_curr_top) {
+			/* Do not update in the middle of the frame, but
+			 * postpone the update until after the bottom field
+			 * has been displayed */
+			curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
+		} else {
+			/* Direct update to avoid one frame delay */
+			writel(dma_updated_top,
+			       gdp->regs + GAM_GDP_NVN_OFFSET);
+		}
+	} else {
+		/* Direct update for progressive to avoid one frame delay */
+		writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
+	}
+
+end:
+	plane->status = STI_PLANE_UPDATED;
+}
+
+static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
+				   struct drm_plane_state *oldstate)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+	if (!drm_plane->crtc) {
+		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+				 drm_plane->base.id);
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->base.id, sti_plane_to_str(plane));
+
+	plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
+	.atomic_update = sti_gdp_atomic_update,
+	.atomic_disable = sti_gdp_atomic_disable,
 };
 
-struct sti_layer *sti_gdp_create(struct device *dev, int id)
+struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
+				 struct device *dev, int desc,
+				 void __iomem *baseaddr,
+				 unsigned int possible_crtcs,
+				 enum drm_plane_type type)
 {
 	struct sti_gdp *gdp;
+	int res;
 
 	gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
 	if (!gdp) {
@@ -577,8 +616,33 @@ struct sti_layer *sti_gdp_create(struct device *dev, int id)
 		return NULL;
 	}
 
-	gdp->layer.ops = &gdp_ops;
+	gdp->dev = dev;
+	gdp->regs = baseaddr;
+	gdp->plane.desc = desc;
+	gdp->plane.status = STI_PLANE_DISABLED;
+
 	gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
 
-	return (struct sti_layer *)gdp;
+	sti_gdp_init(gdp);
+
+	res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
+				       possible_crtcs,
+				       &sti_plane_helpers_funcs,
+				       gdp_supported_formats,
+				       ARRAY_SIZE(gdp_supported_formats),
+				       type);
+	if (res) {
+		DRM_ERROR("Failed to initialize universal plane\n");
+		goto err;
+	}
+
+	drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);
+
+	sti_plane_init_property(&gdp->plane, type);
+
+	return &gdp->plane.drm_plane;
+
+err:
+	devm_kfree(dev, gdp);
+	return NULL;
 }
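
/*
 * Stand-alone restatement of the NVN update policy moved into
 * sti_gdp_atomic_update() above: while the top field of an interlaced
 * frame is being scanned out, the new node is chained behind the current
 * bottom field instead of being written to GAM_GDP_NVN, so the switch
 * happens only after the full frame; in every other case the register is
 * written directly to avoid a one-frame delay.  Types and addresses are
 * invented for the demo.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int hw_nvn;		/* stands in for GAM_GDP_NVN */
static unsigned int btm_field_nvn;	/* next-node link of the bottom field */

static void update_nvn(bool interlaced, bool curr_is_top, unsigned int new_top)
{
	if (interlaced && curr_is_top)
		btm_field_nvn = new_top;	/* postpone past the bottom field */
	else
		hw_nvn = new_top;		/* direct update, no frame delay */
}

int main(void)
{
	update_nvn(true, true, 0x1000);
	printf("hw=%#x chained=%#x\n", hw_nvn, btm_field_nvn);	/* hw=0 chained=0x1000 */
	update_nvn(false, false, 0x2000);
	printf("hw=%#x\n", hw_nvn);				/* hw=0x2000 */
	return 0;
}
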
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
index 1dab68274ad3..73947a4a8004 100644
--- a/drivers/gpu/drm/sti/sti_gdp.h
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -11,6 +11,9 @@
 
 #include <linux/types.h>
 
-struct sti_layer *sti_gdp_create(struct device *dev, int id);
-
+struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
+				 struct device *dev, int desc,
+				 void __iomem *baseaddr,
+				 unsigned int possible_crtcs,
+				 enum drm_plane_type type);
 #endif
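
/*
 * The >>16 conversions and clamp_val() calls in the cursor and GDP
 * update paths above, restated: DRM plane source coordinates are 16.16
 * fixed point, so the integer part is the top 16 bits, and destination
 * sizes are clamped so the plane never extends past the active display
 * area.  A small runnable illustration with invented values.
 */
#include <stdint.h>
#include <stdio.h>

static int fixed_to_int(uint32_t v)
{
	return v >> 16;		/* drop the fractional 16 bits */
}

static int clamp_dst(int requested, int offset, int display)
{
	int max = display - offset;	/* space left on screen */

	if (requested < 0)
		return 0;
	return requested > max ? max : requested;
}

int main(void)
{
	printf("src_w = %d\n", fixed_to_int(128 << 16));	/* 128 */
	printf("dst_w = %d\n", clamp_dst(1000, 600, 1280));	/* 680 */
	return 0;
}
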
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f28a4d54487c..09e29e43423e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -588,7 +588,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
 	return count;
 
 fail:
-	DRM_ERROR("Can not read HDMI EDID\n");
+	DRM_ERROR("Can't read HDMI EDID\n");
 	return 0;
 }
 
@@ -693,21 +693,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 	struct sti_hdmi_connector *connector;
 	struct drm_connector *drm_connector;
 	struct drm_bridge *bridge;
-	struct device_node *ddc;
 	int err;
 
-	ddc = of_parse_phandle(dev->of_node, "ddc", 0);
-	if (ddc) {
-		hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
-		if (!hdmi->ddc_adapt) {
-			err = -EPROBE_DEFER;
-			of_node_put(ddc);
-			return err;
-		}
-
-		of_node_put(ddc);
-	}
-
 	/* Set the drm device handle */
 	hdmi->drm_dev = drm_dev;
 
@@ -796,6 +783,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
 	struct sti_hdmi *hdmi;
 	struct device_node *np = dev->of_node;
 	struct resource *res;
+	struct device_node *ddc;
 	int ret;
 
 	DRM_INFO("%s\n", __func__);
@@ -804,6 +792,17 @@ static int sti_hdmi_probe(struct platform_device *pdev)
 	if (!hdmi)
 		return -ENOMEM;
 
+	ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
+	if (ddc) {
+		hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
+		if (!hdmi->ddc_adapt) {
+			of_node_put(ddc);
+			return -EPROBE_DEFER;
+		}
+
+		of_node_put(ddc);
+	}
+
 	hdmi->dev = pdev->dev;
 
 	/* Get resources */
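
/*
 * Shape of the probe-defer pattern behind moving the DDC lookup above:
 * a dependency that is missing at probe() time is reported with
 * -EPROBE_DEFER, and the driver core retries the probe later.  That
 * retry only works from probe(), which is why the lookup moves there
 * from the component bind callback.  Minimal user-space model with
 * invented names.
 */
#include <stddef.h>
#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel-internal errno; not in user-space errno.h */

static void *find_i2c_adapter(int present)
{
	static int adapter;

	return present ? (void *)&adapter : NULL;	/* not registered yet */
}

static int probe(int ddc_present)
{
	if (!find_i2c_adapter(ddc_present))
		return -EPROBE_DEFER;	/* the core would re-probe us later */
	return 0;
}

int main(void)
{
	printf("first try: %d\n", probe(0));	/* -517: defer */
	printf("retry:     %d\n", probe(1));	/* 0: bound */
	return 0;
}
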
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b0eb62de1b2e..7c8f9b8bfae1 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -12,11 +12,12 @@
 #include <linux/reset.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 
-#include "sti_drm_plane.h"
-#include "sti_hqvdp.h"
+#include "sti_compositor.h"
 #include "sti_hqvdp_lut.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vtg.h"
 
 /* Firmware name */
@@ -322,8 +323,7 @@ struct sti_hqvdp_cmd {
  * @dev:               driver device
  * @drm_dev:           the drm device
  * @regs:              registers
- * @layer:             layer structure for hqvdp it self
- * @vid_plane:         VID plug used as link with compositor IP
+ * @plane:             plane structure for hqvdp itself
  * @clk:               IP clock
  * @clk_pix_main:      pix main clock
  * @reset:             reset control
@@ -334,13 +334,13 @@ struct sti_hqvdp_cmd {
  * @hqvdp_cmd:         buffer of commands
  * @hqvdp_cmd_paddr:   physical address of hqvdp_cmd
  * @vtg:               vtg for main data path
+ * @xp70_initialized:  true if xp70 is already initialized
  */
 struct sti_hqvdp {
 	struct device *dev;
 	struct drm_device *drm_dev;
 	void __iomem *regs;
-	struct sti_layer layer;
-	struct drm_plane *vid_plane;
+	struct sti_plane plane;
 	struct clk *clk;
 	struct clk *clk_pix_main;
 	struct reset_control *reset;
@@ -351,24 +351,15 @@ struct sti_hqvdp {
 	void *hqvdp_cmd;
 	dma_addr_t hqvdp_cmd_paddr;
 	struct sti_vtg *vtg;
+	bool xp70_initialized;
 };
 
-#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
+#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
 
 static const uint32_t hqvdp_supported_formats[] = {
 	DRM_FORMAT_NV12,
 };
 
-static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
-{
-	return hqvdp_supported_formats;
-}
-
-static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
-{
-	return ARRAY_SIZE(hqvdp_supported_formats);
-}
-
 /**
  * sti_hqvdp_get_free_cmd
  * @hqvdp: hqvdp structure
@@ -484,7 +475,12 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
 
 /**
  * sti_hqvdp_check_hw_scaling
- * @layer: hqvdp layer
+ * @hqvdp: hqvdp pointer
+ * @mode: display mode with timing constraints
+ * @src_w: source width
+ * @src_h: source height
+ * @dst_w: destination width
+ * @dst_h: destination height
  *
  * Check if the HW is able to perform the scaling request
  * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
@@ -498,184 +494,36 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
  * RETURNS:
  * True if the HW can scale.
  */
-static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
+static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
+				       struct drm_display_mode *mode,
+				       int src_w, int src_h,
+				       int dst_w, int dst_h)
 {
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
 	unsigned long lfw;
 	unsigned int inv_zy;
 
-	lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
-	lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
+	lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
+	lfw /= max(src_w, dst_w) * mode->clock / 1000;
 
-	inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
+	inv_zy = DIV_ROUND_UP(src_h, dst_h);
 
 	return (inv_zy <= lfw) ? true : false;
 }
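As a quick sanity check of the CEIL(1/Zy) <= FLOOR(LFW) limit computed above (illustrative numbers; the real HQVDP clock rate is platform-specific), take a 1080i/50 mode (htotal 2640, pixel clock 74250 kHz), an assumed 400 MHz IP clock and a 1920x1080 -> 960x540 downscale:

	/* lfw    = 2640 * (400000000 / 1000000)    = 1056000
	 * lfw   /= max(1920, 960) * 74250 / 1000   -> 1056000 / 142560 = 7
	 * inv_zy = DIV_ROUND_UP(1080, 540)         = 2
	 *
	 * 2 <= 7, so sti_hqvdp_check_hw_scaling() returns true here.
	 */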
 
 /**
- * sti_hqvdp_prepare_layer
- * @layer: hqvdp layer
- * @first_prepare: true if it is the first time this function is called
+ * sti_hqvdp_disable
+ * @hqvdp: hqvdp pointer
  *
- * Prepares a command for the firmware
- *
- * RETURNS:
- * 0 on success.
+ * Disables the HQVDP plane
  */
-static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
-{
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
-	struct sti_hqvdp_cmd *cmd;
-	int scale_h, scale_v;
-	int cmd_offset;
-
-	dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
-	/* prepare and commit VID plane */
-	hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
-					layer->crtc, layer->fb,
-					layer->dst_x, layer->dst_y,
-					layer->dst_w, layer->dst_h,
-					layer->src_x, layer->src_y,
-					layer->src_w, layer->src_h);
-
-	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
-	if (cmd_offset == -1) {
-		DRM_ERROR("No available hqvdp_cmd now\n");
-		return -EBUSY;
-	}
-	cmd = hqvdp->hqvdp_cmd + cmd_offset;
-
-	if (!sti_hqvdp_check_hw_scaling(layer)) {
-		DRM_ERROR("Scaling beyond HW capabilities\n");
-		return -EINVAL;
-	}
-
-	/* Static parameters, defaulting to progressive mode */
-	cmd->top.config = TOP_CONFIG_PROGRESSIVE;
-	cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
-	cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
-	cmd->csdi.config = CSDI_CONFIG_PROG;
-
-	/* VC1RE, FMD bypassed : keep everything set to 0
-	 * IQI/P2I bypassed */
-	cmd->iqi.config = IQI_CONFIG_DFLT;
-	cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
-	cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
-	cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
-
-	/* Buffer planes address */
-	cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
-	cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
-
-	/* Pitches */
-	cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
-			layer->pitches[0];
-	cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
-			layer->pitches[1];
-
-	/* Input / output size
-	 * Align to upper even value */
-	layer->dst_w = ALIGN(layer->dst_w, 2);
-	layer->dst_h = ALIGN(layer->dst_h, 2);
-
-	if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
-	    (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
-	    (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
-	    (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
-		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
-				layer->src_w, layer->src_h,
-				layer->dst_w, layer->dst_h);
-		return -EINVAL;
-	}
-	cmd->top.input_viewport_size = cmd->top.input_frame_size =
-			layer->src_h << 16 | layer->src_w;
-	cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
-	cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
-
-	/* Handle interlaced */
-	if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
-		/* Top field to display */
-		cmd->top.config = TOP_CONFIG_INTER_TOP;
-
-		/* Update pitches and vert size */
-		cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
-					     layer->src_w;
-		cmd->top.luma_processed_pitch *= 2;
-		cmd->top.luma_src_pitch *= 2;
-		cmd->top.chroma_processed_pitch *= 2;
-		cmd->top.chroma_src_pitch *= 2;
-
-		/* Enable directional deinterlacing processing */
-		cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
-		cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
-		cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
-	}
-
-	/* Update hvsrc lut coef */
-	scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
-	sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
-
-	scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
-	sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
-
-	if (first_prepare) {
-		/* Prevent VTG shutdown */
-		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
-			DRM_ERROR("Failed to prepare/enable pix main clk\n");
-			return -ENXIO;
-		}
-
-		/* Register VTG Vsync callback to handle bottom fields */
-		if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
-				sti_vtg_register_client(hqvdp->vtg,
-					&hqvdp->vtg_nb, layer->mixer_id)) {
-			DRM_ERROR("Cannot register VTG notifier\n");
-			return -ENXIO;
-		}
-	}
-
-	return 0;
-}
-
-static int sti_hqvdp_commit_layer(struct sti_layer *layer)
+static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
 {
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
-	int cmd_offset;
-
-	dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
-	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
-	if (cmd_offset == -1) {
-		DRM_ERROR("No available hqvdp_cmd now\n");
-		return -EBUSY;
-	}
-
-	writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
-			hqvdp->regs + HQVDP_MBX_NEXT_CMD);
-
-	hqvdp->curr_field_count++;
-
-	/* Interlaced : get ready to display the bottom field at next Vsync */
-	if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
-		hqvdp->btm_field_pending = true;
-
-	dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
-			__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
-
-	return 0;
-}
-
-static int sti_hqvdp_disable_layer(struct sti_layer *layer)
-{
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
 	int i;
 
-	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
 
 	/* Unregister VTG Vsync callback */
-	if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
-		sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
+	if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
 		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
 
 	/* Set next cmd to NULL */
@@ -691,15 +539,10 @@ static int sti_hqvdp_disable_layer(struct sti_layer *layer)
 	/* VTG can stop now */
 	clk_disable_unprepare(hqvdp->clk_pix_main);
 
-	if (i == POLL_MAX_ATTEMPT) {
+	if (i == POLL_MAX_ATTEMPT)
 		DRM_ERROR("XP70 could not revert to idle\n");
-		return -ENXIO;
-	}
-
-	/* disable VID plane */
-	hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
 
-	return 0;
+	hqvdp->plane.status = STI_PLANE_DISABLED;
 }
 
 /**
@@ -724,6 +567,14 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
 		return 0;
 	}
 
+	if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
+		/* disabling must be synchronized with the vsync event */
+		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
+				 sti_plane_to_str(&hqvdp->plane));
+
+		sti_hqvdp_disable(hqvdp);
+	}
+
 	if (hqvdp->btm_field_pending) {
 		/* Create the btm field command from the current one */
 		btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
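The teardown is therefore split across vsync: sti_hqvdp_atomic_disable() below only marks the plane, and the actual hardware shutdown runs from this notifier once the plane reaches STI_PLANE_FLUSHING. A sketch of the intended sequencing (the DISABLING to FLUSHING transition is assumed to happen in the CRTC commit code, which is outside this diff):

	/*
	 * atomic_disable()         CRTC flush (assumed)     vsync notifier
	 * status = DISABLING  -->  status = FLUSHING   -->  sti_hqvdp_disable()
	 *                                                   status = DISABLED
	 */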
@@ -758,32 +609,10 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
 	return 0;
 }
 
-static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
+static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
 {
-	struct drm_plane *plane;
-
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-		struct sti_layer *layer = to_sti_layer(plane);
-
-		if (layer->desc == id)
-			return plane;
-	}
-
-	return NULL;
-}
-
-static void sti_hqvd_init(struct sti_layer *layer)
-{
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
 	int size;
 
-	/* find the plane macthing with vid 0 */
-	hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
-	if (!hqvdp->vid_plane) {
-		DRM_ERROR("Cannot find Main video layer\n");
-		return;
-	}
-
 	hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
 
 	/* Allocate memory for the VDP commands */
@@ -799,24 +628,213 @@ static void sti_hqvd_init(struct sti_layer *layer)
 	memset(hqvdp->hqvdp_cmd, 0, size);
 }
 
-static const struct sti_layer_funcs hqvdp_ops = {
-	.get_formats = sti_hqvdp_get_formats,
-	.get_nb_formats = sti_hqvdp_get_nb_formats,
-	.init = sti_hqvd_init,
-	.prepare = sti_hqvdp_prepare_layer,
-	.commit = sti_hqvdp_commit_layer,
-	.disable = sti_hqvdp_disable_layer,
+static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
+				    struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &crtc->mode;
+	int dst_x = state->crtc_x;
+	int dst_y = state->crtc_y;
+	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src_x, src_y, src_w and src_h are in 16.16 fixed-point format */
+	int src_x = state->src_x >> 16;
+	int src_y = state->src_y >> 16;
+	int src_w = state->src_w >> 16;
+	int src_h = state->src_h >> 16;
+	bool first_prepare = (plane->status == STI_PLANE_DISABLED);
+	struct drm_gem_cma_object *cma_obj;
+	struct sti_hqvdp_cmd *cmd;
+	int scale_h, scale_v;
+	int cmd_offset;
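	/* Note: drm_plane_state source coordinates are 16.16 fixed point,
	 * hence the ">> 16" conversions above; e.g. src_w = 0x07800000
	 * (1920.0 in 16.16) yields the integer width 1920. */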
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+		      sti_plane_to_str(plane),
+		      dst_w, dst_h, dst_x, dst_y,
+		      src_w, src_h, src_x, src_y);
+
+	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+	if (cmd_offset == -1) {
+		DRM_ERROR("No available hqvdp_cmd now\n");
+		return;
+	}
+	cmd = hqvdp->hqvdp_cmd + cmd_offset;
+
+	if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
+					src_w, src_h,
+					dst_w, dst_h)) {
+		DRM_ERROR("Scaling beyond HW capabilities\n");
+		return;
+	}
+
+	/* Static parameters, defaulting to progressive mode */
+	cmd->top.config = TOP_CONFIG_PROGRESSIVE;
+	cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
+	cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
+	cmd->csdi.config = CSDI_CONFIG_PROG;
+
+	/* VC1RE, FMD bypassed: keep everything set to 0
+	 * IQI/P2I bypassed */
+	cmd->iqi.config = IQI_CONFIG_DFLT;
+	cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
+	cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
+	cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
+
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+	if (!cma_obj) {
+		DRM_ERROR("Can't get CMA GEM object for fb\n");
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+			 (char *)&fb->pixel_format,
+			 (unsigned long)cma_obj->paddr);
+
+	/* Buffer planes address */
+	cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
+	cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
+
+	/* Pitches */
+	cmd->top.luma_processed_pitch = fb->pitches[0];
+	cmd->top.luma_src_pitch = fb->pitches[0];
+	cmd->top.chroma_processed_pitch = fb->pitches[1];
+	cmd->top.chroma_src_pitch = fb->pitches[1];
+
+	/* Input / output size
+	 * Align to upper even value */
+	dst_w = ALIGN(dst_w, 2);
+	dst_h = ALIGN(dst_h, 2);
+
+	if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
+	    (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
+	    (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
+	    (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
+		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+			  src_w, src_h,
+			  dst_w, dst_h);
+		return;
+	}
+
+	cmd->top.input_viewport_size = src_h << 16 | src_w;
+	cmd->top.input_frame_size = src_h << 16 | src_w;
+	cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
+	cmd->top.input_viewport_ori = src_y << 16 | src_x;
+
+	/* Handle interlaced */
+	if (fb->flags & DRM_MODE_FB_INTERLACED) {
+		/* Top field to display */
+		cmd->top.config = TOP_CONFIG_INTER_TOP;
+
+		/* Update pitches and vert size */
+		cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
+		cmd->top.luma_processed_pitch *= 2;
+		cmd->top.luma_src_pitch *= 2;
+		cmd->top.chroma_processed_pitch *= 2;
+		cmd->top.chroma_src_pitch *= 2;
+
+		/* Enable directional deinterlacing processing */
+		cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
+		cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
+		cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
+	}
+
+	/* Update hvsrc lut coef */
+	scale_h = SCALE_FACTOR * dst_w / src_w;
+	sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
+
+	scale_v = SCALE_FACTOR * dst_h / src_h;
+	sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
+
+	if (first_prepare) {
+		/* Prevent VTG shutdown */
+		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+			DRM_ERROR("Failed to prepare/enable pix main clk\n");
+			return;
+		}
+
+		/* Register VTG Vsync callback to handle bottom fields */
+		if (sti_vtg_register_client(hqvdp->vtg,
+					    &hqvdp->vtg_nb,
+					    mixer->id)) {
+			DRM_ERROR("Cannot register VTG notifier\n");
+			return;
+		}
+	}
+
+	writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
+	       hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+	hqvdp->curr_field_count++;
+
+	/* Interlaced : get ready to display the bottom field at next Vsync */
+	if (fb->flags & DRM_MODE_FB_INTERLACED)
+		hqvdp->btm_field_pending = true;
+
+	dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+		__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
+
+	plane->status = STI_PLANE_UPDATED;
+}
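To make the interlaced branch above concrete, a worked example (values chosen purely for illustration):

	/* 1920x1080 NV12 framebuffer, luma pitch 1920 bytes:
	 *   cmd->top.input_frame_size = (1080 / 2) << 16 | 1920  -> 540 lines
	 *   cmd->top.luma_src_pitch   = 1920 * 2                 = 3840 bytes
	 * The firmware advances one doubled pitch per line and so reads
	 * every other line of the buffer, i.e. exactly one field per command.
	 */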
+
+static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
+				     struct drm_plane_state *oldstate)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+	if (!drm_plane->crtc) {
+		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+				 drm_plane->base.id);
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->base.id, sti_plane_to_str(plane));
+
+	plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
+	.atomic_update = sti_hqvdp_atomic_update,
+	.atomic_disable = sti_hqvdp_atomic_disable,
 };
 
-struct sti_layer *sti_hqvdp_create(struct device *dev)
+static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
+					  struct device *dev, int desc)
 {
 	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+	int res;
+
+	hqvdp->plane.desc = desc;
+	hqvdp->plane.status = STI_PLANE_DISABLED;
 
-	hqvdp->layer.ops = &hqvdp_ops;
+	sti_hqvdp_init(hqvdp);
 
-	return &hqvdp->layer;
+	res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
+				       &sti_plane_helpers_funcs,
+				       hqvdp_supported_formats,
+				       ARRAY_SIZE(hqvdp_supported_formats),
+				       DRM_PLANE_TYPE_OVERLAY);
+	if (res) {
+		DRM_ERROR("Failed to initialize universal plane\n");
+		return NULL;
+	}
+
+	drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
+
+	sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
+
+	return &hqvdp->plane.drm_plane;
 }
-EXPORT_SYMBOL(sti_hqvdp_create);
 
 static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
 {
@@ -859,6 +877,12 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
 	} *header;
 
 	DRM_DEBUG_DRIVER("\n");
+
+	if (hqvdp->xp70_initialized) {
+		DRM_INFO("HQVDP XP70 already initialized\n");
+		return;
+	}
+
 	/* Check firmware parts */
 	if (!firmware) {
 		DRM_ERROR("Firmware not available\n");
@@ -946,7 +970,10 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
 	/* Launch Vsync */
 	writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
 
-	DRM_INFO("HQVDP XP70 started\n");
+	DRM_INFO("HQVDP XP70 initialized\n");
+
+	hqvdp->xp70_initialized = true;
+
 out:
 	release_firmware(firmware);
 }
@@ -955,7 +982,7 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
 {
 	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
-	struct sti_layer *layer;
+	struct drm_plane *plane;
 	int err;
 
 	DRM_DEBUG_DRIVER("\n");
@@ -971,13 +998,10 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
 		return err;
 	}
 
-	layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
-	if (!layer) {
+	/* Create HQVDP plane once xp70 is initialized */
+	plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
+	if (!plane)
 		DRM_ERROR("Can't create HQVDP plane\n");
-		return -ENOMEM;
-	}
-
-	sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
deleted file mode 100644
index cd5ecd0a6dea..000000000000
--- a/drivers/gpu/drm/sti/sti_hqvdp.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_HQVDP_H_
-#define _STI_HQVDP_H_
-
-struct sti_layer *sti_hqvdp_create(struct device *dev);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
deleted file mode 100644
index 899104f9d4bc..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_cursor.h"
-#include "sti_gdp.h"
-#include "sti_hqvdp.h"
-#include "sti_layer.h"
-#include "sti_vid.h"
-
-const char *sti_layer_to_str(struct sti_layer *layer)
-{
-	switch (layer->desc) {
-	case STI_GDP_0:
-		return "GDP0";
-	case STI_GDP_1:
-		return "GDP1";
-	case STI_GDP_2:
-		return "GDP2";
-	case STI_GDP_3:
-		return "GDP3";
-	case STI_VID_0:
-		return "VID0";
-	case STI_VID_1:
-		return "VID1";
-	case STI_CURSOR:
-		return "CURSOR";
-	case STI_HQVDP_0:
-		return "HQVDP0";
-	default:
-		return "<UNKNOWN LAYER>";
-	}
-}
-EXPORT_SYMBOL(sti_layer_to_str);
-
-struct sti_layer *sti_layer_create(struct device *dev, int desc,
-				   void __iomem *baseaddr)
-{
-
-	struct sti_layer *layer = NULL;
-
-	switch (desc & STI_LAYER_TYPE_MASK) {
-	case STI_GDP:
-		layer = sti_gdp_create(dev, desc);
-		break;
-	case STI_VID:
-		layer = sti_vid_create(dev);
-		break;
-	case STI_CUR:
-		layer = sti_cursor_create(dev);
-		break;
-	case STI_VDP:
-		layer = sti_hqvdp_create(dev);
-		break;
-	}
-
-	if (!layer) {
-		DRM_ERROR("Failed to create layer\n");
-		return NULL;
-	}
-
-	layer->desc = desc;
-	layer->dev = dev;
-	layer->regs = baseaddr;
-
-	layer->ops->init(layer);
-
-	DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
-
-	return layer;
-}
-EXPORT_SYMBOL(sti_layer_create);
-
-int sti_layer_prepare(struct sti_layer *layer,
-		      struct drm_crtc *crtc,
-		      struct drm_framebuffer *fb,
-		      struct drm_display_mode *mode, int mixer_id,
-		      int dest_x, int dest_y, int dest_w, int dest_h,
-		      int src_x, int src_y, int src_w, int src_h)
-{
-	int ret;
-	unsigned int i;
-	struct drm_gem_cma_object *cma_obj;
-
-	if (!layer || !fb || !mode) {
-		DRM_ERROR("Null fb, layer or mode\n");
-		return 1;
-	}
-
-	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	if (!cma_obj) {
-		DRM_ERROR("Can't get CMA GEM object for fb\n");
-		return 1;
-	}
-
-	layer->crtc = crtc;
-	layer->fb = fb;
-	layer->mode = mode;
-	layer->mixer_id = mixer_id;
-	layer->dst_x = dest_x;
-	layer->dst_y = dest_y;
-	layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
-	layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
-	layer->src_x = src_x;
-	layer->src_y = src_y;
-	layer->src_w = src_w;
-	layer->src_h = src_h;
-	layer->format = fb->pixel_format;
-	layer->vaddr = cma_obj->vaddr;
-	layer->paddr = cma_obj->paddr;
-	for (i = 0; i < 4; i++) {
-		layer->pitches[i] = fb->pitches[i];
-		layer->offsets[i] = fb->offsets[i];
-	}
-
-	DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
-			 sti_layer_to_str(layer),
-			 layer->mixer_id);
-	DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
-			 sti_layer_to_str(layer),
-			 layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
-			 layer->src_w, layer->src_h, layer->src_x,
-			 layer->src_y);
-
-	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
-			 (char *)&layer->format, (unsigned long)layer->paddr);
-
-	if (!layer->ops->prepare)
-		goto err_no_prepare;
-
-	ret = layer->ops->prepare(layer, !layer->enabled);
-	if (!ret)
-		layer->enabled = true;
-
-	return ret;
-
-err_no_prepare:
-	DRM_ERROR("Cannot prepare\n");
-	return 1;
-}
-
-int sti_layer_commit(struct sti_layer *layer)
-{
-	if (!layer)
-		return 1;
-
-	if (!layer->ops->commit)
-		goto err_no_commit;
-
-	return layer->ops->commit(layer);
-
-err_no_commit:
-	DRM_ERROR("Cannot commit\n");
-	return 1;
-}
-
-int sti_layer_disable(struct sti_layer *layer)
-{
-	int ret;
-
-	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
-	if (!layer)
-		return 1;
-
-	if (!layer->enabled)
-		return 0;
-
-	if (!layer->ops->disable)
-		goto err_no_disable;
-
-	ret = layer->ops->disable(layer);
-	if (!ret)
-		layer->enabled = false;
-	else
-		DRM_ERROR("Disable failed\n");
-
-	return ret;
-
-err_no_disable:
-	DRM_ERROR("Cannot disable\n");
-	return 1;
-}
-
-const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
-{
-	if (!layer)
-		return NULL;
-
-	if (!layer->ops->get_formats)
-		return NULL;
-
-	return layer->ops->get_formats(layer);
-}
-
-unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
-{
-	if (!layer)
-		return 0;
-
-	if (!layer->ops->get_nb_formats)
-		return 0;
-
-	return layer->ops->get_nb_formats(layer);
-}
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
deleted file mode 100644
index ceff497f557e..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_LAYER_H_
-#define _STI_LAYER_H_
-
-#include <drm/drmP.h>
-
-#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
-
-#define STI_LAYER_TYPE_SHIFT 8
-#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
-
-struct sti_layer;
-
-enum sti_layer_type {
-	STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
-	STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
-	STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
-	STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
-	STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
-};
-
-enum sti_layer_id_of_type {
-	STI_ID_0 = 0,
-	STI_ID_1 = 1,
-	STI_ID_2 = 2,
-	STI_ID_3 = 3
-};
-
-enum sti_layer_desc {
-	STI_GDP_0       = STI_GDP | STI_ID_0,
-	STI_GDP_1       = STI_GDP | STI_ID_1,
-	STI_GDP_2       = STI_GDP | STI_ID_2,
-	STI_GDP_3       = STI_GDP | STI_ID_3,
-	STI_VID_0       = STI_VID | STI_ID_0,
-	STI_VID_1       = STI_VID | STI_ID_1,
-	STI_HQVDP_0     = STI_VDP | STI_ID_0,
-	STI_CURSOR      = STI_CUR,
-	STI_BACK        = STI_BCK
-};
-
-/**
- * STI layer functions structure
- *
- * @get_formats:	get layer supported formats
- * @get_nb_formats:	get number of format supported
- * @init:               initialize the layer
- * @prepare:		prepare layer before rendering
- * @commit:		set layer for rendering
- * @disable:		disable layer
- */
-struct sti_layer_funcs {
-	const uint32_t* (*get_formats)(struct sti_layer *layer);
-	unsigned int (*get_nb_formats)(struct sti_layer *layer);
-	void (*init)(struct sti_layer *layer);
-	int (*prepare)(struct sti_layer *layer, bool first_prepare);
-	int (*commit)(struct sti_layer *layer);
-	int (*disable)(struct sti_layer *layer);
-};
-
-/**
- * STI layer structure
- *
- * @plane:              drm plane it is bound to (if any)
- * @fb:                 drm fb it is bound to
- * @crtc:               crtc it is bound to
- * @mode:               display mode
- * @desc:               layer type & id
- * @device:		driver device
- * @regs:		layer registers
- * @ops:                layer functions
- * @zorder:             layer z-order
- * @mixer_id:           id of the mixer used to display the layer
- * @enabled:            to know if the layer is active or not
- * @src_x src_y:        coordinates of the input (fb) area
- * @src_w src_h:        size of the input (fb) area
- * @dst_x dst_y:        coordinates of the output (crtc) area
- * @dst_w dst_h:        size of the output (crtc) area
- * @format:             format
- * @pitches:            pitch of 'planes' (eg: Y, U, V)
- * @offsets:            offset of 'planes'
- * @vaddr:              virtual address of the input buffer
- * @paddr:              physical address of the input buffer
- */
-struct sti_layer {
-	struct drm_plane plane;
-	struct drm_framebuffer *fb;
-	struct drm_crtc *crtc;
-	struct drm_display_mode *mode;
-	enum sti_layer_desc desc;
-	struct device *dev;
-	void __iomem *regs;
-	const struct sti_layer_funcs *ops;
-	int zorder;
-	int mixer_id;
-	bool enabled;
-	int src_x, src_y;
-	int src_w, src_h;
-	int dst_x, dst_y;
-	int dst_w, dst_h;
-	uint32_t format;
-	unsigned int pitches[4];
-	unsigned int offsets[4];
-	void *vaddr;
-	dma_addr_t paddr;
-};
-
-struct sti_layer *sti_layer_create(struct device *dev, int desc,
-			void __iomem *baseaddr);
-int sti_layer_prepare(struct sti_layer *layer,
-			struct drm_crtc *crtc,
-			struct drm_framebuffer *fb,
-			struct drm_display_mode *mode,
-			int mixer_id,
-			int dest_x, int dest_y,
-			int dest_w, int dest_h,
-			int src_x, int src_y,
-			int src_w, int src_h);
-int sti_layer_commit(struct sti_layer *layer);
-int sti_layer_disable(struct sti_layer *layer);
-const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
-unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
-const char *sti_layer_to_str(struct sti_layer *layer);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 13a4b84deab6..0182e9365004 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -58,6 +58,7 @@ const char *sti_mixer_to_str(struct sti_mixer *mixer)
 		return "<UNKNOWN MIXER>";
 	}
 }
+EXPORT_SYMBOL(sti_mixer_to_str);
 
 static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
 {
@@ -101,52 +102,57 @@ static void sti_mixer_set_background_area(struct sti_mixer *mixer,
 	sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
 }
 
-int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
+int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
 {
-	int layer_id = 0, depth = layer->zorder;
+	int plane_id, depth = plane->zorder;
+	unsigned int i;
 	u32 mask, val;
 
-	if (depth >= GAM_MIXER_NB_DEPTH_LEVEL)
+	if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
 		return 1;
 
-	switch (layer->desc) {
+	switch (plane->desc) {
 	case STI_GDP_0:
-		layer_id = GAM_DEPTH_GDP0_ID;
+		plane_id = GAM_DEPTH_GDP0_ID;
 		break;
 	case STI_GDP_1:
-		layer_id = GAM_DEPTH_GDP1_ID;
+		plane_id = GAM_DEPTH_GDP1_ID;
 		break;
 	case STI_GDP_2:
-		layer_id = GAM_DEPTH_GDP2_ID;
+		plane_id = GAM_DEPTH_GDP2_ID;
 		break;
 	case STI_GDP_3:
-		layer_id = GAM_DEPTH_GDP3_ID;
+		plane_id = GAM_DEPTH_GDP3_ID;
 		break;
-	case STI_VID_0:
 	case STI_HQVDP_0:
-		layer_id = GAM_DEPTH_VID0_ID;
-		break;
-	case STI_VID_1:
-		layer_id = GAM_DEPTH_VID1_ID;
+		plane_id = GAM_DEPTH_VID0_ID;
 		break;
 	case STI_CURSOR:
 		/* no need to set depth for cursor */
 		return 0;
 	default:
-		DRM_ERROR("Unknown layer %d\n", layer->desc);
+		DRM_ERROR("Unknown plane %d\n", plane->desc);
 		return 1;
 	}
-	mask = GAM_DEPTH_MASK_ID << (3 * depth);
-	layer_id = layer_id << (3 * depth);
+
+	/* Check whether a depth was already assigned to this plane */
+	val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
+	for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
+		mask = GAM_DEPTH_MASK_ID << (3 * i);
+		if ((val & mask) == plane_id << (3 * i))
+			break;
+	}
+
+	mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
+	plane_id = plane_id << (3 * (depth - 1));
 
 	DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
-			 sti_layer_to_str(layer), depth);
+			 sti_plane_to_str(plane), depth);
 	dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
-		layer_id, mask);
+		plane_id, mask);
 
-	val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
 	val &= ~mask;
-	val |= layer_id;
+	val |= plane_id;
 	sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
 
 	dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
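The crossbar register updated here packs one 3-bit plane id per depth level: level 1 in bits [2:0], level 2 in bits [5:3], and so on. A standalone sketch of the bit manipulation, assuming GAM_DEPTH_MASK_ID is the 3-bit field mask 0x7 (consistent with the shifts above):

	static u32 crb_set_depth(u32 crb, u32 plane_id, int depth)
	{
		unsigned int shift = 3 * (depth - 1);	/* depth starts at 1 */

		crb &= ~(0x7u << shift);	/* clear the old plane id */
		crb |= plane_id << shift;	/* install the new one */
		return crb;
	}

so that, for instance, crb_set_depth(0, GAM_DEPTH_VID0_ID, 3) places VID0 in bits [8:6].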
@@ -176,9 +182,9 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
 	return 0;
 }
 
-static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
+static u32 sti_mixer_get_plane_mask(struct sti_plane *plane)
 {
-	switch (layer->desc) {
+	switch (plane->desc) {
 	case STI_BACK:
 		return GAM_CTL_BACK_MASK;
 	case STI_GDP_0:
@@ -189,11 +195,8 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
 		return GAM_CTL_GDP2_MASK;
 	case STI_GDP_3:
 		return GAM_CTL_GDP3_MASK;
-	case STI_VID_0:
 	case STI_HQVDP_0:
 		return GAM_CTL_VID0_MASK;
-	case STI_VID_1:
-		return GAM_CTL_VID1_MASK;
 	case STI_CURSOR:
 		return GAM_CTL_CURSOR_MASK;
 	default:
@@ -201,17 +204,17 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
 	}
 }
 
-int sti_mixer_set_layer_status(struct sti_mixer *mixer,
-			       struct sti_layer *layer, bool status)
+int sti_mixer_set_plane_status(struct sti_mixer *mixer,
+			       struct sti_plane *plane, bool status)
 {
 	u32 mask, val;
 
 	DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
-			 sti_mixer_to_str(mixer), sti_layer_to_str(layer));
+			 sti_mixer_to_str(mixer), sti_plane_to_str(plane));
 
-	mask = sti_mixer_get_layer_mask(layer);
+	mask = sti_mixer_get_plane_mask(plane);
 	if (!mask) {
-		DRM_ERROR("Can not find layer mask\n");
+		DRM_ERROR("Can't find layer mask\n");
 		return -EINVAL;
 	}
 
@@ -223,15 +226,6 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
 	return 0;
 }
 
-void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
-{
-	u32 val;
-
-	DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer));
-	val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
-	sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
-}
-
 void sti_mixer_set_matrix(struct sti_mixer *mixer)
 {
 	unsigned int i;
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index b97282182908..efb1a9a5ba86 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -11,10 +11,16 @@
 
 #include <drm/drmP.h>
 
-#include "sti_layer.h"
+#include "sti_plane.h"
 
 #define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
 
+enum sti_mixer_status {
+	STI_MIXER_READY,
+	STI_MIXER_DISABLING,
+	STI_MIXER_DISABLED,
+};
+
 /**
  * STI Mixer subdevice structure
  *
@@ -23,33 +29,32 @@
  * @id: id of the mixer
  * @drm_crtc: crtc object link to the mixer
  * @pending_event: set if a flip event is pending on crtc
- * @enabled: to know if the mixer is active or not
+ * @status: current status of the mixer
  */
 struct sti_mixer {
 	struct device *dev;
 	void __iomem *regs;
 	int id;
-	struct drm_crtc	drm_crtc;
+	struct drm_crtc drm_crtc;
 	struct drm_pending_vblank_event *pending_event;
-	bool enabled;
+	enum sti_mixer_status status;
 };
 
 const char *sti_mixer_to_str(struct sti_mixer *mixer);
 
 struct sti_mixer *sti_mixer_create(struct device *dev, int id,
-		void __iomem *baseaddr);
+				   void __iomem *baseaddr);
 
-int sti_mixer_set_layer_status(struct sti_mixer *mixer,
-		struct sti_layer *layer, bool status);
-void sti_mixer_clear_all_layers(struct sti_mixer *mixer);
-int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
+int sti_mixer_set_plane_status(struct sti_mixer *mixer,
+			       struct sti_plane *plane, bool status);
+int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane);
 int sti_mixer_active_video_area(struct sti_mixer *mixer,
-		struct drm_display_mode *mode);
+				struct drm_display_mode *mode);
 
 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
 
 /* depth in Cross-bar control = z order */
-#define GAM_MIXER_NB_DEPTH_LEVEL 7
+#define GAM_MIXER_NB_DEPTH_LEVEL 6
 
 #define STI_MIXER_MAIN 0
 #define STI_MIXER_AUX  1
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
new file mode 100644
index 000000000000..d5c5e91f2956
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ *          Fabien Dessenne <fabien.dessenne@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_drv.h"
+#include "sti_plane.h"
+
+/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (Foreground) */
+enum sti_plane_desc sti_plane_default_zorder[] = {
+	STI_GDP_0,
+	STI_GDP_1,
+	STI_HQVDP_0,
+	STI_GDP_2,
+	STI_GDP_3,
+};
+
+const char *sti_plane_to_str(struct sti_plane *plane)
+{
+	switch (plane->desc) {
+	case STI_GDP_0:
+		return "GDP0";
+	case STI_GDP_1:
+		return "GDP1";
+	case STI_GDP_2:
+		return "GDP2";
+	case STI_GDP_3:
+		return "GDP3";
+	case STI_HQVDP_0:
+		return "HQVDP0";
+	case STI_CURSOR:
+		return "CURSOR";
+	default:
+		return "<UNKNOWN PLANE>";
+	}
+}
+EXPORT_SYMBOL(sti_plane_to_str);
+
+static void sti_plane_destroy(struct drm_plane *drm_plane)
+{
+	DRM_DEBUG_DRIVER("\n");
+
+	drm_plane_helper_disable(drm_plane);
+	drm_plane_cleanup(drm_plane);
+}
+
+static int sti_plane_set_property(struct drm_plane *drm_plane,
+				  struct drm_property *property,
+				  uint64_t val)
+{
+	struct drm_device *dev = drm_plane->dev;
+	struct sti_private *private = dev->dev_private;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (property == private->plane_zorder_property) {
+		plane->zorder = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane)
+{
+	struct drm_device *dev = drm_plane->dev;
+	struct sti_private *private = dev->dev_private;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct drm_property *prop;
+
+	prop = private->plane_zorder_property;
+	if (!prop) {
+		prop = drm_property_create_range(dev, 0, "zpos", 1,
+						 GAM_MIXER_NB_DEPTH_LEVEL);
+		if (!prop)
+			return;
+
+		private->plane_zorder_property = prop;
+	}
+
+	drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
+}
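Userspace can then restack an overlay through this property, for instance with libdrm (ids are illustrative and error handling is omitted):

	/* move the plane to depth 3 in the mixer crossbar */
	drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
				 zpos_prop_id, 3);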
+
+void sti_plane_init_property(struct sti_plane *plane,
+			     enum drm_plane_type type)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
+		if (sti_plane_default_zorder[i] == plane->desc)
+			break;
+
+	plane->zorder = i + 1;
+
+	if (type == DRM_PLANE_TYPE_OVERLAY)
+		sti_plane_attach_zorder_property(&plane->drm_plane);
+
+	DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
+			 plane->drm_plane.base.id,
+			 sti_plane_to_str(plane), plane->zorder);
+}
+EXPORT_SYMBOL(sti_plane_init_property);
+
+struct drm_plane_funcs sti_plane_helpers_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = sti_plane_destroy,
+	.set_property = sti_plane_set_property,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+EXPORT_SYMBOL(sti_plane_helpers_funcs);
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
new file mode 100644
index 000000000000..86f1e6fc81b9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_PLANE_H_
+#define _STI_PLANE_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+extern struct drm_plane_funcs sti_plane_helpers_funcs;
+
+#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
+
+#define STI_PLANE_TYPE_SHIFT 8
+#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))
+
+enum sti_plane_type {
+	STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
+	STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
+	STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
+	STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
+};
+
+enum sti_plane_id_of_type {
+	STI_ID_0 = 0,
+	STI_ID_1 = 1,
+	STI_ID_2 = 2,
+	STI_ID_3 = 3
+};
+
+enum sti_plane_desc {
+	STI_GDP_0       = STI_GDP | STI_ID_0,
+	STI_GDP_1       = STI_GDP | STI_ID_1,
+	STI_GDP_2       = STI_GDP | STI_ID_2,
+	STI_GDP_3       = STI_GDP | STI_ID_3,
+	STI_HQVDP_0     = STI_VDP | STI_ID_0,
+	STI_CURSOR      = STI_CUR,
+	STI_BACK        = STI_BCK
+};
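Since the plane type lives in the high bits and the instance id in the low byte, a descriptor decomposes by simple masking, e.g.:

	/* STI_GDP_2 == 0x102:
	 *   type: STI_GDP_2 & STI_PLANE_TYPE_MASK  == 0x100 == STI_GDP
	 *   id  : STI_GDP_2 & ~STI_PLANE_TYPE_MASK == 0x002 == STI_ID_2
	 */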
+
+enum sti_plane_status {
+	STI_PLANE_READY,
+	STI_PLANE_UPDATED,
+	STI_PLANE_DISABLING,
+	STI_PLANE_FLUSHING,
+	STI_PLANE_DISABLED,
+};
+
+/**
+ * STI plane structure
+ *
+ * @drm_plane:          drm plane it is bound to
+ * @desc:               plane type & id
+ * @status:             current status of the plane
+ * @zorder:             plane z-order
+ */
+struct sti_plane {
+	struct drm_plane drm_plane;
+	enum sti_plane_desc desc;
+	enum sti_plane_status status;
+	int zorder;
+};
+
+const char *sti_plane_to_str(struct sti_plane *plane);
+void sti_plane_init_property(struct sti_plane *plane,
+			     enum drm_plane_type type);
+#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5cc53116508e..c1aac8e66fb5 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,7 +16,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
 
 /* glue registers */
 #define TVO_CSC_MAIN_M0                  0x000
@@ -473,7 +473,7 @@ static void sti_dvo_encoder_commit(struct drm_encoder *encoder)
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
-	tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+	tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
 static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
@@ -523,7 +523,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder)
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
-	tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+	tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
 static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -575,7 +575,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
-	tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+	tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
 static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -644,7 +644,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
 	struct sti_tvout *tvout = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
 	unsigned int i;
-	int ret;
 
 	tvout->drm_dev = drm_dev;
 
@@ -658,17 +657,15 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
 
 	sti_tvout_create_encoders(drm_dev, tvout);
 
-	ret = component_bind_all(dev, drm_dev);
-	if (ret)
-		sti_tvout_destroy_encoders(tvout);
-
-	return ret;
+	return 0;
 }
 
 static void sti_tvout_unbind(struct device *dev, struct device *master,
 	void *data)
 {
-	/* do nothing */
+	struct sti_tvout *tvout = dev_get_drvdata(dev);
+
+	sti_tvout_destroy_encoders(tvout);
 }
 
 static const struct component_ops sti_tvout_ops = {
@@ -676,34 +673,12 @@ static const struct component_ops sti_tvout_ops = {
 	.unbind	= sti_tvout_unbind,
 };
 
-static int compare_of(struct device *dev, void *data)
-{
-	return dev->of_node == data;
-}
-
-static int sti_tvout_master_bind(struct device *dev)
-{
-	return 0;
-}
-
-static void sti_tvout_master_unbind(struct device *dev)
-{
-	/* do nothing */
-}
-
-static const struct component_master_ops sti_tvout_master_ops = {
-	.bind = sti_tvout_master_bind,
-	.unbind = sti_tvout_master_unbind,
-};
-
 static int sti_tvout_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *node = dev->of_node;
 	struct sti_tvout *tvout;
 	struct resource *res;
-	struct device_node *child_np;
-	struct component_match *match = NULL;
 
 	DRM_INFO("%s\n", __func__);
 
@@ -734,24 +709,11 @@ static int sti_tvout_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, tvout);
 
-	of_platform_populate(node, NULL, NULL, dev);
-
-	child_np = of_get_next_available_child(node, NULL);
-
-	while (child_np) {
-		component_match_add(dev, &match, compare_of, child_np);
-		of_node_put(child_np);
-		child_np = of_get_next_available_child(node, child_np);
-	}
-
-	component_master_add_with_match(dev, &sti_tvout_master_ops, match);
-
 	return component_add(dev, &sti_tvout_ops);
 }
 
 static int sti_tvout_remove(struct platform_device *pdev)
 {
-	component_master_del(&pdev->dev, &sti_tvout_master_ops);
 	component_del(&pdev->dev, &sti_tvout_ops);
 	return 0;
 }
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 10ced6a479f4..a8254cc362a1 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -6,7 +6,7 @@
 
 #include <drm/drmP.h>
 
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vid.h"
 #include "sti_vtg.h"
 
@@ -43,35 +43,37 @@
 #define VID_MPR2_BT709          0x07150545
 #define VID_MPR3_BT709          0x00000AE8
 
-static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare)
+void sti_vid_commit(struct sti_vid *vid,
+		    struct drm_plane_state *state)
 {
-	u32 val;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_display_mode *mode = &crtc->mode;
+	int dst_x = state->crtc_x;
+	int dst_y = state->crtc_y;
+	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	u32 val, ydo, xdo, yds, xds;
+
+	/* Input / output size
+	 * Align to upper even value */
+	dst_w = ALIGN(dst_w, 2);
+	dst_h = ALIGN(dst_h, 2);
 
 	/* Unmask */
 	val = readl(vid->regs + VID_CTL);
 	val &= ~VID_CTL_IGNORE;
 	writel(val, vid->regs + VID_CTL);
 
-	return 0;
-}
-
-static int sti_vid_commit_layer(struct sti_layer *vid)
-{
-	struct drm_display_mode *mode = vid->mode;
-	u32 ydo, xdo, yds, xds;
-
-	ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
-	yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
-	xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
-	xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
+	ydo = sti_vtg_get_line_number(*mode, dst_y);
+	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
 
 	writel((ydo << 16) | xdo, vid->regs + VID_VPO);
 	writel((yds << 16) | xds, vid->regs + VID_VPS);
-
-	return 0;
 }
 
-static int sti_vid_disable_layer(struct sti_layer *vid)
+void sti_vid_disable(struct sti_vid *vid)
 {
 	u32 val;
 
@@ -79,21 +81,9 @@ static int sti_vid_disable_layer(struct sti_layer *vid)
 	val = readl(vid->regs + VID_CTL);
 	val |= VID_CTL_IGNORE;
 	writel(val, vid->regs + VID_CTL);
-
-	return 0;
 }
 
-static const uint32_t *sti_vid_get_formats(struct sti_layer *layer)
-{
-	return NULL;
-}
-
-static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
-{
-	return 0;
-}
-
-static void sti_vid_init(struct sti_layer *vid)
+static void sti_vid_init(struct sti_vid *vid)
 {
 	/* Enable PSI, Mask layer */
 	writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
@@ -113,18 +103,10 @@ static void sti_vid_init(struct sti_layer *vid)
 	writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
 }
 
-static const struct sti_layer_funcs vid_ops = {
-	.get_formats = sti_vid_get_formats,
-	.get_nb_formats = sti_vid_get_nb_formats,
-	.init = sti_vid_init,
-	.prepare = sti_vid_prepare_layer,
-	.commit = sti_vid_commit_layer,
-	.disable = sti_vid_disable_layer,
-};
-
-struct sti_layer *sti_vid_create(struct device *dev)
+struct sti_vid *sti_vid_create(struct device *dev, int id,
+			       void __iomem *baseaddr)
 {
-	struct sti_layer *vid;
+	struct sti_vid *vid;
 
 	vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
 	if (!vid) {
@@ -132,7 +114,11 @@ struct sti_layer *sti_vid_create(struct device *dev)
 		return NULL;
 	}
 
-	vid->ops = &vid_ops;
+	vid->dev = dev;
+	vid->regs = baseaddr;
+	vid->id = id;
+
+	sti_vid_init(vid);
 
 	return vid;
 }
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 2c0aecd63294..5dea4791f1d6 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -7,6 +7,23 @@
 #ifndef _STI_VID_H_
 #define _STI_VID_H_
 
-struct sti_layer *sti_vid_create(struct device *dev);
+/**
+ * STI VID structure
+ *
+ * @dev:   driver device
+ * @regs:  vid registers
+ * @id:    id of the vid
+ */
+struct sti_vid {
+	struct device *dev;
+	void __iomem *regs;
+	int id;
+};
+
+void sti_vid_commit(struct sti_vid *vid,
+		    struct drm_plane_state *state);
+void sti_vid_disable(struct sti_vid *vid);
+struct sti_vid *sti_vid_create(struct device *dev, int id,
+			       void __iomem *baseaddr);
 
 #endif
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index a287e4fec865..ddefb85dc4f7 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -76,6 +76,14 @@ to_tegra_plane_state(struct drm_plane_state *state)
 	return NULL;
 }
 
+static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
+{
+	stats->frames = 0;
+	stats->vblank = 0;
+	stats->underflow = 0;
+	stats->overflow = 0;
+}
+
 /*
  * Reads the active copy of a register. This takes the dc->lock spinlock to
  * prevent races with the VBLANK processing which also needs access to the
@@ -759,7 +767,6 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
 	/* position the cursor */
 	value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff);
 	tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
-
 }
 
 static void tegra_cursor_atomic_disable(struct drm_plane *plane,
@@ -809,9 +816,11 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
 		return ERR_PTR(-ENOMEM);
 
 	/*
-	 * We'll treat the cursor as an overlay plane with index 6 here so
-	 * that the update and activation request bits in DC_CMD_STATE_CONTROL
-	 * match up.
+	 * This index is kind of fake. The cursor isn't a regular plane, but
+	 * its update and activation request bits in DC_CMD_STATE_CONTROL do
+	 * use the same programming. Setting this fake index here allows the
+	 * code in tegra_add_plane_state() to do the right thing without the
+	 * need to special-case the cursor plane.
 	 */
 	plane->index = 6;
 
@@ -1015,6 +1024,8 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
 		crtc->state = &state->base;
 		crtc->state->crtc = crtc;
 	}
+
+	drm_crtc_vblank_reset(crtc);
 }
 
 static struct drm_crtc_state *
@@ -1052,90 +1063,6 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = {
 	.atomic_destroy_state = tegra_crtc_atomic_destroy_state,
 };
 
-static void tegra_dc_stop(struct tegra_dc *dc)
-{
-	u32 value;
-
-	/* stop the display controller */
-	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
-	value &= ~DISP_CTRL_MODE_MASK;
-	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
-	tegra_dc_commit(dc);
-}
-
-static bool tegra_dc_idle(struct tegra_dc *dc)
-{
-	u32 value;
-
-	value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
-
-	return (value & DISP_CTRL_MODE_MASK) == 0;
-}
-
-static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
-{
-	timeout = jiffies + msecs_to_jiffies(timeout);
-
-	while (time_before(jiffies, timeout)) {
-		if (tegra_dc_idle(dc))
-			return 0;
-
-		usleep_range(1000, 2000);
-	}
-
-	dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
-	return -ETIMEDOUT;
-}
-
-static void tegra_crtc_disable(struct drm_crtc *crtc)
-{
-	struct tegra_dc *dc = to_tegra_dc(crtc);
-	u32 value;
-
-	if (!tegra_dc_idle(dc)) {
-		tegra_dc_stop(dc);
-
-		/*
-		 * Ignore the return value, there isn't anything useful to do
-		 * in case this fails.
-		 */
-		tegra_dc_wait_idle(dc, 100);
-	}
-
-	/*
-	 * This should really be part of the RGB encoder driver, but clearing
-	 * these bits has the side-effect of stopping the display controller.
-	 * When that happens no VBLANK interrupts will be raised. At the same
-	 * time the encoder is disabled before the display controller, so the
-	 * above code is always going to timeout waiting for the controller
-	 * to go idle.
-	 *
-	 * Given the close coupling between the RGB encoder and the display
-	 * controller doing it here is still kind of okay. None of the other
-	 * encoder drivers require these bits to be cleared.
-	 *
-	 * XXX: Perhaps given that the display controller is switched off at
-	 * this point anyway maybe clearing these bits isn't even useful for
-	 * the RGB encoder?
-	 */
-	if (dc->rgb) {
-		value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
-		value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
-			   PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
-		tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-	}
-
-	drm_crtc_vblank_off(crtc);
-}
-
-static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted)
-{
-	return true;
-}
-
 static int tegra_dc_set_timings(struct tegra_dc *dc,
 				struct drm_display_mode *mode)
 {
@@ -1229,7 +1156,85 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
 	tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
 }
 
-static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void tegra_dc_stop(struct tegra_dc *dc)
+{
+	u32 value;
+
+	/* stop the display controller */
+	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+	value &= ~DISP_CTRL_MODE_MASK;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+	tegra_dc_commit(dc);
+}
+
+static bool tegra_dc_idle(struct tegra_dc *dc)
+{
+	u32 value;
+
+	value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
+
+	return (value & DISP_CTRL_MODE_MASK) == 0;
+}
+
+static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
+{
+	timeout = jiffies + msecs_to_jiffies(timeout);
+
+	while (time_before(jiffies, timeout)) {
+		if (tegra_dc_idle(dc))
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+
+	dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
+	return -ETIMEDOUT;
+}
+
+static void tegra_crtc_disable(struct drm_crtc *crtc)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	u32 value;
+
+	if (!tegra_dc_idle(dc)) {
+		tegra_dc_stop(dc);
+
+		/*
+		 * Ignore the return value, there isn't anything useful to do
+		 * in case this fails.
+		 */
+		tegra_dc_wait_idle(dc, 100);
+	}
+
+	/*
+	 * This should really be part of the RGB encoder driver, but clearing
+	 * these bits has the side-effect of stopping the display controller.
+	 * When that happens no VBLANK interrupts will be raised. At the same
+	 * time the encoder is disabled before the display controller, so the
+	 * above code is always going to timeout waiting for the controller
+	 * to go idle.
+	 *
+	 * Given the close coupling between the RGB encoder and the display
+	 * controller doing it here is still kind of okay. None of the other
+	 * encoder drivers require these bits to be cleared.
+	 *
+	 * XXX: Perhaps given that the display controller is switched off at
+	 * this point anyway maybe clearing these bits isn't even useful for
+	 * the RGB encoder?
+	 */
+	if (dc->rgb) {
+		value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+		value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+			   PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+		tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+	}
+
+	tegra_dc_stats_reset(&dc->stats);
+	drm_crtc_vblank_off(crtc);
+}
+
+static void tegra_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 	struct tegra_dc_state *state = to_dc_state(crtc->state);
@@ -1259,15 +1264,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
 
 	tegra_dc_commit(dc);
-}
-
-static void tegra_crtc_prepare(struct drm_crtc *crtc)
-{
-	drm_crtc_vblank_off(crtc);
-}
 
-static void tegra_crtc_commit(struct drm_crtc *crtc)
-{
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -1277,7 +1274,8 @@ static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
 	return 0;
 }
 
-static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 
@@ -1291,7 +1289,8 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
 	}
 }
 
-static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct tegra_dc_state *state = to_dc_state(crtc->state);
 	struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -1302,10 +1301,7 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
 
 static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
 	.disable = tegra_crtc_disable,
-	.mode_fixup = tegra_crtc_mode_fixup,
-	.mode_set_nofb = tegra_crtc_mode_set_nofb,
-	.prepare = tegra_crtc_prepare,
-	.commit = tegra_crtc_commit,
+	.enable = tegra_crtc_enable,
 	.atomic_check = tegra_crtc_atomic_check,
 	.atomic_begin = tegra_crtc_atomic_begin,
 	.atomic_flush = tegra_crtc_atomic_flush,
@@ -1323,6 +1319,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
 		/*
 		dev_dbg(dc->dev, "%s(): frame end\n", __func__);
 		*/
+		dc->stats.frames++;
 	}
 
 	if (status & VBLANK_INT) {
@@ -1331,12 +1328,21 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
 		*/
 		drm_crtc_handle_vblank(&dc->base);
 		tegra_dc_finish_page_flip(dc);
+		dc->stats.vblank++;
 	}
 
 	if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
 		/*
 		dev_dbg(dc->dev, "%s(): underflow\n", __func__);
 		*/
+		dc->stats.underflow++;
+	}
+
+	if (status & (WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT)) {
+		/*
+		dev_dbg(dc->dev, "%s(): overflow\n", __func__);
+		*/
+		dc->stats.overflow++;
 	}
 
 	return IRQ_HANDLED;
@@ -1346,6 +1352,14 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_dc *dc = node->info_ent->data;
+	int err = 0;
+
+	drm_modeset_lock_crtc(&dc->base, NULL);
+
+	if (!dc->base.state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-40s %#05x %08x\n", #name, name,	\
@@ -1566,11 +1580,59 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
 
 #undef DUMP_REG
 
+unlock:
+	drm_modeset_unlock_crtc(&dc->base);
+	return err;
+}
+
+static int tegra_dc_show_crc(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_dc *dc = node->info_ent->data;
+	int err = 0;
+	u32 value;
+
+	drm_modeset_lock_crtc(&dc->base, NULL);
+
+	if (!dc->base.state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE;
+	tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL);
+	tegra_dc_commit(dc);
+
+	drm_crtc_wait_one_vblank(&dc->base);
+	drm_crtc_wait_one_vblank(&dc->base);
+
+	value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM);
+	seq_printf(s, "%08x\n", value);
+
+	tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL);
+
+unlock:
+	drm_modeset_unlock_crtc(&dc->base);
+	return err;
+}
+
+static int tegra_dc_show_stats(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_dc *dc = node->info_ent->data;
+
+	seq_printf(s, "frames: %lu\n", dc->stats.frames);
+	seq_printf(s, "vblank: %lu\n", dc->stats.vblank);
+	seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
+	seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
+
 	return 0;
 }
 
 static struct drm_info_list debugfs_files[] = {
 	{ "regs", tegra_dc_show_regs, 0, NULL },
+	{ "crc", tegra_dc_show_crc, 0, NULL },
+	{ "stats", tegra_dc_show_stats, 0, NULL },
 };
 
 static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
@@ -1716,7 +1778,8 @@ static int tegra_dc_init(struct host1x_client *client)
 		tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
 	}
 
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
 	tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
 
 	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
@@ -1732,15 +1795,19 @@ static int tegra_dc_init(struct host1x_client *client)
 		WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
 	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
 
-	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
 	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
 
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
 	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
 
 	if (dc->soc->supports_border_color)
 		tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
 
+	tegra_dc_stats_reset(&dc->stats);
+
 	return 0;
 
 cleanup:
@@ -1826,8 +1893,20 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
 	.has_powergate = true,
 };
 
+static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
+	.supports_border_color = false,
+	.supports_interlacing = true,
+	.supports_cursor = true,
+	.supports_block_linear = true,
+	.pitch_align = 64,
+	.has_powergate = true,
+};
+
 static const struct of_device_id tegra_dc_of_match[] = {
 	{
+		.compatible = "nvidia,tegra210-dc",
+		.data = &tegra210_dc_soc_info,
+	}, {
 		.compatible = "nvidia,tegra124-dc",
 		.data = &tegra124_dc_soc_info,
 	}, {
@@ -1957,6 +2036,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
 		return -ENXIO;
 	}
 
+	dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
+	if (!dc->syncpt)
+		dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
+
 	INIT_LIST_HEAD(&dc->client.list);
 	dc->client.ops = &dc_client_ops;
 	dc->client.dev = &pdev->dev;
@@ -1974,10 +2057,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
 		return err;
 	}
 
-	dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
-	if (!dc->syncpt)
-		dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
-
 	platform_set_drvdata(pdev, dc);
 
 	return 0;
@@ -2016,7 +2095,6 @@ static int tegra_dc_remove(struct platform_device *pdev)
 struct platform_driver tegra_dc_driver = {
 	.driver = {
 		.name = "tegra-dc",
-		.owner = THIS_MODULE,
 		.of_match_table = tegra_dc_of_match,
 	},
 	.probe = tegra_dc_probe,
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 55792daabbb5..4a268635749b 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -86,6 +86,11 @@
 #define DC_CMD_REG_ACT_CONTROL			0x043
 
 #define DC_COM_CRC_CONTROL			0x300
+#define  DC_COM_CRC_CONTROL_ALWAYS      (1 << 3)
+#define  DC_COM_CRC_CONTROL_FULL_FRAME  (0 << 2)
+#define  DC_COM_CRC_CONTROL_ACTIVE_DATA (1 << 2)
+#define  DC_COM_CRC_CONTROL_WAIT        (1 << 1)
+#define  DC_COM_CRC_CONTROL_ENABLE      (1 << 0)
 #define DC_COM_CRC_CHECKSUM			0x301
 #define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
 #define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
@@ -114,15 +119,17 @@
 #define DC_COM_CRC_CHECKSUM_LATCHED		0x329
 
 #define DC_DISP_DISP_SIGNAL_OPTIONS0		0x400
-#define H_PULSE_0_ENABLE (1 <<  8)
-#define H_PULSE_1_ENABLE (1 << 10)
-#define H_PULSE_2_ENABLE (1 << 12)
+#define H_PULSE0_ENABLE (1 <<  8)
+#define H_PULSE1_ENABLE (1 << 10)
+#define H_PULSE2_ENABLE (1 << 12)
 
 #define DC_DISP_DISP_SIGNAL_OPTIONS1		0x401
 
 #define DC_DISP_DISP_WIN_OPTIONS		0x402
 #define HDMI_ENABLE	(1 << 30)
 #define DSI_ENABLE	(1 << 29)
+#define SOR1_TIMING_CYA	(1 << 27)
+#define SOR1_ENABLE	(1 << 26)
 #define SOR_ENABLE	(1 << 25)
 #define CURSOR_ENABLE	(1 << 16)
 
@@ -242,9 +249,20 @@
 #define BASE_COLOR_SIZE565     (6 << 0)
 #define BASE_COLOR_SIZE332     (7 << 0)
 #define BASE_COLOR_SIZE888     (8 << 0)
+#define DITHER_CONTROL_MASK    (3 << 8)
 #define DITHER_CONTROL_DISABLE (0 << 8)
 #define DITHER_CONTROL_ORDERED (2 << 8)
 #define DITHER_CONTROL_ERRDIFF (3 << 8)
+#define BASE_COLOR_SIZE_MASK   (0xf << 0)
+#define BASE_COLOR_SIZE_666    (0 << 0)
+#define BASE_COLOR_SIZE_111    (1 << 0)
+#define BASE_COLOR_SIZE_222    (2 << 0)
+#define BASE_COLOR_SIZE_333    (3 << 0)
+#define BASE_COLOR_SIZE_444    (4 << 0)
+#define BASE_COLOR_SIZE_555    (5 << 0)
+#define BASE_COLOR_SIZE_565    (6 << 0)
+#define BASE_COLOR_SIZE_332    (7 << 0)
+#define BASE_COLOR_SIZE_888    (8 << 0)
 
 #define DC_DISP_SHIFT_CLOCK_OPTIONS		0x431
 #define  SC1_H_QUALIFIER_NONE	(1 << 16)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 07b26972f487..224a7dc8e4ed 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -294,26 +294,41 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 	}
 
 	dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
-	if (IS_ERR(dpaux->rst))
+	if (IS_ERR(dpaux->rst)) {
+		dev_err(&pdev->dev, "failed to get reset control: %ld\n",
+			PTR_ERR(dpaux->rst));
 		return PTR_ERR(dpaux->rst);
+	}
 
 	dpaux->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(dpaux->clk))
+	if (IS_ERR(dpaux->clk)) {
+		dev_err(&pdev->dev, "failed to get module clock: %ld\n",
+			PTR_ERR(dpaux->clk));
 		return PTR_ERR(dpaux->clk);
+	}
 
 	err = clk_prepare_enable(dpaux->clk);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable module clock: %d\n",
+			err);
 		return err;
+	}
 
 	reset_control_deassert(dpaux->rst);
 
 	dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
-	if (IS_ERR(dpaux->clk_parent))
+	if (IS_ERR(dpaux->clk_parent)) {
+		dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
+			PTR_ERR(dpaux->clk_parent));
 		return PTR_ERR(dpaux->clk_parent);
+	}
 
 	err = clk_prepare_enable(dpaux->clk_parent);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
+			err);
 		return err;
+	}
 
 	err = clk_set_rate(dpaux->clk_parent, 270000000);
 	if (err < 0) {
@@ -323,8 +338,11 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 	}
 
 	dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(dpaux->vdd))
+	if (IS_ERR(dpaux->vdd)) {
+		dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
+			PTR_ERR(dpaux->vdd));
 		return PTR_ERR(dpaux->vdd);
+	}
 
 	err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
 			       dev_name(dpaux->dev), dpaux);
@@ -334,6 +352,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 		return err;
 	}
 
+	disable_irq(dpaux->irq);
+
 	dpaux->aux.transfer = tegra_dpaux_transfer;
 	dpaux->aux.dev = &pdev->dev;
 
@@ -341,6 +361,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 	if (err < 0)
 		return err;
 
+	/*
+	 * Assume that by default the DPAUX/I2C pads will be used for HDMI,
+	 * so power them up and configure them in I2C mode.
+	 *
+	 * The DPAUX code paths reconfigure the pads in AUX mode, but there
+	 * is no way to perform the I2C mode configuration from the HDMI
+	 * path.
+	 */
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+	value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
+	value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+		DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+		DPAUX_HYBRID_PADCTL_MODE_I2C;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+
 	/* enable and clear all interrupts */
 	value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
 		DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -359,6 +397,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 static int tegra_dpaux_remove(struct platform_device *pdev)
 {
 	struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
+	u32 value;
+
+	/* make sure pads are powered down when not in use */
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+	value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
 
 	drm_dp_aux_unregister(&dpaux->aux);
 
@@ -376,6 +420,7 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id tegra_dpaux_of_match[] = {
+	{ .compatible = "nvidia,tegra210-dpaux", },
 	{ .compatible = "nvidia,tegra124-dpaux", },
 	{ },
 };
@@ -425,8 +470,10 @@ int tegra_dpaux_attach(struct tegra_dpaux *dpaux, struct tegra_output *output)
 		enum drm_connector_status status;
 
 		status = tegra_dpaux_detect(dpaux);
-		if (status == connector_status_connected)
+		if (status == connector_status_connected) {
+			enable_irq(dpaux->irq);
 			return 0;
+		}
 
 		usleep_range(1000, 2000);
 	}
@@ -439,6 +486,8 @@ int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
 	unsigned long timeout;
 	int err;
 
+	disable_irq(dpaux->irq);
+
 	err = regulator_disable(dpaux->vdd);
 	if (err < 0)
 		return err;
diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h
index 806e245ca787..20783d9f4728 100644
--- a/drivers/gpu/drm/tegra/dpaux.h
+++ b/drivers/gpu/drm/tegra/dpaux.h
@@ -57,6 +57,8 @@
 #define DPAUX_DP_AUX_CONFIG 0x45
 
 #define DPAUX_HYBRID_PADCTL 0x49
+#define DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV (1 << 15)
+#define DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV (1 << 14)
 #define DPAUX_HYBRID_PADCTL_AUX_CMH(x) (((x) & 0x3) << 12)
 #define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x) (((x) & 0x7) << 8)
 #define DPAUX_HYBRID_PADCTL_AUX_DRVI(x) (((x) & 0x3f) << 2)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 427f50c6803c..6d88cf1fcd1c 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -171,8 +171,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 	if (err < 0)
 		goto fbdev;
 
-	drm_mode_config_reset(drm);
-
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
 	 * core, so we need to set this manually in order to allow the
@@ -182,11 +180,14 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
 	/* syncpoints are used for full 32-bit hardware VBLANK counters */
 	drm->max_vblank_count = 0xffffffff;
+	drm->vblank_disable_allowed = true;
 
 	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (err < 0)
 		goto device;
 
+	drm_mode_config_reset(drm);
+
 	err = tegra_drm_fb_init(drm);
 	if (err < 0)
 		goto vblank;
@@ -1037,9 +1038,8 @@ static int host1x_drm_resume(struct device *dev)
 }
 #endif
 
-static const struct dev_pm_ops host1x_drm_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume)
-};
+static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
+			 host1x_drm_resume);
 
 static const struct of_device_id host1x_drm_subdevs[] = {
 	{ .compatible = "nvidia,tegra20-dc", },
@@ -1056,6 +1056,12 @@ static const struct of_device_id host1x_drm_subdevs[] = {
 	{ .compatible = "nvidia,tegra124-dc", },
 	{ .compatible = "nvidia,tegra124-sor", },
 	{ .compatible = "nvidia,tegra124-hdmi", },
+	{ .compatible = "nvidia,tegra124-dsi", },
+	{ .compatible = "nvidia,tegra132-dsi", },
+	{ .compatible = "nvidia,tegra210-dc", },
+	{ .compatible = "nvidia,tegra210-dsi", },
+	{ .compatible = "nvidia,tegra210-sor", },
+	{ .compatible = "nvidia,tegra210-sor1", },
 	{ /* sentinel */ }
 };
 
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 659b2fcc986d..ec49275ffb24 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -12,6 +12,7 @@
 
 #include <uapi/drm/tegra_drm.h>
 #include <linux/host1x.h>
+#include <linux/of_gpio.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -104,6 +105,13 @@ int tegra_drm_exit(struct tegra_drm *tegra);
 struct tegra_dc_soc_info;
 struct tegra_output;
 
+struct tegra_dc_stats {
+	unsigned long frames;
+	unsigned long vblank;
+	unsigned long underflow;
+	unsigned long overflow;
+};
+
 struct tegra_dc {
 	struct host1x_client client;
 	struct host1x_syncpt *syncpt;
@@ -121,6 +129,7 @@ struct tegra_dc {
 
 	struct tegra_output *rgb;
 
+	struct tegra_dc_stats stats;
 	struct list_head list;
 
 	struct drm_info_list *debugfs_files;
@@ -200,6 +209,7 @@ struct tegra_output {
 	const struct edid *edid;
 	unsigned int hpd_irq;
 	int hpd_gpio;
+	enum of_gpio_flags hpd_gpio_flags;
 
 	struct drm_encoder encoder;
 	struct drm_connector connector;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index ed970f622903..f0a138ef68ce 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -119,6 +119,16 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_dsi *dsi = node->info_ent->data;
+	struct drm_crtc *crtc = dsi->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
+
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-32s %#05x %08x\n", #name, name,	\
@@ -208,7 +218,9 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data)
 
 #undef DUMP_REG
 
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static struct drm_info_list debugfs_files[] = {
@@ -548,14 +560,19 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
 
 		/* horizontal sync width */
 		hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
-		hsw -= 10;
 
 		/* horizontal back porch */
 		hbp = (mode->htotal - mode->hsync_end) * mul / div;
-		hbp -= 14;
+
+		if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
+			hbp += hsw;
 
 		/* horizontal front porch */
 		hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+
+		/* subtract packet overhead */
+		hsw -= 10;
+		hbp -= 14;
 		hfp -= 8;
 
 		tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
@@ -726,10 +743,6 @@ static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
 		tegra_dsi_soft_reset(dsi->slave);
 }
 
-static void tegra_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
-}
-
 static void tegra_dsi_connector_reset(struct drm_connector *connector)
 {
 	struct tegra_dsi_state *state;
@@ -756,7 +769,7 @@ tegra_dsi_connector_duplicate_state(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
-	.dpms = tegra_dsi_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = tegra_dsi_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -782,22 +795,48 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 {
-}
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	struct tegra_dsi *dsi = to_dsi(output);
+	u32 value;
+	int err;
 
-static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+	if (output->panel)
+		drm_panel_disable(output->panel);
 
-static void tegra_dsi_encoder_commit(struct drm_encoder *encoder)
-{
+	tegra_dsi_video_disable(dsi);
+
+	/*
+	 * The following code accesses registers of the display controller,
+	 * so make sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~DSI_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+		tegra_dc_commit(dc);
+	}
+
+	err = tegra_dsi_wait_idle(dsi, 100);
+	if (err < 0)
+		dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
+
+	tegra_dsi_soft_reset(dsi);
+
+	if (output->panel)
+		drm_panel_unprepare(output->panel);
+
+	tegra_dsi_disable(dsi);
+
+	return;
 }
 
-static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
+static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
 {
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
 	struct tegra_dsi *dsi = to_dsi(output);
@@ -835,45 +874,6 @@ static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
 	return;
 }
 
-static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-	struct tegra_dsi *dsi = to_dsi(output);
-	u32 value;
-	int err;
-
-	if (output->panel)
-		drm_panel_disable(output->panel);
-
-	tegra_dsi_video_disable(dsi);
-
-	/*
-	 * The following accesses registers of the display controller, so make
-	 * sure it's only executed when the output is attached to one.
-	 */
-	if (dc) {
-		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-		value &= ~DSI_ENABLE;
-		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-		tegra_dc_commit(dc);
-	}
-
-	err = tegra_dsi_wait_idle(dsi, 100);
-	if (err < 0)
-		dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
-
-	tegra_dsi_soft_reset(dsi);
-
-	if (output->panel)
-		drm_panel_unprepare(output->panel);
-
-	tegra_dsi_disable(dsi);
-
-	return;
-}
-
 static int
 tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
 			       struct drm_crtc_state *crtc_state,
@@ -956,11 +956,8 @@ tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
-	.dpms = tegra_dsi_encoder_dpms,
-	.prepare = tegra_dsi_encoder_prepare,
-	.commit = tegra_dsi_encoder_commit,
-	.mode_set = tegra_dsi_encoder_mode_set,
 	.disable = tegra_dsi_encoder_disable,
+	.enable = tegra_dsi_encoder_enable,
 	.atomic_check = tegra_dsi_encoder_atomic_check,
 };
 
@@ -992,6 +989,10 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
 		DSI_PAD_OUT_CLK(0x0);
 	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
 
+	value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+		DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
 	return tegra_mipi_calibrate(dsi->mipi);
 }
 
@@ -1621,6 +1622,9 @@ static int tegra_dsi_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id tegra_dsi_of_match[] = {
+	{ .compatible = "nvidia,tegra210-dsi", },
+	{ .compatible = "nvidia,tegra132-dsi", },
+	{ .compatible = "nvidia,tegra124-dsi", },
 	{ .compatible = "nvidia,tegra114-dsi", },
 	{ },
 };
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index bad1006a5150..219263615399 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -113,6 +113,10 @@
 #define DSI_PAD_SLEW_DN(x)		(((x) & 0x7) << 12)
 #define DSI_PAD_SLEW_UP(x)		(((x) & 0x7) << 16)
 #define DSI_PAD_CONTROL_3		0x51
+#define  DSI_PAD_PREEMP_PD_CLK(x)	(((x) & 0x3) << 12)
+#define  DSI_PAD_PREEMP_PU_CLK(x)	(((x) & 0x3) << 8)
+#define  DSI_PAD_PREEMP_PD(x)		(((x) & 0x3) << 4)
+#define  DSI_PAD_PREEMP_PU(x)		(((x) & 0x3) << 0)
 #define DSI_PAD_CONTROL_4		0x52
 #define DSI_GANGED_MODE_CONTROL		0x53
 #define DSI_GANGED_MODE_CONTROL_ENABLE	(1 << 0)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 397fb34d5d5b..07c844b746b4 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -184,9 +184,9 @@ unreference:
 #ifdef CONFIG_DRM_TEGRA_FBDEV
 static struct fb_ops tegra_fb_ops = {
 	.owner = THIS_MODULE,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
@@ -224,11 +224,11 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	info = framebuffer_alloc(0, drm->dev);
-	if (!info) {
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
 		dev_err(drm->dev, "failed to allocate framebuffer info\n");
 		drm_gem_object_unreference_unlocked(&bo->gem);
-		return -ENOMEM;
+		return PTR_ERR(info);
 	}
 
 	fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
@@ -248,12 +248,6 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
 	info->flags = FBINFO_FLAG_DEFAULT;
 	info->fbops = &tegra_fb_ops;
 
-	err = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (err < 0) {
-		dev_err(drm->dev, "failed to allocate color map: %d\n", err);
-		goto destroy;
-	}
-
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
 
@@ -282,7 +276,7 @@ destroy:
 	drm_framebuffer_unregister_private(fb);
 	tegra_fb_destroy(fb);
 release:
-	framebuffer_release(info);
+	drm_fb_helper_release_fbi(helper);
 	return err;
 }
 
@@ -347,20 +341,9 @@ fini:
 
 static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
 {
-	struct fb_info *info = fbdev->base.fbdev;
-
-	if (info) {
-		int err;
 
-		err = unregister_framebuffer(info);
-		if (err < 0)
-			DRM_DEBUG_KMS("failed to unregister framebuffer\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbdev->base);
+	drm_fb_helper_release_fbi(&fbdev->base);
 
 	if (fbdev->fb) {
 		drm_framebuffer_unregister_private(&fbdev->fb->base);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 06ab1783bba1..52b32cbd9de6 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -772,13 +772,8 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
 	return drm_detect_hdmi_monitor(edid);
 }
 
-static void tegra_hdmi_connector_dpms(struct drm_connector *connector,
-				      int mode)
-{
-}
-
 static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
-	.dpms = tegra_hdmi_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -818,22 +813,27 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 {
-}
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	u32 value;
 
-static void tegra_hdmi_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+	/*
+	 * The following code accesses registers of the display controller,
+	 * so make sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~HDMI_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
 
-static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder)
-{
+		tegra_dc_commit(dc);
+	}
 }
 
-static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
-					struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted)
+static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 {
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
@@ -872,13 +872,13 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
 
 	tegra_dc_writel(dc, VSYNC_H_POSITION(1),
 			DC_DISP_DISP_TIMING_OPTIONS);
-	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE_888,
 			DC_DISP_DISP_COLOR_CONTROL);
 
 	/* video_preamble uses h_pulse2 */
 	pulse_start = 1 + h_sync_width + h_back_porch - 10;
 
-	tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+	tegra_dc_writel(dc, H_PULSE2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
 
 	value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
 		PULSE_LAST_END_A;
@@ -1035,24 +1035,6 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
 	/* TODO: add HDCP support */
 }
 
-static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-	u32 value;
-
-	/*
-	 * The following accesses registers of the display controller, so make
-	 * sure it's only executed when the output is attached to one.
-	 */
-	if (dc) {
-		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-		value &= ~HDMI_ENABLE;
-		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-		tegra_dc_commit(dc);
-	}
-}
-
 static int
 tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
 				struct drm_crtc_state *crtc_state,
@@ -1075,11 +1057,8 @@ tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
-	.dpms = tegra_hdmi_encoder_dpms,
-	.prepare = tegra_hdmi_encoder_prepare,
-	.commit = tegra_hdmi_encoder_commit,
-	.mode_set = tegra_hdmi_encoder_mode_set,
 	.disable = tegra_hdmi_encoder_disable,
+	.enable = tegra_hdmi_encoder_enable,
 	.atomic_check = tegra_hdmi_encoder_atomic_check,
 };
 
@@ -1087,11 +1066,16 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_hdmi *hdmi = node->info_ent->data;
-	int err;
+	struct drm_crtc *crtc = hdmi->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
 
-	err = clk_prepare_enable(hdmi->clk);
-	if (err)
-		return err;
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-56s %#05x %08x\n", #name, name,	\
@@ -1258,9 +1242,9 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
 
 #undef DUMP_REG
 
-	clk_disable_unprepare(hdmi->clk);
-
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static struct drm_info_list debugfs_files[] = {
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 37db47975d48..46664b622270 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,8 +7,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/of_gpio.h>
-
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_panel.h>
 #include "drm.h"
@@ -59,10 +57,17 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
 	enum drm_connector_status status = connector_status_unknown;
 
 	if (gpio_is_valid(output->hpd_gpio)) {
-		if (gpio_get_value(output->hpd_gpio) == 0)
-			status = connector_status_disconnected;
-		else
-			status = connector_status_connected;
+		if (output->hpd_gpio_flags & OF_GPIO_ACTIVE_LOW) {
+			if (gpio_get_value(output->hpd_gpio) != 0)
+				status = connector_status_disconnected;
+			else
+				status = connector_status_connected;
+		} else {
+			if (gpio_get_value(output->hpd_gpio) == 0)
+				status = connector_status_disconnected;
+			else
+				status = connector_status_connected;
+		}
 	} else {
 		if (!output->panel)
 			status = connector_status_disconnected;
@@ -97,7 +102,6 @@ static irqreturn_t hpd_irq(int irq, void *data)
 int tegra_output_probe(struct tegra_output *output)
 {
 	struct device_node *ddc, *panel;
-	enum of_gpio_flags flags;
 	int err, size;
 
 	if (!output->of_node)
@@ -128,7 +132,7 @@ int tegra_output_probe(struct tegra_output *output)
 
 	output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
 						   "nvidia,hpd-gpio", 0,
-						   &flags);
+						   &output->hpd_gpio_flags);
 	if (gpio_is_valid(output->hpd_gpio)) {
 		unsigned long flags;
 
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 7cd833f5b5b5..bc9735b4ad60 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -18,7 +18,6 @@
 struct tegra_rgb {
 	struct tegra_output output;
 	struct tegra_dc *dc;
-	bool enabled;
 
 	struct clk *clk_parent;
 	struct clk *clk;
@@ -88,13 +87,8 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
 		tegra_dc_writel(dc, table[i].value, table[i].offset);
 }
 
-static void tegra_rgb_connector_dpms(struct drm_connector *connector,
-				     int mode)
-{
-}
-
 static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
-	.dpms = tegra_rgb_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -125,21 +119,22 @@ static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
 {
-}
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_rgb *rgb = to_rgb(output);
 
-static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder)
-{
-}
+	if (output->panel)
+		drm_panel_disable(output->panel);
 
-static void tegra_rgb_encoder_commit(struct drm_encoder *encoder)
-{
+	tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+	tegra_dc_commit(rgb->dc);
+
+	if (output->panel)
+		drm_panel_unprepare(output->panel);
 }
 
-static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
+static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
 {
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_rgb *rgb = to_rgb(output);
@@ -174,21 +169,6 @@ static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
 		drm_panel_enable(output->panel);
 }
 
-static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_rgb *rgb = to_rgb(output);
-
-	if (output->panel)
-		drm_panel_disable(output->panel);
-
-	tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
-	tegra_dc_commit(rgb->dc);
-
-	if (output->panel)
-		drm_panel_unprepare(output->panel);
-}
-
 static int
 tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
 			       struct drm_crtc_state *crtc_state,
@@ -231,11 +211,8 @@ tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
-	.dpms = tegra_rgb_encoder_dpms,
-	.prepare = tegra_rgb_encoder_prepare,
-	.commit = tegra_rgb_encoder_commit,
-	.mode_set = tegra_rgb_encoder_mode_set,
 	.disable = tegra_rgb_encoder_disable,
+	.enable = tegra_rgb_encoder_enable,
 	.atomic_check = tegra_rgb_encoder_atomic_check,
 };
 
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7591d8901f9a..da1715ebdd71 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -10,7 +10,9 @@
 #include <linux/debugfs.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 
 #include <soc/tegra/pmc.h>
@@ -23,11 +25,146 @@
 #include "drm.h"
 #include "sor.h"
 
+#define SOR_REKEY 0x38
+
+struct tegra_sor_hdmi_settings {
+	unsigned long frequency;
+
+	u8 vcocap;
+	u8 ichpmp;
+	u8 loadadj;
+	u8 termadj;
+	u8 tx_pu;
+	u8 bg_vref;
+
+	u8 drive_current[4];
+	u8 preemphasis[4];
+};
+
+#if 1
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+	{
+		.frequency = 54000000,
+		.vcocap = 0x0,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x10,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 75000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x40,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 150000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 300000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0xa,
+		.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+		.preemphasis = { 0x00, 0x17, 0x17, 0x17 },
+	}, {
+		.frequency = 600000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	},
+};
+#else
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+	{
+		.frequency = 75000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x40,
+		.bg_vref = 0x8,
+		.drive_current = { 0x29, 0x29, 0x29, 0x29 },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 150000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x30, 0x37, 0x37, 0x37 },
+		.preemphasis = { 0x01, 0x02, 0x02, 0x02 },
+	}, {
+		.frequency = 300000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x6,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0xf,
+		.drive_current = { 0x30, 0x37, 0x37, 0x37 },
+		.preemphasis = { 0x10, 0x3e, 0x3e, 0x3e },
+	}, {
+		.frequency = 600000000,
+		.vcocap = 0x3,
+		.ichpmp = 0xa,
+		.loadadj = 0x3,
+		.termadj = 0xb,
+		.tx_pu = 0x66,
+		.bg_vref = 0xe,
+		.drive_current = { 0x35, 0x3e, 0x3e, 0x3e },
+		.preemphasis = { 0x02, 0x3f, 0x3f, 0x3f },
+	},
+};
+#endif
+
+struct tegra_sor_soc {
+	bool supports_edp;
+	bool supports_lvds;
+	bool supports_hdmi;
+	bool supports_dp;
+
+	const struct tegra_sor_hdmi_settings *settings;
+	unsigned int num_settings;
+};
+
+struct tegra_sor;
+
+struct tegra_sor_ops {
+	const char *name;
+	int (*probe)(struct tegra_sor *sor);
+	int (*remove)(struct tegra_sor *sor);
+};
+
 struct tegra_sor {
 	struct host1x_client client;
 	struct tegra_output output;
 	struct device *dev;
 
+	const struct tegra_sor_soc *soc;
 	void __iomem *regs;
 
 	struct reset_control *rst;
@@ -38,12 +175,19 @@ struct tegra_sor {
 
 	struct tegra_dpaux *dpaux;
 
-	struct mutex lock;
-	bool enabled;
-
 	struct drm_info_list *debugfs_files;
 	struct drm_minor *minor;
 	struct dentry *debugfs;
+
+	const struct tegra_sor_ops *ops;
+
+	/* for HDMI 2.0 */
+	struct tegra_sor_hdmi_settings *settings;
+	unsigned int num_settings;
+
+	struct regulator *avdd_io_supply;
+	struct regulator *vdd_pll_supply;
+	struct regulator *hdmi_supply;
 };
 
 struct tegra_sor_config {
@@ -94,40 +238,40 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
 		SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
 		SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
 		SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
-	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT_0);
+	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
 
 	value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE0(0x0f);
-	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS_0);
+	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
 
-	value = SOR_LANE_POST_CURSOR_LANE3(0x00) |
-		SOR_LANE_POST_CURSOR_LANE2(0x00) |
-		SOR_LANE_POST_CURSOR_LANE1(0x00) |
-		SOR_LANE_POST_CURSOR_LANE0(0x00);
-	tegra_sor_writel(sor, value, SOR_LANE_POST_CURSOR_0);
+	value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
+		SOR_LANE_POSTCURSOR_LANE2(0x00) |
+		SOR_LANE_POSTCURSOR_LANE1(0x00) |
+		SOR_LANE_POSTCURSOR_LANE0(0x00);
+	tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
 
 	/* disable LVDS mode */
 	tegra_sor_writel(sor, 0, SOR_LVDS);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_TX_PU_ENABLE;
 	value &= ~SOR_DP_PADCTL_TX_PU_MASK;
 	value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
 		 SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	usleep_range(10, 100);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
 		   SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B);
 	if (err < 0)
@@ -148,11 +292,11 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
 	if (err < 0)
 		return err;
 
-	value = tegra_sor_readl(sor, SOR_DP_SPARE_0);
+	value = tegra_sor_readl(sor, SOR_DP_SPARE0);
 	value |= SOR_DP_SPARE_SEQ_ENABLE;
 	value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
 	value |= SOR_DP_SPARE_MACRO_SOR_CLK;
-	tegra_sor_writel(sor, value, SOR_DP_SPARE_0);
+	tegra_sor_writel(sor, value, SOR_DP_SPARE0);
 
 	for (i = 0, value = 0; i < link->num_lanes; i++) {
 		unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -187,18 +331,59 @@ static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
 	return 0;
 }
 
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+	u32 mask = 0x08, adj = 0, value;
+
+	/* enable pad calibration logic */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value |= SOR_PLL1_TMDS_TERM;
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
+	while (mask) {
+		adj |= mask;
+
+		value = tegra_sor_readl(sor, SOR_PLL1);
+		value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+		value |= SOR_PLL1_TMDS_TERMADJ(adj);
+		tegra_sor_writel(sor, value, SOR_PLL1);
+
+		usleep_range(100, 200);
+
+		value = tegra_sor_readl(sor, SOR_PLL1);
+		if (value & SOR_PLL1_TERM_COMPOUT)
+			adj &= ~mask;
+
+		mask >>= 1;
+	}
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+	value |= SOR_PLL1_TMDS_TERMADJ(adj);
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
+	/* disable pad calibration logic */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value |= SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+}
+
 static void tegra_sor_super_update(struct tegra_sor *sor)
 {
-	tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
-	tegra_sor_writel(sor, 1, SOR_SUPER_STATE_0);
-	tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
+	tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
+	tegra_sor_writel(sor, 1, SOR_SUPER_STATE0);
+	tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
 }
 
 static void tegra_sor_update(struct tegra_sor *sor)
 {
-	tegra_sor_writel(sor, 0, SOR_STATE_0);
-	tegra_sor_writel(sor, 1, SOR_STATE_0);
-	tegra_sor_writel(sor, 0, SOR_STATE_0);
+	tegra_sor_writel(sor, 0, SOR_STATE0);
+	tegra_sor_writel(sor, 1, SOR_STATE0);
+	tegra_sor_writel(sor, 0, SOR_STATE0);
 }
 
 static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
@@ -235,16 +420,16 @@ static int tegra_sor_attach(struct tegra_sor *sor)
 	unsigned long value, timeout;
 
 	/* wake up in normal mode */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE;
 	value |= SOR_SUPER_STATE_MODE_NORMAL;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	/* attach */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value |= SOR_SUPER_STATE_ATTACHED;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -385,7 +570,7 @@ static int tegra_sor_compute_params(struct tegra_sor *sor,
 }
 
 static int tegra_sor_calc_config(struct tegra_sor *sor,
-				 struct drm_display_mode *mode,
+				 const struct drm_display_mode *mode,
 				 struct tegra_sor_config *config,
 				 struct drm_dp_link *link)
 {
@@ -481,9 +666,9 @@ static int tegra_sor_detach(struct tegra_sor *sor)
 	unsigned long value, timeout;
 
 	/* switch to safe mode */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_MODE_NORMAL;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -498,15 +683,15 @@ static int tegra_sor_detach(struct tegra_sor *sor)
 		return -ETIMEDOUT;
 
 	/* go to sleep */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	/* detach */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_ATTACHED;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -552,10 +737,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
 	if (err < 0)
 		dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
 		   SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	/* stop lane sequencer */
 	value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
@@ -575,39 +760,26 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
 	if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
 		return -ETIMEDOUT;
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_PORT_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
-	value = tegra_sor_readl(sor, SOR_PLL_0);
-	value |= SOR_PLL_0_POWER_OFF;
-	value |= SOR_PLL_0_VCOPD;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_SEQ_PLLCAPPD;
-	value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_SEQ_PLLCAPPD;
+	value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
 	return 0;
 }
 
-static int tegra_sor_crc_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
-static int tegra_sor_crc_release(struct inode *inode, struct file *file)
-{
-	return 0;
-}
-
 static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
 {
 	u32 value;
@@ -615,8 +787,8 @@ static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
 	timeout = jiffies + msecs_to_jiffies(timeout);
 
 	while (time_before(jiffies, timeout)) {
-		value = tegra_sor_readl(sor, SOR_CRC_A);
-		if (value & SOR_CRC_A_VALID)
+		value = tegra_sor_readl(sor, SOR_CRCA);
+		if (value & SOR_CRCA_VALID)
 			return 0;
 
 		usleep_range(100, 200);
@@ -625,24 +797,25 @@ static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
 	return -ETIMEDOUT;
 }
 
-static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
-				  size_t size, loff_t *ppos)
+static int tegra_sor_show_crc(struct seq_file *s, void *data)
 {
-	struct tegra_sor *sor = file->private_data;
-	ssize_t num, err;
-	char buf[10];
+	struct drm_info_node *node = s->private;
+	struct tegra_sor *sor = node->info_ent->data;
+	struct drm_crtc *crtc = sor->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
 	u32 value;
 
-	mutex_lock(&sor->lock);
+	drm_modeset_lock_all(drm);
 
-	if (!sor->enabled) {
-		err = -EAGAIN;
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
 		goto unlock;
 	}
 
-	value = tegra_sor_readl(sor, SOR_STATE_1);
+	value = tegra_sor_readl(sor, SOR_STATE1);
 	value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
-	tegra_sor_writel(sor, value, SOR_STATE_1);
+	tegra_sor_writel(sor, value, SOR_STATE1);
 
 	value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
 	value |= SOR_CRC_CNTRL_ENABLE;
@@ -656,65 +829,66 @@ static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
 	if (err < 0)
 		goto unlock;
 
-	tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A);
-	value = tegra_sor_readl(sor, SOR_CRC_B);
+	tegra_sor_writel(sor, SOR_CRCA_RESET, SOR_CRCA);
+	value = tegra_sor_readl(sor, SOR_CRCB);
 
-	num = scnprintf(buf, sizeof(buf), "%08x\n", value);
-
-	err = simple_read_from_buffer(buffer, size, ppos, buf, num);
+	seq_printf(s, "%08x\n", value);
 
 unlock:
-	mutex_unlock(&sor->lock);
+	drm_modeset_unlock_all(drm);
 	return err;
 }
 
-static const struct file_operations tegra_sor_crc_fops = {
-	.owner = THIS_MODULE,
-	.open = tegra_sor_crc_open,
-	.read = tegra_sor_crc_read,
-	.release = tegra_sor_crc_release,
-};
-
 static int tegra_sor_show_regs(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_sor *sor = node->info_ent->data;
+	struct drm_crtc *crtc = sor->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
+
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-38s %#05x %08x\n", #name, name,	\
 		   tegra_sor_readl(sor, name))
 
 	DUMP_REG(SOR_CTXSW);
-	DUMP_REG(SOR_SUPER_STATE_0);
-	DUMP_REG(SOR_SUPER_STATE_1);
-	DUMP_REG(SOR_STATE_0);
-	DUMP_REG(SOR_STATE_1);
-	DUMP_REG(SOR_HEAD_STATE_0(0));
-	DUMP_REG(SOR_HEAD_STATE_0(1));
-	DUMP_REG(SOR_HEAD_STATE_1(0));
-	DUMP_REG(SOR_HEAD_STATE_1(1));
-	DUMP_REG(SOR_HEAD_STATE_2(0));
-	DUMP_REG(SOR_HEAD_STATE_2(1));
-	DUMP_REG(SOR_HEAD_STATE_3(0));
-	DUMP_REG(SOR_HEAD_STATE_3(1));
-	DUMP_REG(SOR_HEAD_STATE_4(0));
-	DUMP_REG(SOR_HEAD_STATE_4(1));
-	DUMP_REG(SOR_HEAD_STATE_5(0));
-	DUMP_REG(SOR_HEAD_STATE_5(1));
+	DUMP_REG(SOR_SUPER_STATE0);
+	DUMP_REG(SOR_SUPER_STATE1);
+	DUMP_REG(SOR_STATE0);
+	DUMP_REG(SOR_STATE1);
+	DUMP_REG(SOR_HEAD_STATE0(0));
+	DUMP_REG(SOR_HEAD_STATE0(1));
+	DUMP_REG(SOR_HEAD_STATE1(0));
+	DUMP_REG(SOR_HEAD_STATE1(1));
+	DUMP_REG(SOR_HEAD_STATE2(0));
+	DUMP_REG(SOR_HEAD_STATE2(1));
+	DUMP_REG(SOR_HEAD_STATE3(0));
+	DUMP_REG(SOR_HEAD_STATE3(1));
+	DUMP_REG(SOR_HEAD_STATE4(0));
+	DUMP_REG(SOR_HEAD_STATE4(1));
+	DUMP_REG(SOR_HEAD_STATE5(0));
+	DUMP_REG(SOR_HEAD_STATE5(1));
 	DUMP_REG(SOR_CRC_CNTRL);
 	DUMP_REG(SOR_DP_DEBUG_MVID);
 	DUMP_REG(SOR_CLK_CNTRL);
 	DUMP_REG(SOR_CAP);
 	DUMP_REG(SOR_PWR);
 	DUMP_REG(SOR_TEST);
-	DUMP_REG(SOR_PLL_0);
-	DUMP_REG(SOR_PLL_1);
-	DUMP_REG(SOR_PLL_2);
-	DUMP_REG(SOR_PLL_3);
+	DUMP_REG(SOR_PLL0);
+	DUMP_REG(SOR_PLL1);
+	DUMP_REG(SOR_PLL2);
+	DUMP_REG(SOR_PLL3);
 	DUMP_REG(SOR_CSTM);
 	DUMP_REG(SOR_LVDS);
-	DUMP_REG(SOR_CRC_A);
-	DUMP_REG(SOR_CRC_B);
+	DUMP_REG(SOR_CRCA);
+	DUMP_REG(SOR_CRCB);
 	DUMP_REG(SOR_BLANK);
 	DUMP_REG(SOR_SEQ_CTL);
 	DUMP_REG(SOR_LANE_SEQ_CTL);
@@ -736,86 +910,89 @@ static int tegra_sor_show_regs(struct seq_file *s, void *data)
 	DUMP_REG(SOR_SEQ_INST(15));
 	DUMP_REG(SOR_PWM_DIV);
 	DUMP_REG(SOR_PWM_CTL);
-	DUMP_REG(SOR_VCRC_A_0);
-	DUMP_REG(SOR_VCRC_A_1);
-	DUMP_REG(SOR_VCRC_B_0);
-	DUMP_REG(SOR_VCRC_B_1);
-	DUMP_REG(SOR_CCRC_A_0);
-	DUMP_REG(SOR_CCRC_A_1);
-	DUMP_REG(SOR_CCRC_B_0);
-	DUMP_REG(SOR_CCRC_B_1);
-	DUMP_REG(SOR_EDATA_A_0);
-	DUMP_REG(SOR_EDATA_A_1);
-	DUMP_REG(SOR_EDATA_B_0);
-	DUMP_REG(SOR_EDATA_B_1);
-	DUMP_REG(SOR_COUNT_A_0);
-	DUMP_REG(SOR_COUNT_A_1);
-	DUMP_REG(SOR_COUNT_B_0);
-	DUMP_REG(SOR_COUNT_B_1);
-	DUMP_REG(SOR_DEBUG_A_0);
-	DUMP_REG(SOR_DEBUG_A_1);
-	DUMP_REG(SOR_DEBUG_B_0);
-	DUMP_REG(SOR_DEBUG_B_1);
+	DUMP_REG(SOR_VCRC_A0);
+	DUMP_REG(SOR_VCRC_A1);
+	DUMP_REG(SOR_VCRC_B0);
+	DUMP_REG(SOR_VCRC_B1);
+	DUMP_REG(SOR_CCRC_A0);
+	DUMP_REG(SOR_CCRC_A1);
+	DUMP_REG(SOR_CCRC_B0);
+	DUMP_REG(SOR_CCRC_B1);
+	DUMP_REG(SOR_EDATA_A0);
+	DUMP_REG(SOR_EDATA_A1);
+	DUMP_REG(SOR_EDATA_B0);
+	DUMP_REG(SOR_EDATA_B1);
+	DUMP_REG(SOR_COUNT_A0);
+	DUMP_REG(SOR_COUNT_A1);
+	DUMP_REG(SOR_COUNT_B0);
+	DUMP_REG(SOR_COUNT_B1);
+	DUMP_REG(SOR_DEBUG_A0);
+	DUMP_REG(SOR_DEBUG_A1);
+	DUMP_REG(SOR_DEBUG_B0);
+	DUMP_REG(SOR_DEBUG_B1);
 	DUMP_REG(SOR_TRIG);
 	DUMP_REG(SOR_MSCHECK);
 	DUMP_REG(SOR_XBAR_CTRL);
 	DUMP_REG(SOR_XBAR_POL);
-	DUMP_REG(SOR_DP_LINKCTL_0);
-	DUMP_REG(SOR_DP_LINKCTL_1);
-	DUMP_REG(SOR_LANE_DRIVE_CURRENT_0);
-	DUMP_REG(SOR_LANE_DRIVE_CURRENT_1);
-	DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0);
-	DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1);
-	DUMP_REG(SOR_LANE_PREEMPHASIS_0);
-	DUMP_REG(SOR_LANE_PREEMPHASIS_1);
-	DUMP_REG(SOR_LANE4_PREEMPHASIS_0);
-	DUMP_REG(SOR_LANE4_PREEMPHASIS_1);
-	DUMP_REG(SOR_LANE_POST_CURSOR_0);
-	DUMP_REG(SOR_LANE_POST_CURSOR_1);
-	DUMP_REG(SOR_DP_CONFIG_0);
-	DUMP_REG(SOR_DP_CONFIG_1);
-	DUMP_REG(SOR_DP_MN_0);
-	DUMP_REG(SOR_DP_MN_1);
-	DUMP_REG(SOR_DP_PADCTL_0);
-	DUMP_REG(SOR_DP_PADCTL_1);
-	DUMP_REG(SOR_DP_DEBUG_0);
-	DUMP_REG(SOR_DP_DEBUG_1);
-	DUMP_REG(SOR_DP_SPARE_0);
-	DUMP_REG(SOR_DP_SPARE_1);
+	DUMP_REG(SOR_DP_LINKCTL0);
+	DUMP_REG(SOR_DP_LINKCTL1);
+	DUMP_REG(SOR_LANE_DRIVE_CURRENT0);
+	DUMP_REG(SOR_LANE_DRIVE_CURRENT1);
+	DUMP_REG(SOR_LANE4_DRIVE_CURRENT0);
+	DUMP_REG(SOR_LANE4_DRIVE_CURRENT1);
+	DUMP_REG(SOR_LANE_PREEMPHASIS0);
+	DUMP_REG(SOR_LANE_PREEMPHASIS1);
+	DUMP_REG(SOR_LANE4_PREEMPHASIS0);
+	DUMP_REG(SOR_LANE4_PREEMPHASIS1);
+	DUMP_REG(SOR_LANE_POSTCURSOR0);
+	DUMP_REG(SOR_LANE_POSTCURSOR1);
+	DUMP_REG(SOR_DP_CONFIG0);
+	DUMP_REG(SOR_DP_CONFIG1);
+	DUMP_REG(SOR_DP_MN0);
+	DUMP_REG(SOR_DP_MN1);
+	DUMP_REG(SOR_DP_PADCTL0);
+	DUMP_REG(SOR_DP_PADCTL1);
+	DUMP_REG(SOR_DP_DEBUG0);
+	DUMP_REG(SOR_DP_DEBUG1);
+	DUMP_REG(SOR_DP_SPARE0);
+	DUMP_REG(SOR_DP_SPARE1);
 	DUMP_REG(SOR_DP_AUDIO_CTRL);
 	DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS);
 	DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS);
 	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK0);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK1);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK2);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK3);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK4);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK5);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK6);
 	DUMP_REG(SOR_DP_TPG);
 	DUMP_REG(SOR_DP_TPG_CONFIG);
-	DUMP_REG(SOR_DP_LQ_CSTM_0);
-	DUMP_REG(SOR_DP_LQ_CSTM_1);
-	DUMP_REG(SOR_DP_LQ_CSTM_2);
+	DUMP_REG(SOR_DP_LQ_CSTM0);
+	DUMP_REG(SOR_DP_LQ_CSTM1);
+	DUMP_REG(SOR_DP_LQ_CSTM2);
 
 #undef DUMP_REG
 
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static const struct drm_info_list debugfs_files[] = {
+	{ "crc", tegra_sor_show_crc, 0, NULL },
 	{ "regs", tegra_sor_show_regs, 0, NULL },
 };
 
 static int tegra_sor_debugfs_init(struct tegra_sor *sor,
 				  struct drm_minor *minor)
 {
-	struct dentry *entry;
+	const char *name = sor->soc->supports_dp ? "sor1" : "sor";
 	unsigned int i;
-	int err = 0;
+	int err;
 
-	sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
+	sor->debugfs = debugfs_create_dir(name, minor->debugfs_root);
 	if (!sor->debugfs)
 		return -ENOMEM;
 
@@ -835,14 +1012,9 @@ static int tegra_sor_debugfs_init(struct tegra_sor *sor,
 	if (err < 0)
 		goto free;
 
-	entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
-				    &tegra_sor_crc_fops);
-	if (!entry) {
-		err = -ENOMEM;
-		goto free;
-	}
+	sor->minor = minor;
 
-	return err;
+	return 0;
 
 free:
 	kfree(sor->debugfs_files);
@@ -860,14 +1032,10 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
 	sor->minor = NULL;
 
 	kfree(sor->debugfs_files);
-	sor->debugfs = NULL;
-
-	debugfs_remove_recursive(sor->debugfs);
 	sor->debugfs_files = NULL;
-}
 
-static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
-{
+	debugfs_remove_recursive(sor->debugfs);
+	sor->debugfs = NULL;
 }
 
 static enum drm_connector_status
@@ -879,11 +1047,11 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
 	if (sor->dpaux)
 		return tegra_dpaux_detect(sor->dpaux);
 
-	return connector_status_unknown;
+	return tegra_output_connector_detect(connector, force);
 }
 
 static const struct drm_connector_funcs tegra_sor_connector_funcs = {
-	.dpms = tegra_sor_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_sor_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -926,22 +1094,102 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_sor_edp_disable(struct drm_encoder *encoder)
 {
-}
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	struct tegra_sor *sor = to_sor(output);
+	u32 value;
+	int err;
 
-static void tegra_sor_encoder_prepare(struct drm_encoder *encoder)
-{
+	if (output->panel)
+		drm_panel_disable(output->panel);
+
+	err = tegra_sor_detach(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+	tegra_sor_writel(sor, 0, SOR_STATE1);
+	tegra_sor_update(sor);
+
+	/*
+	 * The following code accesses registers of the display controller,
+	 * so make sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~SOR_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+		tegra_dc_commit(dc);
+	}
+
+	err = tegra_sor_power_down(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+	if (sor->dpaux) {
+		err = tegra_dpaux_disable(sor->dpaux);
+		if (err < 0)
+			dev_err(sor->dev, "failed to disable DP: %d\n", err);
+	}
+
+	err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
+
+	if (output->panel)
+		drm_panel_unprepare(output->panel);
+
+	reset_control_assert(sor->rst);
+	clk_disable_unprepare(sor->clk);
 }
 
-static void tegra_sor_encoder_commit(struct drm_encoder *encoder)
+#if 0
+static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
+			      unsigned int *value)
 {
+	unsigned int hfp, hsw, hbp, a = 0, b;
+
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsw = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
+
+	pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
+
+	b = hfp - 1;
+
+	pr_info("a: %u, b: %u\n", a, b);
+	pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
+
+	if (a + hsw + hbp <= 11) {
+		a = 1 + 11 - hsw - hbp;
+		pr_info("a: %u\n", a);
+	}
+
+	if (a > b)
+		return -EINVAL;
+
+	if (hsw < 1)
+		return -EINVAL;
+
+	if (mode->hdisplay < 16)
+		return -EINVAL;
+
+	if (value) {
+		if (b > a && a % 2)
+			*value = a + 1;
+		else
+			*value = a;
+	}
+
+	return 0;
 }
+#endif
 
-static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
+static void tegra_sor_edp_enable(struct drm_encoder *encoder)
 {
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
 	unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
@@ -952,14 +1200,9 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 	int err = 0;
 	u32 value;
 
-	mutex_lock(&sor->lock);
-
-	if (sor->enabled)
-		goto unlock;
-
 	err = clk_prepare_enable(sor->clk);
 	if (err < 0)
-		goto unlock;
+		dev_err(sor->dev, "failed to enable clock: %d\n", err);
 
 	reset_control_deassert(sor->rst);
 
@@ -978,7 +1221,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 		if (err < 0) {
 			dev_err(sor->dev, "failed to probe eDP link: %d\n",
 				err);
-			goto unlock;
+			return;
 		}
 	}
 
@@ -999,40 +1242,40 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 	value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
 	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 	usleep_range(20, 100);
 
-	value = tegra_sor_readl(sor, SOR_PLL_3);
-	value |= SOR_PLL_3_PLL_VDD_MODE_V3_3;
-	tegra_sor_writel(sor, value, SOR_PLL_3);
+	value = tegra_sor_readl(sor, SOR_PLL3);
+	value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+	tegra_sor_writel(sor, value, SOR_PLL3);
 
-	value = SOR_PLL_0_ICHPMP(0xf) | SOR_PLL_0_VCOCAP_RST |
-		SOR_PLL_0_PLLREG_LEVEL_V45 | SOR_PLL_0_RESISTOR_EXT;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
+		SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_SEQ_PLLCAPPD;
-	value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
-	value |= SOR_PLL_2_LVDS_ENABLE;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_SEQ_PLLCAPPD;
+	value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	value |= SOR_PLL2_LVDS_ENABLE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
-	value = SOR_PLL_1_TERM_COMPOUT | SOR_PLL_1_TMDS_TERM;
-	tegra_sor_writel(sor, value, SOR_PLL_1);
+	value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
+	tegra_sor_writel(sor, value, SOR_PLL1);
 
 	while (true) {
-		value = tegra_sor_readl(sor, SOR_PLL_2);
-		if ((value & SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE) == 0)
+		value = tegra_sor_readl(sor, SOR_PLL2);
+		if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
 			break;
 
 		usleep_range(250, 1000);
 	}
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_POWERDOWN_OVERRIDE;
-	value &= ~SOR_PLL_2_PORT_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+	value &= ~SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	/*
 	 * power up
@@ -1045,51 +1288,49 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
 	/* step 1 */
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL_2_PORT_POWERDOWN |
-		 SOR_PLL_2_BANDGAP_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
+		 SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
-	value = tegra_sor_readl(sor, SOR_PLL_0);
-	value |= SOR_PLL_0_VCOPD | SOR_PLL_0_POWER_OFF;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	/* step 2 */
 	err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
-		goto unlock;
-	}
 
 	usleep_range(5, 100);
 
 	/* step 3 */
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
 	/* step 4 */
-	value = tegra_sor_readl(sor, SOR_PLL_0);
-	value &= ~SOR_PLL_0_POWER_OFF;
-	value &= ~SOR_PLL_0_VCOPD;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value &= ~SOR_PLL0_VCOPD;
+	value &= ~SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(200, 1000);
 
 	/* step 5 */
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_PORT_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	/* switch to DP clock */
 	err = clk_set_parent(sor->clk, sor->clk_dp);
@@ -1097,7 +1338,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 		dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
 
 	/* power DP lanes */
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 
 	if (link.num_lanes <= 2)
 		value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
@@ -1114,12 +1355,12 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 	else
 		value |= SOR_DP_PADCTL_PD_TXD_0;
 
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
-	value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
 	value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
 	value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
-	tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
 	/* start lane sequencer */
 	value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
@@ -1141,14 +1382,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
 	/* set linkctl */
-	value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
 	value |= SOR_DP_LINKCTL_ENABLE;
 
 	value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
 	value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
 
 	value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
-	tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
 	for (i = 0, value = 0; i < 4; i++) {
 		unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -1159,7 +1400,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 
 	tegra_sor_writel(sor, value, SOR_DP_TPG);
 
-	value = tegra_sor_readl(sor, SOR_DP_CONFIG_0);
+	value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
 	value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
 	value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
 
@@ -1176,7 +1417,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 
 	value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
 	value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
-	tegra_sor_writel(sor, value, SOR_DP_CONFIG_0);
+	tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
 
 	value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
 	value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
@@ -1189,33 +1430,27 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 	tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
 
 	/* enable pad calibration logic */
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_PAD_CAL_PD;
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	if (sor->dpaux) {
 		u8 rate, lanes;
 
 		err = drm_dp_link_probe(aux, &link);
-		if (err < 0) {
+		if (err < 0)
 			dev_err(sor->dev, "failed to probe eDP link: %d\n",
 				err);
-			goto unlock;
-		}
 
 		err = drm_dp_link_power_up(aux, &link);
-		if (err < 0) {
+		if (err < 0)
 			dev_err(sor->dev, "failed to power up eDP link: %d\n",
 				err);
-			goto unlock;
-		}
 
 		err = drm_dp_link_configure(aux, &link);
-		if (err < 0) {
+		if (err < 0)
 			dev_err(sor->dev, "failed to configure eDP link: %d\n",
 				err);
-			goto unlock;
-		}
 
 		rate = drm_dp_link_rate_to_bw_code(link.rate);
 		lanes = link.num_lanes;
@@ -1225,14 +1460,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 		value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
 		tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
-		value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+		value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
 		value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
 		value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
 
 		if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
 			value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
 
-		tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+		tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
 		/* disable training pattern generator */
 
@@ -1249,17 +1484,14 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 		if (err < 0) {
 			dev_err(sor->dev, "DP fast link training failed: %d\n",
 				err);
-			goto unlock;
 		}
 
 		dev_dbg(sor->dev, "fast link training succeeded\n");
 	}
 
 	err = tegra_sor_power_up(sor, 250);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to power up SOR: %d\n", err);
-		goto unlock;
-	}
 
 	/*
 	 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
@@ -1295,7 +1527,7 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 		break;
 	}
 
-	tegra_sor_writel(sor, value, SOR_STATE_1);
+	tegra_sor_writel(sor, value, SOR_STATE1);
 
 	/*
 	 * TODO: The video timing programming below doesn't seem to match the
@@ -1303,25 +1535,27 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 	 */
 
 	value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_1(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
 
 	vse = mode->vsync_end - mode->vsync_start - 1;
 	hse = mode->hsync_end - mode->hsync_start - 1;
 
 	value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_2(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
 
 	vbe = vse + (mode->vsync_start - mode->vdisplay);
 	hbe = hse + (mode->hsync_start - mode->hdisplay);
 
 	value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_3(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
 
 	vbs = vbe + mode->vdisplay;
 	hbs = hbe + mode->hdisplay;
 
 	value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+	tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
 
 	/* CSTM (LVDS, link A/B, upper) */
 	value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
@@ -1330,10 +1564,8 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 
 	/* PWM setup */
 	err = tegra_sor_setup_pwm(sor, 250);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to setup PWM: %d\n", err);
-		goto unlock;
-	}
 
 	tegra_sor_update(sor);
 
@@ -1344,147 +1576,610 @@ static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
 	tegra_dc_commit(dc);
 
 	err = tegra_sor_attach(sor);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to attach SOR: %d\n", err);
-		goto unlock;
-	}
 
 	err = tegra_sor_wakeup(sor);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to enable DC: %d\n", err);
-		goto unlock;
-	}
 
 	if (output->panel)
 		drm_panel_enable(output->panel);
-
-	sor->enabled = true;
-
-unlock:
-	mutex_unlock(&sor->lock);
 }
 
-static void tegra_sor_encoder_disable(struct drm_encoder *encoder)
+static int
+tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
+			       struct drm_crtc_state *crtc_state,
+			       struct drm_connector_state *conn_state)
 {
 	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
+	unsigned long pclk = crtc_state->mode.clock * 1000;
 	struct tegra_sor *sor = to_sor(output);
-	u32 value;
 	int err;
 
-	mutex_lock(&sor->lock);
+	err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
+					 pclk, 0);
+	if (err < 0) {
+		dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
+		return err;
+	}
 
-	if (!sor->enabled)
-		goto unlock;
+	return 0;
+}
 
-	if (output->panel)
-		drm_panel_disable(output->panel);
+static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
+	.disable = tegra_sor_edp_disable,
+	.enable = tegra_sor_edp_enable,
+	.atomic_check = tegra_sor_encoder_atomic_check,
+};
 
-	err = tegra_sor_detach(sor);
-	if (err < 0) {
-		dev_err(sor->dev, "failed to detach SOR: %d\n", err);
-		goto unlock;
+static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
+{
+	u32 value = 0;
+	size_t i;
+
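+	/* pack up to four bytes little-endian: { 0xaa, 0xbb, 0xcc } becomes 0x00ccbbaa */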
+	for (i = size; i > 0; i--)
+		value = (value << 8) | ptr[i - 1];
+
+	return value;
+}
+
+static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
+					  const void *data, size_t size)
+{
+	const u8 *ptr = data;
+	unsigned long offset;
+	size_t i, j;
+	u32 value;
+
+	switch (ptr[0]) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		offset = SOR_HDMI_AVI_INFOFRAME_HEADER;
+		break;
+
+	case HDMI_INFOFRAME_TYPE_AUDIO:
+		offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER;
+		break;
+
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		offset = SOR_HDMI_VSI_INFOFRAME_HEADER;
+		break;
+
+	default:
+		dev_err(sor->dev, "unsupported infoframe type: %02x\n",
+			ptr[0]);
+		return;
 	}
 
-	tegra_sor_writel(sor, 0, SOR_STATE_1);
-	tegra_sor_update(sor);
+	value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+		INFOFRAME_HEADER_VERSION(ptr[1]) |
+		INFOFRAME_HEADER_LEN(ptr[2]);
+	tegra_sor_writel(sor, value, offset);
+	offset++;
 
 	/*
-	 * The following accesses registers of the display controller, so make
-	 * sure it's only executed when the output is attached to one.
+	 * Each subpack contains 7 bytes, divided into:
+	 * - subpack_low: bytes 0 - 3
+	 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
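+	 *
+	 * A 17-byte infoframe (3-byte header plus 14-byte payload), for
+	 * example, fills the header register and two subpack pairs.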
 	 */
-	if (dc) {
-		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-		value &= ~SOR_ENABLE;
-		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+	for (i = 3, j = 0; i < size; i += 7, j += 8) {
+		size_t rem = size - i, num = min_t(size_t, rem, 4);
 
-		tegra_dc_commit(dc);
-	}
+		value = tegra_sor_hdmi_subpack(&ptr[i], num);
+		tegra_sor_writel(sor, value, offset++);
 
-	err = tegra_sor_power_down(sor);
-	if (err < 0) {
-		dev_err(sor->dev, "failed to power down SOR: %d\n", err);
-		goto unlock;
+		num = min_t(size_t, rem - num, 3);
+
+		value = tegra_sor_hdmi_subpack(&ptr[i + 4], num);
+		tegra_sor_writel(sor, value, offset++);
 	}
+}
 
-	if (sor->dpaux) {
-		err = tegra_dpaux_disable(sor->dpaux);
-		if (err < 0) {
-			dev_err(sor->dev, "failed to disable DP: %d\n", err);
-			goto unlock;
-		}
+static int
+tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
+				   const struct drm_display_mode *mode)
+{
+	u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+	struct hdmi_avi_infoframe frame;
+	u32 value;
+	int err;
+
+	/* disable AVI infoframe */
+	value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+	value &= ~INFOFRAME_CTRL_SINGLE;
+	value &= ~INFOFRAME_CTRL_OTHER;
+	value &= ~INFOFRAME_CTRL_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+		return err;
 	}
 
-	err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
 	if (err < 0) {
-		dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
-		goto unlock;
+		dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err);
+		return err;
 	}
 
-	if (output->panel)
-		drm_panel_unprepare(output->panel);
+	tegra_sor_hdmi_write_infopack(sor, buffer, err);
 
-	clk_disable_unprepare(sor->clk);
-	reset_control_assert(sor->rst);
+	/* enable AVI infoframe */
+	value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+	value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+	value |= INFOFRAME_CTRL_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
 
-	sor->enabled = false;
+	return 0;
+}
 
-unlock:
-	mutex_unlock(&sor->lock);
+static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
+{
+	u32 value;
+
+	value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+	value &= ~INFOFRAME_CTRL_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
 }
 
-static int
-tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
-			       struct drm_crtc_state *crtc_state,
-			       struct drm_connector_state *conn_state)
+static struct tegra_sor_hdmi_settings *
+tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
+{
+	unsigned int i;
+
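+	/* entries are assumed sorted by ascending frequency; pick the first one that covers the requested rate */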
+	for (i = 0; i < sor->num_settings; i++)
+		if (frequency <= sor->settings[i].frequency)
+			return &sor->settings[i];
+
+	return NULL;
+}
+
+static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
 {
 	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
-	unsigned long pclk = crtc_state->mode.clock * 1000;
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
 	struct tegra_sor *sor = to_sor(output);
+	u32 value;
 	int err;
 
-	err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
-					 pclk, 0);
-	if (err < 0) {
-		dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
-		return err;
+	err = tegra_sor_detach(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+	tegra_sor_writel(sor, 0, SOR_STATE1);
+	tegra_sor_update(sor);
+
+	/* disable display to SOR clock */
+	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	value &= ~SOR1_TIMING_CYA;
+	value &= ~SOR1_ENABLE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+	tegra_dc_commit(dc);
+
+	err = tegra_sor_power_down(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+	err = tegra_io_rail_power_off(TEGRA_IO_RAIL_HDMI);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
+
+	reset_control_assert(sor->rst);
+	usleep_range(1000, 2000);
+	clk_disable_unprepare(sor->clk);
+}
+
+static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
+{
+	struct tegra_output *output = encoder_to_output(encoder);
+	unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
+	struct tegra_sor_hdmi_settings *settings;
+	struct tegra_sor *sor = to_sor(output);
+	struct drm_display_mode *mode;
+	struct drm_display_info *info;
+	u32 value;
+	int err;
+
+	mode = &encoder->crtc->state->adjusted_mode;
+	info = &output->connector.display_info;
+
+	err = clk_prepare_enable(sor->clk);
+	if (err < 0)
+		dev_err(sor->dev, "failed to enable clock: %d\n", err);
+
+	usleep_range(1000, 2000);
+
+	reset_control_deassert(sor->rst);
+
+	err = clk_set_parent(sor->clk, sor->clk_safe);
+	if (err < 0)
+		dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
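+	/*
+	 * div holds the safe clock rate in quarter-MHz units: the
+	 * SOR_REFCLK_DIV_INT() field consumes the integer part (div >> 2)
+	 * and SOR_REFCLK_DIV_FRAC() the two fractional bits.
+	 */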
+	div = clk_get_rate(sor->clk) / 1000000 * 4;
+
+	err = tegra_io_rail_power_on(TEGRA_IO_RAIL_HDMI);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power on HDMI rail: %d\n", err);
+
+	usleep_range(20, 100);
+
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
+
+	usleep_range(20, 100);
+
+	value = tegra_sor_readl(sor, SOR_PLL3);
+	value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
+	tegra_sor_writel(sor, value, SOR_PLL3);
+
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value &= ~SOR_PLL0_VCOPD;
+	value &= ~SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
+
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
+
+	usleep_range(200, 400);
+
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+	value &= ~SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
+
+	usleep_range(20, 100);
+
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+		 SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
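+	/* wait for any in-flight lane sequence to finish before triggering power-up */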
+	while (true) {
+		value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+		if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0)
+			break;
+
+		usleep_range(250, 1000);
 	}
 
-	return 0;
+	value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+		SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5);
+	tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+	while (true) {
+		value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+		if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+			break;
+
+		usleep_range(250, 1000);
+	}
+
+	value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+	value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+	value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
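+	/* modes up to 340 MHz use the 2.70 GHz link speed; faster (HDMI 2.0) rates need 5.40 GHz */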
+	if (mode->clock < 340000)
+		value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
+	else
+		value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
+
+	value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+	value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+	value |= SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
+	value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+	value |= SOR_DP_SPARE_SEQ_ENABLE;
+	tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+	value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
+		SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8);
+	tegra_sor_writel(sor, value, SOR_SEQ_CTL);
+
+	value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT |
+		SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1);
+	tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
+	tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
+
+	/* program the reference clock */
+	value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
+	tegra_sor_writel(sor, value, SOR_REFCLK);
+
+	/* XXX don't hardcode */
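+	/* lane crossbar: link 1 is programmed as an identity mapping, link 0 swaps lanes 0 and 2 */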
+	value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
+		SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
+		SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
+		SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
+		SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
+		SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
+		SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
+		SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
+		SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
+		SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
+	tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+	tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+
+	err = clk_set_parent(sor->clk, sor->clk_parent);
+	if (err < 0)
+		dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+
+	value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
+
+	/* XXX is this the proper check? */
+	if (mode->clock < 75000)
+		value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
+
+	tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
+
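+	/* HDMI packets are 32 pixels each; budget what remains of the hblank after the rekey window and margin */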
+	max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
+
+	value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) |
+		SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
+	tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
+
+	/* H_PULSE2 setup */
+	pulse_start = h_ref_to_sync + (mode->hsync_end - mode->hsync_start) +
+		      (mode->htotal - mode->hsync_end) - 10;
+
+	value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
+		PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
+	tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+	value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
+	tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+	value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
+	value |= H_PULSE2_ENABLE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+	/* infoframe setup */
+	err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
+	if (err < 0)
+		dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+
+	/* XXX HDMI audio support not implemented yet */
+	tegra_sor_hdmi_disable_audio_infoframe(sor);
+
+	/* use single TMDS protocol */
+	value = tegra_sor_readl(sor, SOR_STATE1);
+	value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+	value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A;
+	tegra_sor_writel(sor, value, SOR_STATE1);
+
+	/* power up pad calibration */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	/* production settings */
+	settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
+	if (!settings) {
+		dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
+			mode->clock * 1000);
+		return;
+	}
+
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value &= ~SOR_PLL0_ICHPMP_MASK;
+	value &= ~SOR_PLL0_VCOCAP_MASK;
+	value |= SOR_PLL0_ICHPMP(settings->ichpmp);
+	value |= SOR_PLL0_VCOCAP(settings->vcocap);
+	tegra_sor_writel(sor, value, SOR_PLL0);
+
+	tegra_sor_dp_term_calibrate(sor);
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value &= ~SOR_PLL1_LOADADJ_MASK;
+	value |= SOR_PLL1_LOADADJ(settings->loadadj);
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
+	value = tegra_sor_readl(sor, SOR_PLL3);
+	value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
+	value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref);
+	tegra_sor_writel(sor, value, SOR_PLL3);
+
+	value = settings->drive_current[0] << 24 |
+		settings->drive_current[1] << 16 |
+		settings->drive_current[2] <<  8 |
+		settings->drive_current[3] <<  0;
+	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+
+	value = settings->preemphasis[0] << 24 |
+		settings->preemphasis[1] << 16 |
+		settings->preemphasis[2] <<  8 |
+		settings->preemphasis[3] <<  0;
+	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+	value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+	value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	/* power down pad calibration */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value |= SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	/* miscellaneous display controller settings */
+	value = VSYNC_H_POSITION(1);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
+
+	value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
+	value &= ~DITHER_CONTROL_MASK;
+	value &= ~BASE_COLOR_SIZE_MASK;
+
+	switch (info->bpc) {
+	case 6:
+		value |= BASE_COLOR_SIZE_666;
+		break;
+
+	case 8:
+		value |= BASE_COLOR_SIZE_888;
+		break;
+
+	default:
+		WARN(1, "%u bits-per-color not supported\n", info->bpc);
+		break;
+	}
+
+	tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
+
+	err = tegra_sor_power_up(sor, 250);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+	/* configure mode */
+	value = tegra_sor_readl(sor, SOR_STATE1);
+	value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+	value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+	value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+	value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+		 SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		value |= SOR_STATE_ASY_HSYNCPOL;
+
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		value |= SOR_STATE_ASY_VSYNCPOL;
+
+	switch (info->bpc) {
+	case 8:
+		value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+		break;
+
+	case 6:
+		value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+		break;
+
+	default:
+		BUG();
+		break;
+	}
+
+	tegra_sor_writel(sor, value, SOR_STATE1);
+
+	value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+	value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
+	value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+
+	value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+	value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
+	value |= SOR_HEAD_STATE_COLORSPACE_RGB;
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+
+	/*
+	 * TODO: The video timing programming below doesn't seem to match the
+	 * register definitions.
+	 */
+
+	value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+
+	/* sync end = sync width - 1 */
+	vse = mode->vsync_end - mode->vsync_start - 1;
+	hse = mode->hsync_end - mode->hsync_start - 1;
+
+	value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+
+	/* blank end = sync end + back porch */
+	vbe = vse + (mode->vtotal - mode->vsync_end);
+	hbe = hse + (mode->htotal - mode->hsync_end);
+
+	value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+
+	/* blank start = blank end + active */
+	vbs = vbe + mode->vdisplay;
+	hbs = hbe + mode->hdisplay;
+
+	value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+	tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
+
+	tegra_sor_update(sor);
+
+	err = tegra_sor_attach(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+	/* enable display to SOR clock and generate HDMI preamble */
+	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	value |= SOR1_ENABLE | SOR1_TIMING_CYA;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+	tegra_dc_commit(dc);
+
+	err = tegra_sor_wakeup(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
 }
 
-static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = {
-	.dpms = tegra_sor_encoder_dpms,
-	.prepare = tegra_sor_encoder_prepare,
-	.commit = tegra_sor_encoder_commit,
-	.mode_set = tegra_sor_encoder_mode_set,
-	.disable = tegra_sor_encoder_disable,
+static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
+	.disable = tegra_sor_hdmi_disable,
+	.enable = tegra_sor_hdmi_enable,
 	.atomic_check = tegra_sor_encoder_atomic_check,
 };
 
 static int tegra_sor_init(struct host1x_client *client)
 {
 	struct drm_device *drm = dev_get_drvdata(client->parent);
+	const struct drm_encoder_helper_funcs *helpers = NULL;
 	struct tegra_sor *sor = host1x_client_to_sor(client);
+	int connector = DRM_MODE_CONNECTOR_Unknown;
+	int encoder = DRM_MODE_ENCODER_NONE;
 	int err;
 
-	if (!sor->dpaux)
-		return -ENODEV;
+	if (!sor->dpaux) {
+		if (sor->soc->supports_hdmi) {
+			connector = DRM_MODE_CONNECTOR_HDMIA;
+			encoder = DRM_MODE_ENCODER_TMDS;
+			helpers = &tegra_sor_hdmi_helpers;
+		} else if (sor->soc->supports_lvds) {
+			connector = DRM_MODE_CONNECTOR_LVDS;
+			encoder = DRM_MODE_ENCODER_LVDS;
+		}
+	} else {
+		if (sor->soc->supports_edp) {
+			connector = DRM_MODE_CONNECTOR_eDP;
+			encoder = DRM_MODE_ENCODER_TMDS;
+			helpers = &tegra_sor_edp_helpers;
+		} else if (sor->soc->supports_dp) {
+			connector = DRM_MODE_CONNECTOR_DisplayPort;
+			encoder = DRM_MODE_ENCODER_TMDS;
+		}
+	}
 
 	sor->output.dev = sor->dev;
 
 	drm_connector_init(drm, &sor->output.connector,
 			   &tegra_sor_connector_funcs,
-			   DRM_MODE_CONNECTOR_eDP);
+			   connector);
 	drm_connector_helper_add(&sor->output.connector,
 				 &tegra_sor_connector_helper_funcs);
 	sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
 
 	drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
-			 DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(&sor->output.encoder,
-			       &tegra_sor_encoder_helper_funcs);
+			 encoder);
+	drm_encoder_helper_add(&sor->output.encoder, helpers);
 
 	drm_mode_connector_attach_encoder(&sor->output.connector,
 					  &sor->output.encoder);
@@ -1577,18 +2272,130 @@ static const struct host1x_client_ops sor_client_ops = {
 	.exit = tegra_sor_exit,
 };
 
+static const struct tegra_sor_ops tegra_sor_edp_ops = {
+	.name = "eDP",
+};
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+	int err;
+
+	sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+	if (IS_ERR(sor->avdd_io_supply)) {
+		dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+			PTR_ERR(sor->avdd_io_supply));
+		return PTR_ERR(sor->avdd_io_supply);
+	}
+
+	err = regulator_enable(sor->avdd_io_supply);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+			err);
+		return err;
+	}
+
+	sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+	if (IS_ERR(sor->vdd_pll_supply)) {
+		dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+			PTR_ERR(sor->vdd_pll_supply));
+		return PTR_ERR(sor->vdd_pll_supply);
+	}
+
+	err = regulator_enable(sor->vdd_pll_supply);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+			err);
+		return err;
+	}
+
+	sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+	if (IS_ERR(sor->hdmi_supply)) {
+		dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+			PTR_ERR(sor->hdmi_supply));
+		return PTR_ERR(sor->hdmi_supply);
+	}
+
+	err = regulator_enable(sor->hdmi_supply);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
+{
+	regulator_disable(sor->hdmi_supply);
+	regulator_disable(sor->vdd_pll_supply);
+	regulator_disable(sor->avdd_io_supply);
+
+	return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+	.name = "HDMI",
+	.probe = tegra_sor_hdmi_probe,
+	.remove = tegra_sor_hdmi_remove,
+};
+
+static const struct tegra_sor_soc tegra124_sor = {
+	.supports_edp = true,
+	.supports_lvds = true,
+	.supports_hdmi = false,
+	.supports_dp = false,
+};
+
+static const struct tegra_sor_soc tegra210_sor = {
+	.supports_edp = true,
+	.supports_lvds = false,
+	.supports_hdmi = false,
+	.supports_dp = false,
+};
+
+static const struct tegra_sor_soc tegra210_sor1 = {
+	.supports_edp = false,
+	.supports_lvds = false,
+	.supports_hdmi = true,
+	.supports_dp = true,
+
+	.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
+	.settings = tegra210_sor_hdmi_defaults,
+};
+
+static const struct of_device_id tegra_sor_of_match[] = {
+	{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
+	{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+	{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
+
 static int tegra_sor_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *match;
 	struct device_node *np;
 	struct tegra_sor *sor;
 	struct resource *regs;
 	int err;
 
+	match = of_match_device(tegra_sor_of_match, &pdev->dev);
+
 	sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
 	if (!sor)
 		return -ENOMEM;
 
 	sor->output.dev = sor->dev = &pdev->dev;
+	sor->soc = match->data;
+
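+	/* keep a writable per-device copy of the SoC default settings */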
+	sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings,
+				     sor->soc->num_settings *
+					sizeof(*sor->settings),
+				     GFP_KERNEL);
+	if (!sor->settings)
+		return -ENOMEM;
+
+	sor->num_settings = sor->soc->num_settings;
 
 	np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
 	if (np) {
@@ -1599,51 +2406,106 @@ static int tegra_sor_probe(struct platform_device *pdev)
 			return -EPROBE_DEFER;
 	}
 
+	if (!sor->dpaux) {
+		if (sor->soc->supports_hdmi) {
+			sor->ops = &tegra_sor_hdmi_ops;
+		} else if (sor->soc->supports_lvds) {
+			dev_err(&pdev->dev, "LVDS not supported yet\n");
+			return -ENODEV;
+		} else {
+			dev_err(&pdev->dev, "unknown (non-DP) support\n");
+			return -ENODEV;
+		}
+	} else {
+		if (sor->soc->supports_edp) {
+			sor->ops = &tegra_sor_edp_ops;
+		} else if (sor->soc->supports_dp) {
+			dev_err(&pdev->dev, "DisplayPort not supported yet\n");
+			return -ENODEV;
+		} else {
+			dev_err(&pdev->dev, "unknown (DP) support\n");
+			return -ENODEV;
+		}
+	}
+
 	err = tegra_output_probe(&sor->output);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to probe output: %d\n", err);
 		return err;
+	}
+
+	if (sor->ops && sor->ops->probe) {
+		err = sor->ops->probe(sor);
+		if (err < 0) {
+			dev_err(&pdev->dev, "failed to probe %s: %d\n",
+				sor->ops->name, err);
+			goto output;
+		}
+	}
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	sor->regs = devm_ioremap_resource(&pdev->dev, regs);
-	if (IS_ERR(sor->regs))
-		return PTR_ERR(sor->regs);
+	if (IS_ERR(sor->regs)) {
+		err = PTR_ERR(sor->regs);
+		goto remove;
+	}
 
 	sor->rst = devm_reset_control_get(&pdev->dev, "sor");
-	if (IS_ERR(sor->rst))
-		return PTR_ERR(sor->rst);
+	if (IS_ERR(sor->rst)) {
+		err = PTR_ERR(sor->rst);
+		dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(sor->clk))
-		return PTR_ERR(sor->clk);
+	if (IS_ERR(sor->clk)) {
+		err = PTR_ERR(sor->clk);
+		dev_err(&pdev->dev, "failed to get module clock: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
-	if (IS_ERR(sor->clk_parent))
-		return PTR_ERR(sor->clk_parent);
+	if (IS_ERR(sor->clk_parent)) {
+		err = PTR_ERR(sor->clk_parent);
+		dev_err(&pdev->dev, "failed to get parent clock: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
-	if (IS_ERR(sor->clk_safe))
-		return PTR_ERR(sor->clk_safe);
+	if (IS_ERR(sor->clk_safe)) {
+		err = PTR_ERR(sor->clk_safe);
+		dev_err(&pdev->dev, "failed to get safe clock: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
-	if (IS_ERR(sor->clk_dp))
-		return PTR_ERR(sor->clk_dp);
+	if (IS_ERR(sor->clk_dp)) {
+		err = PTR_ERR(sor->clk_dp);
+		dev_err(&pdev->dev, "failed to get DP clock: %d\n", err);
+		goto remove;
+	}
 
 	INIT_LIST_HEAD(&sor->client.list);
 	sor->client.ops = &sor_client_ops;
 	sor->client.dev = &pdev->dev;
 
-	mutex_init(&sor->lock);
-
 	err = host1x_client_register(&sor->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
-		return err;
+		goto remove;
 	}
 
 	platform_set_drvdata(pdev, sor);
 
 	return 0;
+
+remove:
+	if (sor->ops && sor->ops->remove)
+		sor->ops->remove(sor);
+output:
+	tegra_output_remove(&sor->output);
+	return err;
 }
 
 static int tegra_sor_remove(struct platform_device *pdev)
@@ -1658,17 +2520,17 @@ static int tegra_sor_remove(struct platform_device *pdev)
 		return err;
 	}
 
+	if (sor->ops && sor->ops->remove) {
+		err = sor->ops->remove(sor);
+		if (err < 0)
+			dev_err(&pdev->dev, "failed to remove SOR: %d\n", err);
+	}
+
 	tegra_output_remove(&sor->output);
 
 	return 0;
 }
 
-static const struct of_device_id tegra_sor_of_match[] = {
-	{ .compatible = "nvidia,tegra124-sor", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
-
 struct platform_driver tegra_sor_driver = {
 	.driver = {
 		.name = "tegra-sor",
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index a5f8853fedb5..2d31d027e3f6 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -11,9 +11,9 @@
 
 #define SOR_CTXSW 0x00
 
-#define SOR_SUPER_STATE_0 0x01
+#define SOR_SUPER_STATE0 0x01
 
-#define SOR_SUPER_STATE_1 0x02
+#define SOR_SUPER_STATE1 0x02
 #define  SOR_SUPER_STATE_ATTACHED		(1 << 3)
 #define  SOR_SUPER_STATE_MODE_NORMAL		(1 << 2)
 #define  SOR_SUPER_STATE_HEAD_MODE_MASK		(3 << 0)
@@ -21,9 +21,9 @@
 #define  SOR_SUPER_STATE_HEAD_MODE_SNOOZE	(1 << 0)
 #define  SOR_SUPER_STATE_HEAD_MODE_SLEEP	(0 << 0)
 
-#define SOR_STATE_0 0x03
+#define SOR_STATE0 0x03
 
-#define SOR_STATE_1 0x04
+#define SOR_STATE1 0x04
 #define  SOR_STATE_ASY_PIXELDEPTH_MASK		(0xf << 17)
 #define  SOR_STATE_ASY_PIXELDEPTH_BPP_18_444	(0x2 << 17)
 #define  SOR_STATE_ASY_PIXELDEPTH_BPP_24_444	(0x5 << 17)
@@ -33,19 +33,27 @@
 #define  SOR_STATE_ASY_PROTOCOL_CUSTOM		(0xf << 8)
 #define  SOR_STATE_ASY_PROTOCOL_DP_A		(0x8 << 8)
 #define  SOR_STATE_ASY_PROTOCOL_DP_B		(0x9 << 8)
+#define  SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A	(0x1 << 8)
 #define  SOR_STATE_ASY_PROTOCOL_LVDS		(0x0 << 8)
 #define  SOR_STATE_ASY_CRC_MODE_MASK		(0x3 << 6)
 #define  SOR_STATE_ASY_CRC_MODE_NON_ACTIVE	(0x2 << 6)
 #define  SOR_STATE_ASY_CRC_MODE_COMPLETE	(0x1 << 6)
 #define  SOR_STATE_ASY_CRC_MODE_ACTIVE		(0x0 << 6)
+#define  SOR_STATE_ASY_OWNER_MASK		0xf
 #define  SOR_STATE_ASY_OWNER(x)			(((x) & 0xf) << 0)
 
-#define SOR_HEAD_STATE_0(x) (0x05 + (x))
-#define SOR_HEAD_STATE_1(x) (0x07 + (x))
-#define SOR_HEAD_STATE_2(x) (0x09 + (x))
-#define SOR_HEAD_STATE_3(x) (0x0b + (x))
-#define SOR_HEAD_STATE_4(x) (0x0d + (x))
-#define SOR_HEAD_STATE_5(x) (0x0f + (x))
+#define SOR_HEAD_STATE0(x) (0x05 + (x))
+#define  SOR_HEAD_STATE_RANGECOMPRESS_MASK (0x1 << 3)
+#define  SOR_HEAD_STATE_DYNRANGE_MASK (0x1 << 2)
+#define  SOR_HEAD_STATE_DYNRANGE_VESA (0 << 2)
+#define  SOR_HEAD_STATE_DYNRANGE_CEA (1 << 2)
+#define  SOR_HEAD_STATE_COLORSPACE_MASK (0x3 << 0)
+#define  SOR_HEAD_STATE_COLORSPACE_RGB (0 << 0)
+#define SOR_HEAD_STATE1(x) (0x07 + (x))
+#define SOR_HEAD_STATE2(x) (0x09 + (x))
+#define SOR_HEAD_STATE3(x) (0x0b + (x))
+#define SOR_HEAD_STATE4(x) (0x0d + (x))
+#define SOR_HEAD_STATE5(x) (0x0f + (x))
 #define SOR_CRC_CNTRL 0x11
 #define  SOR_CRC_CNTRL_ENABLE			(1 << 0)
 #define SOR_DP_DEBUG_MVID 0x12
@@ -75,62 +83,101 @@
 #define  SOR_TEST_HEAD_MODE_MASK		(3 << 8)
 #define  SOR_TEST_HEAD_MODE_AWAKE		(2 << 8)
 
-#define SOR_PLL_0 0x17
-#define  SOR_PLL_0_ICHPMP_MASK			(0xf << 24)
-#define  SOR_PLL_0_ICHPMP(x)			(((x) & 0xf) << 24)
-#define  SOR_PLL_0_VCOCAP_MASK			(0xf << 8)
-#define  SOR_PLL_0_VCOCAP(x)			(((x) & 0xf) << 8)
-#define  SOR_PLL_0_VCOCAP_RST			SOR_PLL_0_VCOCAP(3)
-#define  SOR_PLL_0_PLLREG_MASK			(0x3 << 6)
-#define  SOR_PLL_0_PLLREG_LEVEL(x)		(((x) & 0x3) << 6)
-#define  SOR_PLL_0_PLLREG_LEVEL_V25		SOR_PLL_0_PLLREG_LEVEL(0)
-#define  SOR_PLL_0_PLLREG_LEVEL_V15		SOR_PLL_0_PLLREG_LEVEL(1)
-#define  SOR_PLL_0_PLLREG_LEVEL_V35		SOR_PLL_0_PLLREG_LEVEL(2)
-#define  SOR_PLL_0_PLLREG_LEVEL_V45		SOR_PLL_0_PLLREG_LEVEL(3)
-#define  SOR_PLL_0_PULLDOWN			(1 << 5)
-#define  SOR_PLL_0_RESISTOR_EXT			(1 << 4)
-#define  SOR_PLL_0_VCOPD			(1 << 2)
-#define  SOR_PLL_0_POWER_OFF			(1 << 0)
-
-#define SOR_PLL_1 0x18
+#define SOR_PLL0 0x17
+#define  SOR_PLL0_ICHPMP_MASK			(0xf << 24)
+#define  SOR_PLL0_ICHPMP(x)			(((x) & 0xf) << 24)
+#define  SOR_PLL0_VCOCAP_MASK			(0xf << 8)
+#define  SOR_PLL0_VCOCAP(x)			(((x) & 0xf) << 8)
+#define  SOR_PLL0_VCOCAP_RST			SOR_PLL0_VCOCAP(3)
+#define  SOR_PLL0_PLLREG_MASK			(0x3 << 6)
+#define  SOR_PLL0_PLLREG_LEVEL(x)		(((x) & 0x3) << 6)
+#define  SOR_PLL0_PLLREG_LEVEL_V25		SOR_PLL0_PLLREG_LEVEL(0)
+#define  SOR_PLL0_PLLREG_LEVEL_V15		SOR_PLL0_PLLREG_LEVEL(1)
+#define  SOR_PLL0_PLLREG_LEVEL_V35		SOR_PLL0_PLLREG_LEVEL(2)
+#define  SOR_PLL0_PLLREG_LEVEL_V45		SOR_PLL0_PLLREG_LEVEL(3)
+#define  SOR_PLL0_PULLDOWN			(1 << 5)
+#define  SOR_PLL0_RESISTOR_EXT			(1 << 4)
+#define  SOR_PLL0_VCOPD				(1 << 2)
+#define  SOR_PLL0_PWR				(1 << 0)
+
+#define SOR_PLL1 0x18
 /* XXX: read-only bit? */
-#define  SOR_PLL_1_TERM_COMPOUT			(1 << 15)
-#define  SOR_PLL_1_TMDS_TERM			(1 << 8)
-
-#define SOR_PLL_2 0x19
-#define  SOR_PLL_2_LVDS_ENABLE			(1 << 25)
-#define  SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE		(1 << 24)
-#define  SOR_PLL_2_PORT_POWERDOWN		(1 << 23)
-#define  SOR_PLL_2_BANDGAP_POWERDOWN		(1 << 22)
-#define  SOR_PLL_2_POWERDOWN_OVERRIDE		(1 << 18)
-#define  SOR_PLL_2_SEQ_PLLCAPPD			(1 << 17)
-
-#define SOR_PLL_3 0x1a
-#define  SOR_PLL_3_PLL_VDD_MODE_V1_8 (0 << 13)
-#define  SOR_PLL_3_PLL_VDD_MODE_V3_3 (1 << 13)
+#define  SOR_PLL1_LOADADJ_MASK			(0xf << 20)
+#define  SOR_PLL1_LOADADJ(x)			(((x) & 0xf) << 20)
+#define  SOR_PLL1_TERM_COMPOUT			(1 << 15)
+#define  SOR_PLL1_TMDS_TERMADJ_MASK		(0xf << 9)
+#define  SOR_PLL1_TMDS_TERMADJ(x)		(((x) & 0xf) << 9)
+#define  SOR_PLL1_TMDS_TERM			(1 << 8)
+
+#define SOR_PLL2 0x19
+#define  SOR_PLL2_LVDS_ENABLE			(1 << 25)
+#define  SOR_PLL2_SEQ_PLLCAPPD_ENFORCE		(1 << 24)
+#define  SOR_PLL2_PORT_POWERDOWN		(1 << 23)
+#define  SOR_PLL2_BANDGAP_POWERDOWN		(1 << 22)
+#define  SOR_PLL2_POWERDOWN_OVERRIDE		(1 << 18)
+#define  SOR_PLL2_SEQ_PLLCAPPD			(1 << 17)
+#define  SOR_PLL2_SEQ_PLL_PULLDOWN		(1 << 16)
+
+#define SOR_PLL3 0x1a
+#define  SOR_PLL3_BG_VREF_LEVEL_MASK		(0xf << 24)
+#define  SOR_PLL3_BG_VREF_LEVEL(x)		(((x) & 0xf) << 24)
+#define  SOR_PLL3_PLL_VDD_MODE_1V8		(0 << 13)
+#define  SOR_PLL3_PLL_VDD_MODE_3V3		(1 << 13)
 
 #define SOR_CSTM 0x1b
+#define  SOR_CSTM_ROTCLK_MASK			(0xf << 24)
+#define  SOR_CSTM_ROTCLK(x)			(((x) & 0xf) << 24)
 #define  SOR_CSTM_LVDS				(1 << 16)
 #define  SOR_CSTM_LINK_ACT_B			(1 << 15)
 #define  SOR_CSTM_LINK_ACT_A			(1 << 14)
 #define  SOR_CSTM_UPPER				(1 << 11)
 
 #define SOR_LVDS 0x1c
-#define SOR_CRC_A 0x1d
-#define  SOR_CRC_A_VALID			(1 << 0)
-#define  SOR_CRC_A_RESET			(1 << 0)
-#define SOR_CRC_B 0x1e
+#define SOR_CRCA 0x1d
+#define  SOR_CRCA_VALID			(1 << 0)
+#define  SOR_CRCA_RESET			(1 << 0)
+#define SOR_CRCB 0x1e
 #define SOR_BLANK 0x1f
 #define SOR_SEQ_CTL 0x20
+#define  SOR_SEQ_CTL_PD_PC_ALT(x)	(((x) & 0xf) << 12)
+#define  SOR_SEQ_CTL_PD_PC(x)		(((x) & 0xf) <<  8)
+#define  SOR_SEQ_CTL_PU_PC_ALT(x)	(((x) & 0xf) <<  4)
+#define  SOR_SEQ_CTL_PU_PC(x)		(((x) & 0xf) <<  0)
 
 #define SOR_LANE_SEQ_CTL 0x21
 #define  SOR_LANE_SEQ_CTL_TRIGGER		(1 << 31)
+#define  SOR_LANE_SEQ_CTL_STATE_BUSY		(1 << 28)
 #define  SOR_LANE_SEQ_CTL_SEQUENCE_UP		(0 << 20)
 #define  SOR_LANE_SEQ_CTL_SEQUENCE_DOWN		(1 << 20)
 #define  SOR_LANE_SEQ_CTL_POWER_STATE_UP	(0 << 16)
 #define  SOR_LANE_SEQ_CTL_POWER_STATE_DOWN	(1 << 16)
+#define  SOR_LANE_SEQ_CTL_DELAY(x)		(((x) & 0xf) << 12)
 
 #define SOR_SEQ_INST(x) (0x22 + (x))
+#define  SOR_SEQ_INST_PLL_PULLDOWN (1 << 31)
+#define  SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30)
+#define  SOR_SEQ_INST_ASSERT_PLL_RESET (1 << 29)
+#define  SOR_SEQ_INST_BLANK_V (1 << 28)
+#define  SOR_SEQ_INST_BLANK_H (1 << 27)
+#define  SOR_SEQ_INST_BLANK_DE (1 << 26)
+#define  SOR_SEQ_INST_BLACK_DATA (1 << 25)
+#define  SOR_SEQ_INST_TRISTATE_IOS (1 << 24)
+#define  SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+#define  SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define  SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define  SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define  SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define  SOR_SEQ_INST_SEQUENCE_UP (0 << 19)
+#define  SOR_SEQ_INST_SEQUENCE_DOWN (1 << 19)
+#define  SOR_SEQ_INST_LANE_SEQ_STOP (0 << 18)
+#define  SOR_SEQ_INST_LANE_SEQ_RUN (1 << 18)
+#define  SOR_SEQ_INST_PORT_POWERDOWN (1 << 17)
+#define  SOR_SEQ_INST_PLL_POWERDOWN (1 << 16)
+#define  SOR_SEQ_INST_HALT (1 << 15)
+#define  SOR_SEQ_INST_WAIT_US (0 << 12)
+#define  SOR_SEQ_INST_WAIT_MS (1 << 12)
+#define  SOR_SEQ_INST_WAIT_VSYNC (2 << 12)
+#define  SOR_SEQ_INST_WAIT(x) (((x) & 0x3ff) << 0)
 
 #define SOR_PWM_DIV 0x32
 #define  SOR_PWM_DIV_MASK			0xffffff
@@ -140,32 +187,36 @@
 #define  SOR_PWM_CTL_CLK_SEL			(1 << 30)
 #define  SOR_PWM_CTL_DUTY_CYCLE_MASK		0xffffff
 
-#define SOR_VCRC_A_0 0x34
-#define SOR_VCRC_A_1 0x35
-#define SOR_VCRC_B_0 0x36
-#define SOR_VCRC_B_1 0x37
-#define SOR_CCRC_A_0 0x38
-#define SOR_CCRC_A_1 0x39
-#define SOR_CCRC_B_0 0x3a
-#define SOR_CCRC_B_1 0x3b
-#define SOR_EDATA_A_0 0x3c
-#define SOR_EDATA_A_1 0x3d
-#define SOR_EDATA_B_0 0x3e
-#define SOR_EDATA_B_1 0x3f
-#define SOR_COUNT_A_0 0x40
-#define SOR_COUNT_A_1 0x41
-#define SOR_COUNT_B_0 0x42
-#define SOR_COUNT_B_1 0x43
-#define SOR_DEBUG_A_0 0x44
-#define SOR_DEBUG_A_1 0x45
-#define SOR_DEBUG_B_0 0x46
-#define SOR_DEBUG_B_1 0x47
+#define SOR_VCRC_A0 0x34
+#define SOR_VCRC_A1 0x35
+#define SOR_VCRC_B0 0x36
+#define SOR_VCRC_B1 0x37
+#define SOR_CCRC_A0 0x38
+#define SOR_CCRC_A1 0x39
+#define SOR_CCRC_B0 0x3a
+#define SOR_CCRC_B1 0x3b
+#define SOR_EDATA_A0 0x3c
+#define SOR_EDATA_A1 0x3d
+#define SOR_EDATA_B0 0x3e
+#define SOR_EDATA_B1 0x3f
+#define SOR_COUNT_A0 0x40
+#define SOR_COUNT_A1 0x41
+#define SOR_COUNT_B0 0x42
+#define SOR_COUNT_B1 0x43
+#define SOR_DEBUG_A0 0x44
+#define SOR_DEBUG_A1 0x45
+#define SOR_DEBUG_B0 0x46
+#define SOR_DEBUG_B1 0x47
 #define SOR_TRIG 0x48
 #define SOR_MSCHECK 0x49
 #define SOR_XBAR_CTRL 0x4a
+#define  SOR_XBAR_CTRL_LINK1_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 17)
+#define  SOR_XBAR_CTRL_LINK0_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) <<  2)
+#define  SOR_XBAR_CTRL_LINK_SWAP (1 << 1)
+#define  SOR_XBAR_CTRL_BYPASS (1 << 0)
 #define SOR_XBAR_POL 0x4b
 
-#define SOR_DP_LINKCTL_0 0x4c
+#define SOR_DP_LINKCTL0 0x4c
 #define  SOR_DP_LINKCTL_LANE_COUNT_MASK		(0x1f << 16)
 #define  SOR_DP_LINKCTL_LANE_COUNT(x)		(((1 << (x)) - 1) << 16)
 #define  SOR_DP_LINKCTL_ENHANCED_FRAME		(1 << 14)
@@ -173,34 +224,34 @@
 #define  SOR_DP_LINKCTL_TU_SIZE(x)		(((x) & 0x7f) << 2)
 #define  SOR_DP_LINKCTL_ENABLE			(1 << 0)
 
-#define SOR_DP_LINKCTL_1 0x4d
+#define SOR_DP_LINKCTL1 0x4d
 
-#define SOR_LANE_DRIVE_CURRENT_0 0x4e
-#define SOR_LANE_DRIVE_CURRENT_1 0x4f
-#define SOR_LANE4_DRIVE_CURRENT_0 0x50
-#define SOR_LANE4_DRIVE_CURRENT_1 0x51
+#define SOR_LANE_DRIVE_CURRENT0 0x4e
+#define SOR_LANE_DRIVE_CURRENT1 0x4f
+#define SOR_LANE4_DRIVE_CURRENT0 0x50
+#define SOR_LANE4_DRIVE_CURRENT1 0x51
 #define  SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24)
 #define  SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16)
 #define  SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8)
 #define  SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0)
 
-#define SOR_LANE_PREEMPHASIS_0 0x52
-#define SOR_LANE_PREEMPHASIS_1 0x53
-#define SOR_LANE4_PREEMPHASIS_0 0x54
-#define SOR_LANE4_PREEMPHASIS_1 0x55
+#define SOR_LANE_PREEMPHASIS0 0x52
+#define SOR_LANE_PREEMPHASIS1 0x53
+#define SOR_LANE4_PREEMPHASIS0 0x54
+#define SOR_LANE4_PREEMPHASIS1 0x55
 #define  SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24)
 #define  SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16)
 #define  SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8)
 #define  SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0)
 
-#define SOR_LANE_POST_CURSOR_0 0x56
-#define SOR_LANE_POST_CURSOR_1 0x57
-#define  SOR_LANE_POST_CURSOR_LANE3(x) (((x) & 0xff) << 24)
-#define  SOR_LANE_POST_CURSOR_LANE2(x) (((x) & 0xff) << 16)
-#define  SOR_LANE_POST_CURSOR_LANE1(x) (((x) & 0xff) << 8)
-#define  SOR_LANE_POST_CURSOR_LANE0(x) (((x) & 0xff) << 0)
+#define SOR_LANE_POSTCURSOR0 0x56
+#define SOR_LANE_POSTCURSOR1 0x57
+#define  SOR_LANE_POSTCURSOR_LANE3(x) (((x) & 0xff) << 24)
+#define  SOR_LANE_POSTCURSOR_LANE2(x) (((x) & 0xff) << 16)
+#define  SOR_LANE_POSTCURSOR_LANE1(x) (((x) & 0xff) << 8)
+#define  SOR_LANE_POSTCURSOR_LANE0(x) (((x) & 0xff) << 0)
 
-#define SOR_DP_CONFIG_0 0x58
+#define SOR_DP_CONFIG0 0x58
 #define SOR_DP_CONFIG_DISPARITY_NEGATIVE	(1 << 31)
 #define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE		(1 << 26)
 #define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY	(1 << 24)
@@ -211,11 +262,11 @@
 #define SOR_DP_CONFIG_WATERMARK_MASK	(0x3f << 0)
 #define SOR_DP_CONFIG_WATERMARK(x)	(((x) & 0x3f) << 0)
 
-#define SOR_DP_CONFIG_1 0x59
-#define SOR_DP_MN_0 0x5a
-#define SOR_DP_MN_1 0x5b
+#define SOR_DP_CONFIG1 0x59
+#define SOR_DP_MN0 0x5a
+#define SOR_DP_MN1 0x5b
 
-#define SOR_DP_PADCTL_0 0x5c
+#define SOR_DP_PADCTL0 0x5c
 #define  SOR_DP_PADCTL_PAD_CAL_PD	(1 << 23)
 #define  SOR_DP_PADCTL_TX_PU_ENABLE	(1 << 22)
 #define  SOR_DP_PADCTL_TX_PU_MASK	(0xff << 8)
@@ -229,17 +280,18 @@
 #define  SOR_DP_PADCTL_PD_TXD_1		(1 << 1)
 #define  SOR_DP_PADCTL_PD_TXD_2		(1 << 0)
 
-#define SOR_DP_PADCTL_1 0x5d
+#define SOR_DP_PADCTL1 0x5d
 
-#define SOR_DP_DEBUG_0 0x5e
-#define SOR_DP_DEBUG_1 0x5f
+#define SOR_DP_DEBUG0 0x5e
+#define SOR_DP_DEBUG1 0x5f
 
-#define SOR_DP_SPARE_0 0x60
-#define  SOR_DP_SPARE_MACRO_SOR_CLK	(1 << 2)
-#define  SOR_DP_SPARE_PANEL_INTERNAL	(1 << 1)
-#define  SOR_DP_SPARE_SEQ_ENABLE	(1 << 0)
+#define SOR_DP_SPARE0 0x60
+#define  SOR_DP_SPARE_DISP_VIDEO_PREAMBLE	(1 << 3)
+#define  SOR_DP_SPARE_MACRO_SOR_CLK		(1 << 2)
+#define  SOR_DP_SPARE_PANEL_INTERNAL		(1 << 1)
+#define  SOR_DP_SPARE_SEQ_ENABLE		(1 << 0)
 
-#define SOR_DP_SPARE_1 0x61
+#define SOR_DP_SPARE1 0x61
 #define SOR_DP_AUDIO_CTRL 0x62
 
 #define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63
@@ -249,13 +301,13 @@
 #define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0)
 
 #define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_0 0x66
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_1 0x67
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_2 0x68
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_3 0x69
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_4 0x6a
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_5 0x6b
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_6 0x6c
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK0 0x66
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK1 0x67
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK2 0x68
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK3 0x69
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK4 0x6a
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK5 0x6b
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK6 0x6c
 
 #define SOR_DP_TPG 0x6d
 #define  SOR_DP_TPG_CHANNEL_CODING	(1 << 6)
@@ -275,8 +327,44 @@
 #define  SOR_DP_TPG_PATTERN_NONE	(0x0 << 0)
 
 #define SOR_DP_TPG_CONFIG 0x6e
-#define SOR_DP_LQ_CSTM_0 0x6f
-#define SOR_DP_LQ_CSTM_1 0x70
-#define SOR_DP_LQ_CSTM_2 0x71
+#define SOR_DP_LQ_CSTM0 0x6f
+#define SOR_DP_LQ_CSTM1 0x70
+#define SOR_DP_LQ_CSTM2 0x71
+
+#define SOR_HDMI_AUDIO_INFOFRAME_CTRL 0x9a
+#define SOR_HDMI_AUDIO_INFOFRAME_STATUS 0x9b
+#define SOR_HDMI_AUDIO_INFOFRAME_HEADER 0x9c
+
+#define SOR_HDMI_AVI_INFOFRAME_CTRL 0x9f
+#define  INFOFRAME_CTRL_CHECKSUM_ENABLE	(1 << 9)
+#define  INFOFRAME_CTRL_SINGLE		(1 << 8)
+#define  INFOFRAME_CTRL_OTHER		(1 << 4)
+#define  INFOFRAME_CTRL_ENABLE		(1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_STATUS 0xa0
+#define  INFOFRAME_STATUS_DONE		(1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_HEADER 0xa1
+#define  INFOFRAME_HEADER_LEN(x) (((x) & 0xff) << 16)
+#define  INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define  INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+
+#define SOR_HDMI_CTRL 0xc0
+#define  SOR_HDMI_CTRL_ENABLE (1 << 30)
+#define  SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define  SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
+#define  SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+
+#define SOR_REFCLK 0xe6
+#define  SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
+#define  SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
+
+#define SOR_INPUT_CONTROL 0xe8
+#define  SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
+#define  SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
+
+#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
+#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
+#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
 
 #endif
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bf080abc86d1..4e19d0f9cc30 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -340,7 +340,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 		swap_storage = shmem_file_setup("ttm swap",
 						ttm->num_pages << PAGE_SHIFT,
 						0);
-		if (unlikely(IS_ERR(swap_storage))) {
+		if (IS_ERR(swap_storage)) {
 			pr_err("Failed allocating swap storage\n");
 			return PTR_ERR(swap_storage);
 		}
@@ -354,7 +354,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 		if (unlikely(from_page == NULL))
 			continue;
 		to_page = shmem_read_mapping_page(swap_space, i);
-		if (unlikely(IS_ERR(to_page))) {
+		if (IS_ERR(to_page)) {
 			ret = PTR_ERR(to_page);
 			goto out_err;
 		}
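
The two hunks above drop unlikely() around IS_ERR() because the hint is redundant: IS_ERR() in include/linux/err.h already carries it. A simplified sketch of the upstream definition:

	#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

	static inline bool __must_check IS_ERR(const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}
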
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 5fc16cecd3ba..62c7b1dafaa4 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -288,7 +288,7 @@ static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect
 {
 	struct udl_fbdev *ufbdev = info->par;
 
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 
 	udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
 			  rect->height);
@@ -298,7 +298,7 @@ static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *regi
 {
 	struct udl_fbdev *ufbdev = info->par;
 
-	sys_copyarea(info, region);
+	drm_fb_helper_sys_copyarea(info, region);
 
 	udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
 			  region->height);
@@ -308,7 +308,7 @@ static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct udl_fbdev *ufbdev = info->par;
 
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 
 	udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
 			  image->height);
@@ -476,7 +476,6 @@ static int udlfb_create(struct drm_fb_helper *helper,
 		container_of(helper, struct udl_fbdev, helper);
 	struct drm_device *dev = ufbdev->helper.dev;
 	struct fb_info *info;
-	struct device *device = dev->dev;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct udl_gem_object *obj;
@@ -506,21 +505,20 @@ static int udlfb_create(struct drm_fb_helper *helper,
 		goto out_gfree;
 	}
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_gfree;
 	}
 	info->par = ufbdev;
 
 	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
 	if (ret)
-		goto out_gfree;
+		goto out_destroy_fbi;
 
 	fb = &ufbdev->ufb.base;
 
 	ufbdev->helper.fb = fb;
-	ufbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "udldrmfb");
 
@@ -533,18 +531,13 @@ static int udlfb_create(struct drm_fb_helper *helper,
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_gfree;
-	}
-
-
 	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
 		      fb->width, fb->height,
 		      ufbdev->ufb.obj->vmapping);
 
 	return ret;
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_gfree:
 	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
 out:
@@ -558,14 +551,8 @@ static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
 static void udl_fbdev_destroy(struct drm_device *dev,
 			      struct udl_fbdev *ufbdev)
 {
-	struct fb_info *info;
-	if (ufbdev->helper.fbdev) {
-		info = ufbdev->helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&ufbdev->helper);
+	drm_fb_helper_release_fbi(&ufbdev->helper);
 	drm_fb_helper_fini(&ufbdev->helper);
 	drm_framebuffer_unregister_private(&ufbdev->ufb.base);
 	drm_framebuffer_cleanup(&ufbdev->ufb.base);
@@ -631,11 +618,7 @@ void udl_fbdev_unplug(struct drm_device *dev)
 		return;
 
 	ufbdev = udl->fbdev;
-	if (ufbdev->helper.fbdev) {
-		struct fb_info *info;
-		info = ufbdev->helper.fbdev;
-		unlink_framebuffer(info);
-	}
+	drm_fb_helper_unlink_fbi(&ufbdev->helper);
 }
 
 struct drm_framebuffer *
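
The udl conversion above follows the pattern of the drm_fb_helper fbdev emulation helpers: drm_fb_helper_alloc_fbi() replaces the open-coded framebuffer_alloc()/fb_alloc_cmap() pair, and teardown collapses to two calls. A minimal sketch of the resulting create/destroy shape, with all driver-specific names illustrative:

	/* Sketch only; error handling and driver state are elided. */
	static int example_fbdev_create(struct drm_fb_helper *helper)
	{
		struct fb_info *info;

		info = drm_fb_helper_alloc_fbi(helper);	/* allocates fb_info + cmap */
		if (IS_ERR(info))
			return PTR_ERR(info);
		/* ... fill info->fix/var, set helper->fb ... */
		return 0;
	}

	static void example_fbdev_destroy(struct drm_fb_helper *helper)
	{
		drm_fb_helper_unregister_fbi(helper);	/* unregister_framebuffer() */
		drm_fb_helper_release_fbi(helper);	/* free cmap + fb_info */
		drm_fb_helper_fini(helper);
	}
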
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index df198d9e770c..6a81e084593b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -173,7 +173,7 @@ static void virtio_gpu_3d_fillrect(struct fb_info *info,
 				   const struct fb_fillrect *rect)
 {
 	struct virtio_gpu_fbdev *vfbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
 			     rect->width, rect->height);
 	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -183,7 +183,7 @@ static void virtio_gpu_3d_copyarea(struct fb_info *info,
 				   const struct fb_copyarea *area)
 {
 	struct virtio_gpu_fbdev *vfbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
 			   area->width, area->height);
 	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -193,7 +193,7 @@ static void virtio_gpu_3d_imageblit(struct fb_info *info,
 				    const struct fb_image *image)
 {
 	struct virtio_gpu_fbdev *vfbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
 			     image->width, image->height);
 	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -230,7 +230,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct virtio_gpu_object *obj;
-	struct device *device = vgdev->dev;
 	uint32_t resid, format, size;
 	int ret;
 
@@ -317,18 +316,12 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	if (ret)
 		goto err_obj_attach;
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto err_fb_alloc;
 	}
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto err_fb_alloc_cmap;
-	}
-
 	info->par = helper;
 
 	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
@@ -339,7 +332,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	fb = &vfbdev->vgfb.base;
 
 	vfbdev->helper.fb = fb;
-	vfbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "virtiodrmfb");
 	info->flags = FBINFO_DEFAULT;
@@ -357,9 +349,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	return 0;
 
 err_fb_init:
-	fb_dealloc_cmap(&info->cmap);
-err_fb_alloc_cmap:
-	framebuffer_release(info);
+	drm_fb_helper_release_fbi(helper);
 err_fb_alloc:
 	virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
 err_obj_attach:
@@ -371,15 +361,11 @@ err_obj_vmap:
 static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
 				    struct virtio_gpu_fbdev *vgfbdev)
 {
-	struct fb_info *info;
 	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
 
-	if (vgfbdev->helper.fbdev) {
-		info = vgfbdev->helper.fbdev;
+	drm_fb_helper_unregister_fbi(&vgfbdev->helper);
+	drm_fb_helper_release_fbi(&vgfbdev->helper);
 
-		unregister_framebuffer(info);
-		framebuffer_release(info);
-	}
 	if (vgfb->obj)
 		vgfb->obj = NULL;
 	drm_fb_helper_fini(&vgfbdev->helper);
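
At the time of this series, the drm_fb_helper_sys_*() calls used in the udl and virtio hunks are thin wrappers around the fbdev sys_*() drawing helpers, added so drivers no longer call the fbdev core directly. Conceptually (a simplified sketch of one helper, not driver code):

	void drm_fb_helper_sys_fillrect(struct fb_info *info,
					const struct fb_fillrect *rect)
	{
		sys_fillrect(info, rect);
	}
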
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index ce0ab951f507..d281575bbe11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
-	    vmwgfx_cmdbuf_res.o \
+	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
new file mode 100644
index 000000000000..8cce7f15b6eb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
@@ -0,0 +1,3 @@
+/*
+ * Intentionally empty file.
+ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
new file mode 100644
index 000000000000..9ce2466a5d00
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -0,0 +1,110 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_caps.h --
+ *
+ *       Definitions for SVGA3D hardware capabilities.  Capabilities
+ *       are used to query for optional rendering features during
+ *       driver initialization. The capability data is stored as a
+ *       very basic key/value dictionary within the "FIFO register"
+ *       memory area at the beginning of BAR2.
+ *
+ *       Note that these definitions are only for 3D capabilities.
+ *       The SVGA device also has "device capabilities" and "FIFO
+ *       capabilities", which are non-3D-specific and are stored as
+ *       bitfields rather than key/value pairs.
+ */
+
+#ifndef _SVGA3D_CAPS_H_
+#define _SVGA3D_CAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#define SVGA_FIFO_3D_CAPS_SIZE   (SVGA_FIFO_3D_CAPS_LAST - \
+                                  SVGA_FIFO_3D_CAPS + 1)
+
+
+/*
+ * SVGA3dCapsRecordType
+ *
+ *    Record types that can be found in the caps block.
+ *    Related record types are grouped together numerically so that
+ *    SVGA3dCaps_FindRecord() can be applied on a range of record
+ *    types.
+ */
+
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+
+/*
+ * SVGA3dCapsRecordHeader
+ *
+ *    Header field leading each caps block record. Contains the offset (in
+ *    register words, NOT bytes) to the next caps block record (or the end
+ *    of caps block records which will be a zero word) and the record type
+ *    as defined above.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecordHeader;
+
+
+/*
+ * SVGA3dCapsRecord
+ *
+ *    Caps block record; "data" is a placeholder for the actual data structure
+ *    contained within the record.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
+#endif
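
The caps block defined by this header is a list of SVGA3dCapsRecord entries terminated by a zero length word, with header.length giving the offset (in 32-bit words) to the next record. A hypothetical walk over the records, where 'caps' is assumed to point at the start of the 3D caps area:

	const uint32 *word = caps;

	while (*word != 0) {
		const SVGA3dCapsRecord *record = (const SVGA3dCapsRecord *)word;

		if (record->header.type == SVGA3DCAPS_RECORD_DEVCAPS) {
			/* record->data[] holds SVGA3dCapPair key/value pairs */
		}
		word += record->header.length;	/* offset to the next record */
	}
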
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
new file mode 100644
index 000000000000..2dfd57c5f463
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -0,0 +1,2071 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_cmd.h --
+ *
+ *       SVGA 3D hardware command definitions
+ */
+
+#ifndef _SVGA3D_CMD_H_
+#define _SVGA3D_CMD_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+#include "svga3d_types.h"
+
+/*
+ * Identifiers for commands in the command FIFO.
+ *
+ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
+ * the SVGA3D protocol and remain reserved; they should not be used in the
+ * future.
+ *
+ * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * current SVGA3D protocol.
+ *
+ * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * and up.
+ */
+
+typedef enum {
+   SVGA_3D_CMD_LEGACY_BASE                                = 1000,
+   SVGA_3D_CMD_BASE                                       = 1040,
+
+   SVGA_3D_CMD_SURFACE_DEFINE                             = 1040,
+   SVGA_3D_CMD_SURFACE_DESTROY                            = 1041,
+   SVGA_3D_CMD_SURFACE_COPY                               = 1042,
+   SVGA_3D_CMD_SURFACE_STRETCHBLT                         = 1043,
+   SVGA_3D_CMD_SURFACE_DMA                                = 1044,
+   SVGA_3D_CMD_CONTEXT_DEFINE                             = 1045,
+   SVGA_3D_CMD_CONTEXT_DESTROY                            = 1046,
+   SVGA_3D_CMD_SETTRANSFORM                               = 1047,
+   SVGA_3D_CMD_SETZRANGE                                  = 1048,
+   SVGA_3D_CMD_SETRENDERSTATE                             = 1049,
+   SVGA_3D_CMD_SETRENDERTARGET                            = 1050,
+   SVGA_3D_CMD_SETTEXTURESTATE                            = 1051,
+   SVGA_3D_CMD_SETMATERIAL                                = 1052,
+   SVGA_3D_CMD_SETLIGHTDATA                               = 1053,
+   SVGA_3D_CMD_SETLIGHTENABLED                            = 1054,
+   SVGA_3D_CMD_SETVIEWPORT                                = 1055,
+   SVGA_3D_CMD_SETCLIPPLANE                               = 1056,
+   SVGA_3D_CMD_CLEAR                                      = 1057,
+   SVGA_3D_CMD_PRESENT                                    = 1058,
+   SVGA_3D_CMD_SHADER_DEFINE                              = 1059,
+   SVGA_3D_CMD_SHADER_DESTROY                             = 1060,
+   SVGA_3D_CMD_SET_SHADER                                 = 1061,
+   SVGA_3D_CMD_SET_SHADER_CONST                           = 1062,
+   SVGA_3D_CMD_DRAW_PRIMITIVES                            = 1063,
+   SVGA_3D_CMD_SETSCISSORRECT                             = 1064,
+   SVGA_3D_CMD_BEGIN_QUERY                                = 1065,
+   SVGA_3D_CMD_END_QUERY                                  = 1066,
+   SVGA_3D_CMD_WAIT_FOR_QUERY                             = 1067,
+   SVGA_3D_CMD_PRESENT_READBACK                           = 1068,
+   SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN                     = 1069,
+   SVGA_3D_CMD_SURFACE_DEFINE_V2                          = 1070,
+   SVGA_3D_CMD_GENERATE_MIPMAPS                           = 1071,
+   SVGA_3D_CMD_VIDEO_CREATE_DECODER                       = 1072,
+   SVGA_3D_CMD_VIDEO_DESTROY_DECODER                      = 1073,
+   SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR                     = 1074,
+   SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR                    = 1075,
+   SVGA_3D_CMD_VIDEO_DECODE_START_FRAME                   = 1076,
+   SVGA_3D_CMD_VIDEO_DECODE_RENDER                        = 1077,
+   SVGA_3D_CMD_VIDEO_DECODE_END_FRAME                     = 1078,
+   SVGA_3D_CMD_VIDEO_PROCESS_FRAME                        = 1079,
+   SVGA_3D_CMD_ACTIVATE_SURFACE                           = 1080,
+   SVGA_3D_CMD_DEACTIVATE_SURFACE                         = 1081,
+   SVGA_3D_CMD_SCREEN_DMA                                 = 1082,
+   SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE                   = 1083,
+   SVGA_3D_CMD_OPEN_CONTEXT_SURFACE                       = 1084,
+
+   SVGA_3D_CMD_LOGICOPS_BITBLT                            = 1085,
+   SVGA_3D_CMD_LOGICOPS_TRANSBLT                          = 1086,
+   SVGA_3D_CMD_LOGICOPS_STRETCHBLT                        = 1087,
+   SVGA_3D_CMD_LOGICOPS_COLORFILL                         = 1088,
+   SVGA_3D_CMD_LOGICOPS_ALPHABLEND                        = 1089,
+   SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND                    = 1090,
+
+   SVGA_3D_CMD_SET_OTABLE_BASE                            = 1091,
+   SVGA_3D_CMD_READBACK_OTABLE                            = 1092,
+
+   SVGA_3D_CMD_DEFINE_GB_MOB                              = 1093,
+   SVGA_3D_CMD_DESTROY_GB_MOB                             = 1094,
+   SVGA_3D_CMD_DEAD3                                      = 1095,
+   SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING                      = 1096,
+
+   SVGA_3D_CMD_DEFINE_GB_SURFACE                          = 1097,
+   SVGA_3D_CMD_DESTROY_GB_SURFACE                         = 1098,
+   SVGA_3D_CMD_BIND_GB_SURFACE                            = 1099,
+   SVGA_3D_CMD_COND_BIND_GB_SURFACE                       = 1100,
+   SVGA_3D_CMD_UPDATE_GB_IMAGE                            = 1101,
+   SVGA_3D_CMD_UPDATE_GB_SURFACE                          = 1102,
+   SVGA_3D_CMD_READBACK_GB_IMAGE                          = 1103,
+   SVGA_3D_CMD_READBACK_GB_SURFACE                        = 1104,
+   SVGA_3D_CMD_INVALIDATE_GB_IMAGE                        = 1105,
+   SVGA_3D_CMD_INVALIDATE_GB_SURFACE                      = 1106,
+
+   SVGA_3D_CMD_DEFINE_GB_CONTEXT                          = 1107,
+   SVGA_3D_CMD_DESTROY_GB_CONTEXT                         = 1108,
+   SVGA_3D_CMD_BIND_GB_CONTEXT                            = 1109,
+   SVGA_3D_CMD_READBACK_GB_CONTEXT                        = 1110,
+   SVGA_3D_CMD_INVALIDATE_GB_CONTEXT                      = 1111,
+
+   SVGA_3D_CMD_DEFINE_GB_SHADER                           = 1112,
+   SVGA_3D_CMD_DESTROY_GB_SHADER                          = 1113,
+   SVGA_3D_CMD_BIND_GB_SHADER                             = 1114,
+
+   SVGA_3D_CMD_SET_OTABLE_BASE64                          = 1115,
+
+   SVGA_3D_CMD_BEGIN_GB_QUERY                             = 1116,
+   SVGA_3D_CMD_END_GB_QUERY                               = 1117,
+   SVGA_3D_CMD_WAIT_FOR_GB_QUERY                          = 1118,
+
+   SVGA_3D_CMD_NOP                                        = 1119,
+
+   SVGA_3D_CMD_ENABLE_GART                                = 1120,
+   SVGA_3D_CMD_DISABLE_GART                               = 1121,
+   SVGA_3D_CMD_MAP_MOB_INTO_GART                          = 1122,
+   SVGA_3D_CMD_UNMAP_GART_RANGE                           = 1123,
+
+   SVGA_3D_CMD_DEFINE_GB_SCREENTARGET                     = 1124,
+   SVGA_3D_CMD_DESTROY_GB_SCREENTARGET                    = 1125,
+   SVGA_3D_CMD_BIND_GB_SCREENTARGET                       = 1126,
+   SVGA_3D_CMD_UPDATE_GB_SCREENTARGET                     = 1127,
+
+   SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL                  = 1128,
+   SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL                = 1129,
+
+   SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE                 = 1130,
+
+   SVGA_3D_CMD_GB_SCREEN_DMA                              = 1131,
+   SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH                 = 1132,
+   SVGA_3D_CMD_GB_MOB_FENCE                               = 1133,
+   SVGA_3D_CMD_DEFINE_GB_SURFACE_V2                       = 1134,
+   SVGA_3D_CMD_DEFINE_GB_MOB64                            = 1135,
+   SVGA_3D_CMD_REDEFINE_GB_MOB64                          = 1136,
+   SVGA_3D_CMD_NOP_ERROR                                  = 1137,
+
+   SVGA_3D_CMD_SET_VERTEX_STREAMS                         = 1138,
+   SVGA_3D_CMD_SET_VERTEX_DECLS                           = 1139,
+   SVGA_3D_CMD_SET_VERTEX_DIVISORS                        = 1140,
+   SVGA_3D_CMD_DRAW                                       = 1141,
+   SVGA_3D_CMD_DRAW_INDEXED                               = 1142,
+
+   /*
+    * DX10 Commands
+    */
+   SVGA_3D_CMD_DX_MIN                                     = 1143,
+   SVGA_3D_CMD_DX_DEFINE_CONTEXT                          = 1143,
+   SVGA_3D_CMD_DX_DESTROY_CONTEXT                         = 1144,
+   SVGA_3D_CMD_DX_BIND_CONTEXT                            = 1145,
+   SVGA_3D_CMD_DX_READBACK_CONTEXT                        = 1146,
+   SVGA_3D_CMD_DX_INVALIDATE_CONTEXT                      = 1147,
+   SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER              = 1148,
+   SVGA_3D_CMD_DX_SET_SHADER_RESOURCES                    = 1149,
+   SVGA_3D_CMD_DX_SET_SHADER                              = 1150,
+   SVGA_3D_CMD_DX_SET_SAMPLERS                            = 1151,
+   SVGA_3D_CMD_DX_DRAW                                    = 1152,
+   SVGA_3D_CMD_DX_DRAW_INDEXED                            = 1153,
+   SVGA_3D_CMD_DX_DRAW_INSTANCED                          = 1154,
+   SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED                  = 1155,
+   SVGA_3D_CMD_DX_DRAW_AUTO                               = 1156,
+   SVGA_3D_CMD_DX_SET_INPUT_LAYOUT                        = 1157,
+   SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS                      = 1158,
+   SVGA_3D_CMD_DX_SET_INDEX_BUFFER                        = 1159,
+   SVGA_3D_CMD_DX_SET_TOPOLOGY                            = 1160,
+   SVGA_3D_CMD_DX_SET_RENDERTARGETS                       = 1161,
+   SVGA_3D_CMD_DX_SET_BLEND_STATE                         = 1162,
+   SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE                  = 1163,
+   SVGA_3D_CMD_DX_SET_RASTERIZER_STATE                    = 1164,
+   SVGA_3D_CMD_DX_DEFINE_QUERY                            = 1165,
+   SVGA_3D_CMD_DX_DESTROY_QUERY                           = 1166,
+   SVGA_3D_CMD_DX_BIND_QUERY                              = 1167,
+   SVGA_3D_CMD_DX_SET_QUERY_OFFSET                        = 1168,
+   SVGA_3D_CMD_DX_BEGIN_QUERY                             = 1169,
+   SVGA_3D_CMD_DX_END_QUERY                               = 1170,
+   SVGA_3D_CMD_DX_READBACK_QUERY                          = 1171,
+   SVGA_3D_CMD_DX_SET_PREDICATION                         = 1172,
+   SVGA_3D_CMD_DX_SET_SOTARGETS                           = 1173,
+   SVGA_3D_CMD_DX_SET_VIEWPORTS                           = 1174,
+   SVGA_3D_CMD_DX_SET_SCISSORRECTS                        = 1175,
+   SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW                 = 1176,
+   SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW                 = 1177,
+   SVGA_3D_CMD_DX_PRED_COPY_REGION                        = 1178,
+   SVGA_3D_CMD_DX_PRED_COPY                               = 1179,
+   SVGA_3D_CMD_DX_STRETCHBLT                              = 1180,
+   SVGA_3D_CMD_DX_GENMIPS                                 = 1181,
+   SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE                      = 1182,
+   SVGA_3D_CMD_DX_READBACK_SUBRESOURCE                    = 1183,
+   SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE                  = 1184,
+   SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW              = 1185,
+   SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW             = 1186,
+   SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW                = 1187,
+   SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW               = 1188,
+   SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW                = 1189,
+   SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW               = 1190,
+   SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT                    = 1191,
+   SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT                   = 1192,
+   SVGA_3D_CMD_DX_DEFINE_BLEND_STATE                      = 1193,
+   SVGA_3D_CMD_DX_DESTROY_BLEND_STATE                     = 1194,
+   SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE               = 1195,
+   SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE              = 1196,
+   SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE                 = 1197,
+   SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE                = 1198,
+   SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE                    = 1199,
+   SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE                   = 1200,
+   SVGA_3D_CMD_DX_DEFINE_SHADER                           = 1201,
+   SVGA_3D_CMD_DX_DESTROY_SHADER                          = 1202,
+   SVGA_3D_CMD_DX_BIND_SHADER                             = 1203,
+   SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT                     = 1204,
+   SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT                    = 1205,
+   SVGA_3D_CMD_DX_SET_STREAMOUTPUT                        = 1206,
+   SVGA_3D_CMD_DX_SET_COTABLE                             = 1207,
+   SVGA_3D_CMD_DX_READBACK_COTABLE                        = 1208,
+   SVGA_3D_CMD_DX_BUFFER_COPY                             = 1209,
+   SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER                    = 1210,
+   SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK               = 1211,
+   SVGA_3D_CMD_DX_MOVE_QUERY                              = 1212,
+   SVGA_3D_CMD_DX_BIND_ALL_QUERY                          = 1213,
+   SVGA_3D_CMD_DX_READBACK_ALL_QUERY                      = 1214,
+   SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER               = 1215,
+   SVGA_3D_CMD_DX_MOB_FENCE_64                            = 1216,
+   SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT                  = 1217,
+   SVGA_3D_CMD_DX_HINT                                    = 1218,
+   SVGA_3D_CMD_DX_BUFFER_UPDATE                           = 1219,
+   SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET           = 1220,
+   SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET           = 1221,
+   SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET           = 1222,
+
+   /*
+    * Reserve some IDs to be used for the DX11 shader types.
+    */
+   SVGA_3D_CMD_DX_RESERVED1                               = 1223,
+   SVGA_3D_CMD_DX_RESERVED2                               = 1224,
+   SVGA_3D_CMD_DX_RESERVED3                               = 1225,
+
+   SVGA_3D_CMD_DX_MAX                                     = 1226,
+   SVGA_3D_CMD_MAX                                        = 1226,
+   SVGA_3D_CMD_FUTURE_MAX                                 = 3000
+} SVGAFifo3dCmdId;
+
+/*
+ * FIFO command format definitions:
+ */
+
+/*
+ * The data size header following cmdNum for every 3d command
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               id;
+   uint32               size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdHeader;
+
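+/*
+ * Layout example (illustrative, not part of the protocol definitions):
+ * every 3D command travels through the FIFO as an SVGA3dCmdHeader
+ * immediately followed by its body, so a SURFACE_DESTROY submission
+ * looks like:
+ *
+ *    struct {
+ *       SVGA3dCmdHeader header;       // id   = SVGA_3D_CMD_SURFACE_DESTROY
+ *       SVGA3dCmdDestroySurface body; // size = sizeof(body)
+ *    };
+ */
+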
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               numMipLevels;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceFace;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                      sid;
+   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
+    * SVGA3dSurfaceFace structures must have the same value in the
+    * numMipLevels field.  Otherwise, all SVGA3dSurfaceFace structures
+    * after the first must have numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface;       /* SVGA_3D_CMD_SURFACE_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                      sid;
+   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
+    * SVGA3dSurfaceFace structures must have the same value in the
+    * numMipLevels field.  Otherwise, all SVGA3dSurfaceFace structures
+    * after the first must have numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   uint32                      multisampleCount;
+   SVGA3dTextureFilter         autogenFilter;
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface_v2;     /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroySurface;      /* SVGA_3D_CMD_SURFACE_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineContext;       /* SVGA_3D_CMD_CONTEXT_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyContext;      /* SVGA_3D_CMD_CONTEXT_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dClearFlag      clearFlag;
+   uint32               color;
+   float                depth;
+   uint32               stencil;
+   /* Followed by variable number of SVGA3dRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdClear;               /* SVGA_3D_CMD_CLEAR */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dLightType      type;
+   SVGA3dBool           inWorldSpace;
+   float                diffuse[4];
+   float                specular[4];
+   float                ambient[4];
+   float                position[4];
+   float                direction[4];
+   float                range;
+   float                falloff;
+   float                attenuation0;
+   float                attenuation1;
+   float                attenuation2;
+   float                theta;
+   float                phi;
+}
+#include "vmware_pack_end.h"
+SVGA3dLightData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+   /* Followed by variable number of SVGA3dCopyRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdPresent;             /* SVGA_3D_CMD_PRESENT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dRenderStateName   state;
+   union {
+      uint32               uintValue;
+      float                floatValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   /* Followed by variable number of SVGA3dRenderState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderState;      /* SVGA_3D_CMD_SETRENDERSTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                 cid;
+   SVGA3dRenderTargetType type;
+   SVGA3dSurfaceImageId   target;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderTarget;     /* SVGA_3D_CMD_SETRENDERTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
+   /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
+   SVGA3dBox             boxSrc;
+   SVGA3dBox             boxDest;
+   SVGA3dStretchBltMode  mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceStretchBlt;         /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * If the discard flag is present in a surface DMA operation, the host may
+    * discard the contents of the current mipmap level and face of the target
+    * surface before applying the surface DMA contents.
+    */
+   uint32 discard : 1;
+
+   /*
+    * If the unsynchronized flag is present, the host may perform this upload
+    * without syncing to pending reads on this surface.
+    */
+   uint32 unsynchronized : 1;
+
+   /*
+    * Guests *MUST* set the reserved bits to 0 before submitting the command
+    * suffix as future flags may occupy these bits.
+    */
+   uint32 reserved : 30;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceDMAFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAGuestImage guest;
+   SVGA3dSurfaceImageId host;
+   SVGA3dTransferType transfer;
+   /*
+    * Followed by variable number of SVGA3dCopyBox structures. For consistency
+    * in all clipping logic and coordinate translation, we define the
+    * "source" in each copyBox as the guest image and the
+    * "destination" as the host image, regardless of transfer
+    * direction.
+    *
+    * For efficiency, the SVGA3D device is free to copy more data than
+    * specified. For example, it may round copy boxes outwards such
+    * that they lie on particular alignment boundaries.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMA;                /* SVGA_3D_CMD_SURFACE_DMA */
+
+/*
+ * SVGA3dCmdSurfaceDMASuffix --
+ *
+ *    This is a command suffix that will appear after a SurfaceDMA command in
+ *    the FIFO.  It contains some extra information that hosts may use to
+ *    optimize performance or protect the guest.  This suffix exists to preserve
+ *    backwards compatibility while also allowing for new functionality to be
+ *    implemented.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 suffixSize;
+
+   /*
+    * The maximum offset is used to determine the maximum offset from the
+    * guestPtr base address that will be accessed or written to during this
+    * surfaceDMA.  If the suffix is supported, the host will respect this
+    * boundary while performing surface DMAs.
+    *
+    * Defaults to MAX_UINT32
+    */
+   uint32 maximumOffset;
+
+   /*
+    * A set of flags that describes optimizations that the host may perform
+    * while performing this surface DMA operation.  For correctness, the
+    * guest must never rely on behaviour that differs when these flags
+    * are set.
+    *
+    * Defaults to 0
+    */
+   SVGA3dSurfaceDMAFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMASuffix;
+
+/*
+ * SVGA_3D_CMD_DRAW_PRIMITIVES --
+ *
+ *   This command is the SVGA3D device's generic drawing entry point.
+ *   It can draw multiple ranges of primitives, optionally using an
+ *   index buffer, using an arbitrary collection of vertex buffers.
+ *
+ *   Each SVGA3dVertexDecl defines a distinct vertex array to bind
+ *   during this draw call. The declarations specify which surface
+ *   the vertex data lives in, what that vertex data is used for,
+ *   and how to interpret it.
+ *
+ *   Each SVGA3dPrimitiveRange defines a collection of primitives
+ *   to render using the same vertex arrays. An index buffer is
+ *   optional.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * A range hint is an optional specification for the range of indices
+    * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
+    * that the entire array will be used.
+    *
+    * These are only hints. The SVGA3D device may use them for
+    * performance optimization if possible, but it's also allowed to
+    * ignore these values.
+    */
+   uint32               first;
+   uint32               last;
+}
+#include "vmware_pack_end.h"
+SVGA3dArrayRangeHint;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Define the origin and shape of a vertex or index array. Both
+    * 'offset' and 'stride' are in bytes. The provided surface will be
+    * reinterpreted as a flat array of bytes in the same format used
+    * by surface DMA operations. To avoid unnecessary conversions, the
+    * surface should be created with the SVGA3D_BUFFER format.
+    *
+    * Index 0 in the array starts 'offset' bytes into the surface.
+    * Index 1 begins at byte 'offset + stride', etc. Array indices may
+    * not be negative.
+    */
+   uint32               surfaceId;
+   uint32               offset;
+   uint32               stride;
+}
+#include "vmware_pack_end.h"
+SVGA3dArray;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Describe a vertex array's data type, and define how it is to be
+    * used by the fixed function pipeline or the vertex shader. It
+    * isn't useful to have two VertexDecls with the same
+    * VertexArrayIdentity in one draw call.
+    */
+   SVGA3dDeclType       type;
+   SVGA3dDeclMethod     method;
+   SVGA3dDeclUsage      usage;
+   uint32               usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexArrayIdentity;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexDecl {
+   SVGA3dVertexArrayIdentity  identity;
+   SVGA3dArray                array;
+   SVGA3dArrayRangeHint       rangeHint;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexDecl;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dPrimitiveRange {
+   /*
+    * Define a group of primitives to render, from sequential indices.
+    *
+    * The values of 'primType' and 'primitiveCount' imply the
+    * total number of vertices that will be rendered.
+    */
+   SVGA3dPrimitiveType  primType;
+   uint32               primitiveCount;
+
+   /*
+    * Optional index buffer. If indexArray.surfaceId is
+    * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
+    * without an index buffer is identical to rendering with an index
+    * buffer containing the sequence [0, 1, 2, 3, ...].
+    *
+    * If an index buffer is in use, indexWidth specifies the width in
+    * bytes of each index value. It must be less than or equal to
+    * indexArray.stride.
+    *
+    * (Currently, the SVGA3D device requires index buffers to be tightly
+    * packed. In other words, indexWidth == indexArray.stride.)
+    */
+   SVGA3dArray          indexArray;
+   uint32               indexWidth;
+
+   /*
+    * Optional index bias. This number is added to all indices from
+    * indexArray before they are used as vertex array indices. This
+    * can be used in multiple ways:
+    *
+    *  - When not using an indexArray, this bias can be used to
+    *    specify where in the vertex arrays to begin rendering.
+    *
+    *  - A positive number here is equivalent to increasing the
+    *    offset in each vertex array.
+    *
+    *  - A negative number can be used to render using a small
+    *    vertex array and an index buffer that contains large
+    *    values. This may be used by some applications that
+    *    crop a vertex buffer without modifying their index
+    *    buffer.
+    *
+    * Note that rendering with a negative bias value may be slower and
+    * use more memory than rendering with a positive or zero bias.
+    */
+   int32                indexBias;
+}
+#include "vmware_pack_end.h"
+SVGA3dPrimitiveRange;
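+
+/*
+ * Worked example (illustrative): with indexBias = -100, an index value
+ * of 102 read from indexArray selects vertex 2 of the bound vertex
+ * arrays; with no indexArray and indexBias = 5, the range renders
+ * vertices 5, 6, 7, ... in sequence.
+ */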
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               numVertexDecls;
+   uint32               numRanges;
+
+   /*
+    * Up to three variable size arrays follow the
+    * SVGA3dCmdDrawPrimitives structure. In order,
+    * they are:
+    *
+    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+    *    SVGA3D_MAX_VERTEX_ARRAYS;
+    * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+    *    SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
+    * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
+    *    the frequency divisor for the corresponding vertex decl).
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAW_PRIMITIVES */
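+
+/*
+ * Size example (illustrative): the FIFO header.size for a draw with N
+ * vertex decls and M ranges (and no divisors) is
+ *
+ *    sizeof(SVGA3dCmdDrawPrimitives)
+ *       + N * sizeof(SVGA3dVertexDecl)
+ *       + M * sizeof(SVGA3dPrimitiveRange)
+ */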
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 primitiveCount;        /* How many primitives to render */
+   uint32 startVertexLocation;   /* Which vertex to start rendering at. */
+
+   uint8 primitiveType;          /* SVGA3dPrimitiveType */
+   uint8 padding[3];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDraw;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint8 primitiveType;       /* SVGA3dPrimitiveType */
+
+   uint32 indexBufferSid;     /* Valid index buffer sid. */
+   uint32 indexBufferOffset;  /* Byte offset into the index buffer, almost */
+                              /* always 0 for DX9 guests, non-zero for OpenGL */
+                              /* guests.  We can't represent non-multiple of */
+                              /* stride offsets in D3D9Renderer... */
+   uint8 indexBufferStride;   /* Allowable values = 1, 2, or 4 */
+
+   int32 baseVertexLocation;  /* Bias applied to the index when selecting a */
+                              /* vertex from the streams, may be negative */
+
+   uint32 primitiveCount;     /* How many primitives to render */
+   uint32 pad0;
+   uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawIndexed;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Describe a vertex array's data type, and define how it is to be
+    * used by the fixed function pipeline or the vertex shader. It
+    * isn't useful to have two VertexDecls with the same
+    * VertexArrayIdentity in one draw call.
+    */
+   uint16 streamOffset;
+   uint8 stream;
+   uint8 type;          /* SVGA3dDeclType */
+   uint8 method;        /* SVGA3dDeclMethod */
+   uint8 usage;         /* SVGA3dDeclUsage */
+   uint8 usageIndex;
+   uint8 padding;
+
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 numElements;
+
+   /*
+    * Followed by numElements SVGA3dVertexElement structures.
+    *
+    * If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements
+    * are cleared and will not be used by following draws.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDecls;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexStream;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 numStreams;
+   /*
+    * Followed by numStreams SVGA3dVertexStream structures.
+    *
+    * If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams
+    * are cleared and will not be used by following draws.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexStreams;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   uint32 numDivisors;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDivisors;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                   stage;
+   SVGA3dTextureStateName   name;
+   union {
+      uint32                value;
+      float                 floatValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dTextureState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   /* Followed by variable number of SVGA3dTextureState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTextureState;      /* SVGA_3D_CMD_SETTEXTURESTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                   cid;
+   SVGA3dTransformType      type;
+   float                    matrix[16];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTransform;          /* SVGA_3D_CMD_SETTRANSFORM */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float                min;
+   float                max;
+}
+#include "vmware_pack_end.h"
+SVGA3dZRange;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dZRange         zRange;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetZRange;             /* SVGA_3D_CMD_SETZRANGE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float                diffuse[4];
+   float                ambient[4];
+   float                specular[4];
+   float                emissive[4];
+   float                shininess;
+}
+#include "vmware_pack_end.h"
+SVGA3dMaterial;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dFace           face;
+   SVGA3dMaterial       material;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetMaterial;           /* SVGA_3D_CMD_SETMATERIAL */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   SVGA3dLightData      data;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightData;           /* SVGA_3D_CMD_SETLIGHTDATA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   uint32               enabled;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightEnabled;      /* SVGA_3D_CMD_SETLIGHTENABLED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dRect           rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetViewport;           /* SVGA_3D_CMD_SETVIEWPORT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dRect           rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetScissorRect;         /* SVGA_3D_CMD_SETSCISSORRECT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   float                plane[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetClipPlane;           /* SVGA_3D_CMD_SETCLIPPLANE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               shid;
+   SVGA3dShaderType     type;
+   /* Followed by a variable number of DWORDs of shader bytecode */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineShader;           /* SVGA_3D_CMD_SHADER_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               shid;
+   SVGA3dShaderType     type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyShader;         /* SVGA_3D_CMD_SHADER_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                  cid;
+   uint32                  reg;     /* register number */
+   SVGA3dShaderType        type;
+   SVGA3dShaderConstType   ctype;
+   uint32                  values[4];
+
+   /*
+    * Followed by a variable number of additional values.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShaderConst;        /* SVGA_3D_CMD_SET_SHADER_CONST */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dShaderType     type;
+   uint32               shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShader;       /* SVGA_3D_CMD_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginQuery;           /* SVGA_3D_CMD_BEGIN_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAGuestPtr         guestResult;   /* Points to an SVGA3dQueryResult structure */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndQuery;                  /* SVGA_3D_CMD_END_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_QUERY --
+ *
+ *    Reads the SVGA3dQueryResult structure pointed to by guestResult;
+ *    if the state member is set to anything other than
+ *    SVGA3D_QUERYSTATE_PENDING, this command is a no-op.
+ *
+ *    Otherwise, in addition to the query explicitly waited for, all
+ *    queries of the same type issued with the same cid, for which an
+ *    SVGA_3D_CMD_END_QUERY command has previously been sent, will be
+ *    finished after execution of this command.
+ *
+ *    A query will be identified by the gmrId and offset of the guestResult
+ *    member. If the device can't find an SVGA_3D_CMD_END_QUERY that has
+ *    been sent previously with an identical gmrId and offset, it will
+ *    effectively end all queries with an identical type issued with the
+ *    same cid, and the SVGA3dQueryResult structure pointed to by
+ *    guestResult will not be written to. This property can be used to
+ *    implement a query barrier for a given cid and query type.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;        /* Same parameters passed to END_QUERY */
+   SVGA3dQueryType      type;
+   SVGAGuestPtr         guestResult;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForQuery;              /* SVGA_3D_CMD_WAIT_FOR_QUERY */
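+
+/*
+ * Typical guest sequence (illustrative): write SVGA3D_QUERYSTATE_PENDING
+ * to guestResult->state, submit SVGA_3D_CMD_END_QUERY followed by
+ * SVGA_3D_CMD_WAIT_FOR_QUERY with the same cid, type and guestResult,
+ * then poll guestResult->state until the host moves it out of PENDING.
+ */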
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               totalSize;    /* Set by guest before query is ended. */
+   SVGA3dQueryState     state;        /* Set by host or guest. See SVGA3dQueryState. */
+   union {                            /* Set by host on exit from PENDING state */
+      uint32            result32;
+      uint32            queryCookie; /* May be used to identify which QueryGetData this
+                                        result corresponds to. */
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dQueryResult;
+
+
+/*
+ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
+ *
+ *    This is a blit from an SVGA3D surface to a Screen Object.
+ *    This blit must be directed at a specific screen.
+ *
+ *    The blit copies from a rectangular region of an SVGA3D surface
+ *    image to a rectangular region of a screen.
+ *
+ *    This command takes an optional variable-length list of clipping
+ *    rectangles after the body of the command. If no rectangles are
+ *    specified, there is no clipping region. The entire destRect is
+ *    drawn to. If one or more rectangles are included, they describe
+ *    a clipping region. The clip rectangle coordinates are measured
+ *    relative to the top-left corner of destRect.
+ *
+ *    The srcImage must be from mip=0 face=0.
+ *
+ *    This supports scaling if the src and dest are of different sizes.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId srcImage;
+   SVGASignedRect       srcRect;
+   uint32               destScreenId; /* Screen Object ID */
+   SVGASignedRect       destRect;
+   /* Clipping: zero or more SVGASignedRects follow */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+   SVGA3dTextureFilter  filter;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdActivateSurface;               /* SVGA_3D_CMD_ACTIVATE_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDeactivateSurface;             /* SVGA_3D_CMD_DEACTIVATE_SURFACE */
+
+/*
+ * Screen DMA command
+ *
+ * Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  The SVGA_CAP_3D device
+ * cap bit is not required.
+ *
+ * - refBuffer and destBuffer are 32-bit BGRX; they may be different
+ *   buffers, but the guest must make sure refBuffer holds exactly the
+ *   contents that were in place when the host received the previous
+ *   screen DMA command.
+ *
+ * - changeMap is generated by lib/blit and contains at least the
+ *   changes since the last received screen DMA.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdScreenDMA {
+   uint32 screenId;
+   SVGAGuestImage refBuffer;
+   SVGAGuestImage destBuffer;
+   SVGAGuestImage changeMap;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenDMA;        /* SVGA_3D_CMD_SCREEN_DMA */
+
+/*
+ * Set Unity Surface Cookie
+ *
+ * Associates the supplied cookie with the surface id for use with
+ * Unity.  This cookie is a hint from guest to host; there is no way
+ * for the guest to read the cookie back, and the host is free to drop
+ * the cookie association at will.  The default value for the cookie
+ * on all surfaces is 0.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdSetUnitySurfaceCookie {
+   uint32 sid;
+   uint64 cookie;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetUnitySurfaceCookie;   /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
+
+/*
+ * Open a context-specific surface in a non-context-specific manner.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdOpenContextSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdOpenContextSurface;   /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
+
+
+/*
+ * Logic ops
+ */
+
+#define SVGA3D_LOTRANSBLT_HONORALPHA     (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORX      (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORY      (0x02)
+#define SVGA3D_LOALPHABLEND_SRCHASALPHA  (0x01)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsBitBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   SVGA3dLogicOp logicOp;
+   /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsBitBlt;   /* SVGA_3D_CMD_LOGICOPS_BITBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsTransBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint32 color;
+   uint32 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsTransBlt;   /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsStretchBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint16 mode;
+   uint16 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsStretchBlt;   /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsColorFill {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId dst;
+   uint32 color;
+   SVGA3dLogicOp logicOp;
+   /* Followed by variable number of SVGA3dRect structures. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsColorFill;   /* SVGA_3D_CMD_LOGICOPS_COLORFILL */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsAlphaBlend {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint32 alphaVal;
+   uint32 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsAlphaBlend;   /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
+
+#define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF
+
+#define SVGA3D_CLEARTYPE_GAMMA_WIDTH  512
+#define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsClearTypeBlend {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId tmp;
+   SVGA3dSurfaceImageId dst;
+   SVGA3dSurfaceImageId gammaSurf;
+   SVGA3dSurfaceImageId alphaSurf;
+   uint32 gamma;
+   uint32 color;
+   uint32 color2;
+   int32 alphaOffsetX;
+   int32 alphaOffsetY;
+   /* Followed by variable number of SVGA3dBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsClearTypeBlend;   /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */
+
+
+/*
+ * Guest-backed objects definitions.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobFormat ptDepth;
+   uint32 sizeInBytes;
+   PPN64 base;
+}
+#include "vmware_pack_end.h"
+SVGAOTableMobEntry;
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceFormat format;
+   SVGA3dSurfaceFlags surfaceFlags;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   SVGAMobId mobid;
+   uint32 arraySize;
+   uint32 mobPitch;
+   uint32 pad[5];
+}
+#include "vmware_pack_end.h"
+SVGAOTableSurfaceEntry;
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableContextEntry;
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+   uint32 offsetInBytes;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableShaderEntry;
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
+
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+typedef uint32 SVGAScreenTargetFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId image;
+   uint32 width;
+   uint32 height;
+   int32 xRoot;
+   int32 yRoot;
+   SVGAScreenTargetFlags flags;
+   uint32 dpi;
+   uint32 pad[7];
+}
+#include "vmware_pack_end.h"
+SVGAOTableScreenTargetEntry;
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \
+	(sizeof(SVGAOTableScreenTargetEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   int32 value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstInt;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 value;
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstBool;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint16 streamOffset;
+   uint8 stream;
+   uint8 type;
+   uint8 methodUsage;
+   uint8 usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   uint16 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexStream;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dRect viewport;
+   SVGA3dRect scissorRect;
+   SVGA3dZRange zRange;
+
+   SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX];
+   SVGAGBVertexElement decl1[4];
+
+   uint32 renderStates[SVGA3D_RS_MAX];
+   SVGAGBVertexElement decl2[18];
+   uint32 pad0[2];
+
+   struct {
+      SVGA3dFace face;
+      SVGA3dMaterial material;
+   } material;
+
+   float clipPlanes[SVGA3D_NUM_CLIPPLANES][4];
+   float matrices[SVGA3D_TRANSFORM_MAX][16];
+
+   SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS];
+   SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS];
+
+   /*
+    * Shaders currently bound
+    */
+   uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX];
+   SVGAGBVertexElement decl3[10];
+   uint32 pad1[3];
+
+   uint32 occQueryActive;
+   uint32 occQueryValue;
+
+   /*
+    * Int/Bool Shader constants
+    */
+   SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX];
+   SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX];
+   uint16 pShaderBValues;
+   uint16 vShaderBValues;
+
+
+   SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS];
+   SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS];
+   uint32 numVertexDecls;
+   uint32 numVertexStreams;
+   uint32 numVertexDivisors;
+   uint32 pad2[30];
+
+   /*
+    * Texture Stages
+    *
+    * SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the
+    * textureStages array.
+    * SVGA3D_TS_COLOR_KEY is in tsColorKey.
+    */
+   uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS];
+   uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1];
+   uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS];
+
+   /*
+    * Float Shader constants.
+    */
+   SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX];
+   SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX];
+}
+#include "vmware_pack_end.h"
+SVGAGBContextData;
+#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData))
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to specify the base PPN of the
+ * specified object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase;  /* SVGA_3D_CMD_SET_OTABLE_BASE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN64 baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase64;  /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
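+
+/*
+ * Usage sketch (editorial): programming the MOB object table.  The
+ * enumerator names for type and ptDepth come from the companion type
+ * headers and are assumptions here; the cmd variable is hypothetical.
+ *
+ *    SVGA3dCmdSetOTableBase64 cmd;
+ *    cmd.type             = SVGA_OTABLE_MOB;
+ *    cmd.baseAddress      = table_base_ppn;
+ *    cmd.sizeInBytes      = num_mobs * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
+ *    cmd.validSizeInBytes = 0;
+ *    cmd.ptDepth          = SVGA3D_MOBFMT_PTDEPTH64_1;
+ */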
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackOTable;  /* SVGA_3D_CMD_READBACK_OTABLE */
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob;   /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroys an object in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBMob {
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBMob;   /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob64;   /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdRedefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdRedefineGBMob64;   /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBMobMapping {
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBMobMapping;   /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface {
+   uint32 sid;
+   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBSurface;   /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to a mob.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurface {
+   uint32 sid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurface;   /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurfaceWithPitch {
+   uint32 sid;
+   SVGAMobId mobid;
+   uint32 baseLevelPitch;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurfaceWithPitch;   /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
+
+/*
+ * Conditionally bind a mob to a guest-backed surface if testMobid
+ * matches the currently bound mob.  Optionally issue a
+ * readback/update on the surface while it is still bound to the old
+ * mobid if the mobid is changed by this command.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE   (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   SVGAMobId testMobid;
+   SVGAMobId mobid;
+   uint32 flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdCondBindGBSurface;          /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
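+
+/*
+ * Usage sketch (editorial): swap the mob backing a surface only if it
+ * is still backed by the mob we expect, flushing the old contents
+ * first.  Field values are illustrative.
+ *
+ *    SVGA3dCmdCondBindGBSurface cmd;
+ *    cmd.sid       = sid;
+ *    cmd.testMobid = old_mobid;
+ *    cmd.mobid     = new_mobid;
+ *    cmd.flags     = SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK;
+ */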
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBImage {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBImage;   /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBSurface;   /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImage {
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImage;   /* SVGA_3D_CMD_READBACK_GB_IMAGE */
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBSurface;   /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
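+
+/*
+ * Ordering sketch (editorial): the required sequence for a partial
+ * readback is readback, CPU access, then an update of the same region
+ * before any other command references the surface.
+ *
+ *    1. SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL  (box B)
+ *    2. CPU reads/writes the backing pages for box B
+ *    3. SVGA_3D_CMD_UPDATE_GB_IMAGE            (same box B)
+ */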
+
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImage {
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImage;   /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents of all images can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * of the same region before issuing any other commands that reference
+ * this surface, or rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBContext;   /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBContext;   /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBContext {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBContext;   /* SVGA_3D_CMD_BIND_GB_CONTEXT */
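+
+/*
+ * Usage sketch (editorial): a context paged back onto the device keeps
+ * its contents, a brand-new one does not.
+ *
+ *    SVGA3dCmdBindGBContext cmd;
+ *    cmd.cid           = cid;
+ *    cmd.mobid         = mobid;
+ *    cmd.validContents = context_was_evicted ? 1 : 0;
+ */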
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBContext;   /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBContext;   /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBShader {
+   uint32 shid;
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBShader;   /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBShader {
+   uint32 shid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBShader;   /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBShader {
+   uint32 shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBShader;   /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                  cid;
+   uint32                  regStart;
+   SVGA3dShaderType        shaderType;
+   SVGA3dShaderConstType   constType;
+
+   /*
+    * Followed by a variable number of shader constants.
+    *
+    * Note that FLOAT and INT constants are 4-dwords in length, while
+    * BOOL constants are 1-dword in length.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetGBShaderConstInline;   /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
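+
+/*
+ * Sizing sketch (editorial): the trailing payload holds regCount
+ * constants, each 4 dwords for FLOAT/INT and 1 dword for BOOL.  The
+ * SVGA3D_CONST_TYPE_BOOL enumerator is assumed from the companion
+ * type headers.
+ *
+ *    uint32 dwordsPerReg =
+ *       (constType == SVGA3D_CONST_TYPE_BOOL) ? 1 : 4;
+ *    size_t bytes = sizeof(SVGA3dCmdSetGBShaderConstInline) +
+ *                   regCount * dwordsPerReg * sizeof(uint32);
+ */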
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginGBQuery;           /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndGBQuery;                  /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ *    The semantics of this command are identical to those of
+ *    SVGA_3D_CMD_WAIT_FOR_QUERY, except that the results are written
+ *    to a Mob instead of a GMR.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForGBQuery;          /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
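+
+/*
+ * Lifecycle sketch (editorial): a guest-backed query is begun, ended
+ * into a mob, then waited on; the device writes the result at
+ * (mobid, offset) and the guest polls that memory.
+ *
+ *    SVGA_3D_CMD_BEGIN_GB_QUERY    (cid, type)
+ *    ... draw calls being measured ...
+ *    SVGA_3D_CMD_END_GB_QUERY      (cid, type, mobid, offset)
+ *    SVGA_3D_CMD_WAIT_FOR_GB_QUERY (cid, type, mobid, offset)
+ */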
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobId mobid;
+   uint32 mustBeZero;
+   uint32 initialized;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEnableGart;              /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobId mobid;
+   uint32 gartOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdMapMobIntoGart;          /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 gartOffset;
+   uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUnmapGartRange;          /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   uint32 width;
+   uint32 height;
+   int32 xRoot;
+   int32 yRoot;
+   SVGAScreenTargetFlags flags;
+
+   /*
+    * The physical DPI that the guest expects this screen to be displayed at.
+    *
+    * Guests which are not DPI-aware should set this to zero.
+    */
+   uint32 dpi;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBScreenTarget;    /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
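+
+/*
+ * Usage sketch (editorial): a non-DPI-aware guest defining a primary
+ * 1024x768 screen target at the desktop origin.
+ *
+ *    SVGA3dCmdDefineGBScreenTarget cmd;
+ *    cmd.stid   = 0;
+ *    cmd.width  = 1024;
+ *    cmd.height = 768;
+ *    cmd.xRoot  = 0;
+ *    cmd.yRoot  = 0;
+ *    cmd.flags  = SVGA_STFLAG_PRIMARY;
+ *    cmd.dpi    = 0;
+ */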
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBScreenTarget;  /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBScreenTarget;  /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dRect rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBScreenTarget;  /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdGBScreenDMA {
+   uint32 screenId;
+   uint32 dead;
+   SVGAMobId destMobID;
+   uint32 destPitch;
+   SVGAMobId changeMapMobID;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBScreenDMA;        /* SVGA_3D_CMD_GB_SCREEN_DMA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 value;
+   uint32 mobId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBMobFence;  /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
new file mode 100644
index 000000000000..c18b663f360f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -0,0 +1,457 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_devcaps.h --
+ *
+ *       SVGA 3d caps definitions
+ */
+
+#ifndef _SVGA3D_DEVCAPS_H_
+#define _SVGA3D_DEVCAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * 3D Hardware Version
+ *
+ *   The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ *   register.  It is set by the host and read by the guest.  This lets
+ *   us make new guest drivers which are backwards-compatible with old
+ *   SVGA hardware revisions.  It does not let us support old guest
+ *   drivers.  Good enough for now.
+ *
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor)      (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version)          ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version)          ((version) & 0xFF)
+
+typedef enum {
+   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
+   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
+   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
+   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
+   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
+   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
+   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
+} SVGA3dHardwareVersion;
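+
+/*
+ * Worked example (editorial): SVGA3D_MAKE_HWVERSION(2, 1) yields
+ * 0x20001, i.e. SVGA3D_HWVERSION_WS8_B1, and the accessor macros
+ * invert it:
+ *
+ *    uint32 hwver = SVGA3D_MAKE_HWVERSION(2, 1);   (0x20001)
+ *    SVGA3D_MAJOR_HWVERSION(hwver)                 (2)
+ *    SVGA3D_MINOR_HWVERSION(hwver)                 (1)
+ */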
+
+/*
+ * DevCap indexes.
+ */
+
+typedef enum {
+   SVGA3D_DEVCAP_INVALID                           = ((uint32)-1),
+   SVGA3D_DEVCAP_3D                                = 0,
+   SVGA3D_DEVCAP_MAX_LIGHTS                        = 1,
+
+   /*
+    * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+    * fixed-function texture units available. Each of these units
+    * work in both FFP and Shader modes, and they support texture
+    * transforms and texture coordinates. The host may have additional
+    * texture image units that are only usable with shaders.
+    */
+   SVGA3D_DEVCAP_MAX_TEXTURES                      = 2,
+   SVGA3D_DEVCAP_MAX_CLIP_PLANES                   = 3,
+   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION             = 4,
+   SVGA3D_DEVCAP_VERTEX_SHADER                     = 5,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION           = 6,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER                   = 7,
+   SVGA3D_DEVCAP_MAX_RENDER_TARGETS                = 8,
+   SVGA3D_DEVCAP_S23E8_TEXTURES                    = 9,
+   SVGA3D_DEVCAP_S10E5_TEXTURES                    = 10,
+   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND             = 11,
+   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                 = 12,
+   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT               = 13,
+   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT               = 14,
+   SVGA3D_DEVCAP_QUERY_TYPES                       = 15,
+   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING         = 16,
+   SVGA3D_DEVCAP_MAX_POINT_SIZE                    = 17,
+   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES               = 18,
+   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                 = 19,
+   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                = 20,
+   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                 = 21,
+   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                = 22,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO          = 23,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY            = 24,
+   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT               = 25,
+   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                  = 26,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS    = 27,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS  = 28,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS           = 29,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS         = 30,
+   SVGA3D_DEVCAP_TEXTURE_OPS                       = 31,
+   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8               = 32,
+   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8               = 33,
+   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10            = 34,
+   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5               = 35,
+   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5               = 36,
+   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4               = 37,
+   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                 = 38,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16            = 39,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8      = 40,
+   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                 = 41,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8             = 42,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                  = 43,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                = 44,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                = 45,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT1                   = 46,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT2                   = 47,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT3                   = 48,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT4                   = 49,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT5                   = 50,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8           = 51,
+   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10            = 52,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8               = 53,
+   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8               = 54,
+   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                 = 55,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                = 56,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                = 57,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5               = 58,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8               = 59,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5             = 60,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8             = 61,
+
+   /*
+    * There is a hole in our devcap definitions for
+    * historical reasons.
+    *
+    * Define a constant just for completeness.
+    */
+   SVGA3D_DEVCAP_MISSING62                         = 62,
+
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES        = 63,
+
+   /*
+    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+    * render targets.  This does not include the depth or stencil targets.
+    */
+   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS   = 64,
+
+   SVGA3D_DEVCAP_SURFACEFMT_V16U16                 = 65,
+   SVGA3D_DEVCAP_SURFACEFMT_G16R16                 = 66,
+   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
+   SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
+   SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
+   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
+   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
+   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
+   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
+   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
+   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
+   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
+
+   /*
+    * This is the maximum number of SVGA context IDs that the guest
+    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+    */
+   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
+
+   /*
+    * This is the maximum number of SVGA surface IDs that the guest
+    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+    */
+   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
+
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
+
+   SVGA3D_DEVCAP_SURFACEFMT_ATI1                   = 82,
+   SVGA3D_DEVCAP_SURFACEFMT_ATI2                   = 83,
+
+   /*
+    * Deprecated.
+    */
+   SVGA3D_DEVCAP_DEAD1                             = 84,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+    * ORed together, one for every type of video decoding supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_DECODE                      = 85,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+    * ORed together, one for every type of video processing supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_PROCESS                     = 86,
+
+   SVGA3D_DEVCAP_LINE_AA                           = 87,  /* boolean */
+   SVGA3D_DEVCAP_LINE_STIPPLE                      = 88,  /* boolean */
+   SVGA3D_DEVCAP_MAX_LINE_WIDTH                    = 89,  /* float */
+   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                 = 90,  /* float */
+
+   SVGA3D_DEVCAP_SURFACEFMT_YV12                   = 91,
+
+   /*
+    * Does the host support the SVGA logic ops commands?
+    */
+   SVGA3D_DEVCAP_LOGICOPS                          = 92,
+
+   /*
+    * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
+    */
+   SVGA3D_DEVCAP_TS_COLOR_KEY                      = 93, /* boolean */
+
+   /*
+    * Deprecated.
+    */
+   SVGA3D_DEVCAP_DEAD2                             = 94,
+
+   /*
+    * Does the device support the DX commands?
+    */
+   SVGA3D_DEVCAP_DX                                = 95,
+
+   /*
+    * What is the maximum size of a texture array?
+    *
+    * (Even if this cap is zero, cubemaps are still allowed.)
+    */
+   SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE            = 96,
+
+   /*
+    * What is the maximum number of vertex buffers that can
+    * be used in the DXContext inputAssembly?
+    */
+   SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS              = 97,
+
+   /*
+    * What is the maximum number of constant buffers
+    * that can be expected to work correctly with a
+    * DX context?
+    */
+   SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS           = 98,
+
+   /*
+    * Does the device support provoking vertex control?
+    * If zero, the first vertex will always be the provoking vertex.
+    */
+   SVGA3D_DEVCAP_DX_PROVOKING_VERTEX               = 99,
+
+   SVGA3D_DEVCAP_DXFMT_X8R8G8B8                    = 100,
+   SVGA3D_DEVCAP_DXFMT_A8R8G8B8                    = 101,
+   SVGA3D_DEVCAP_DXFMT_R5G6B5                      = 102,
+   SVGA3D_DEVCAP_DXFMT_X1R5G5B5                    = 103,
+   SVGA3D_DEVCAP_DXFMT_A1R5G5B5                    = 104,
+   SVGA3D_DEVCAP_DXFMT_A4R4G4B4                    = 105,
+   SVGA3D_DEVCAP_DXFMT_Z_D32                       = 106,
+   SVGA3D_DEVCAP_DXFMT_Z_D16                       = 107,
+   SVGA3D_DEVCAP_DXFMT_Z_D24S8                     = 108,
+   SVGA3D_DEVCAP_DXFMT_Z_D15S1                     = 109,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE8                  = 110,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4           = 111,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE16                 = 112,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8           = 113,
+   SVGA3D_DEVCAP_DXFMT_DXT1                        = 114,
+   SVGA3D_DEVCAP_DXFMT_DXT2                        = 115,
+   SVGA3D_DEVCAP_DXFMT_DXT3                        = 116,
+   SVGA3D_DEVCAP_DXFMT_DXT4                        = 117,
+   SVGA3D_DEVCAP_DXFMT_DXT5                        = 118,
+   SVGA3D_DEVCAP_DXFMT_BUMPU8V8                    = 119,
+   SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5                  = 120,
+   SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8                = 121,
+   SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8                  = 122,
+   SVGA3D_DEVCAP_DXFMT_ARGB_S10E5                  = 123,
+   SVGA3D_DEVCAP_DXFMT_ARGB_S23E8                  = 124,
+   SVGA3D_DEVCAP_DXFMT_A2R10G10B10                 = 125,
+   SVGA3D_DEVCAP_DXFMT_V8U8                        = 126,
+   SVGA3D_DEVCAP_DXFMT_Q8W8V8U8                    = 127,
+   SVGA3D_DEVCAP_DXFMT_CxV8U8                      = 128,
+   SVGA3D_DEVCAP_DXFMT_X8L8V8U8                    = 129,
+   SVGA3D_DEVCAP_DXFMT_A2W10V10U10                 = 130,
+   SVGA3D_DEVCAP_DXFMT_ALPHA8                      = 131,
+   SVGA3D_DEVCAP_DXFMT_R_S10E5                     = 132,
+   SVGA3D_DEVCAP_DXFMT_R_S23E8                     = 133,
+   SVGA3D_DEVCAP_DXFMT_RG_S10E5                    = 134,
+   SVGA3D_DEVCAP_DXFMT_RG_S23E8                    = 135,
+   SVGA3D_DEVCAP_DXFMT_BUFFER                      = 136,
+   SVGA3D_DEVCAP_DXFMT_Z_D24X8                     = 137,
+   SVGA3D_DEVCAP_DXFMT_V16U16                      = 138,
+   SVGA3D_DEVCAP_DXFMT_G16R16                      = 139,
+   SVGA3D_DEVCAP_DXFMT_A16B16G16R16                = 140,
+   SVGA3D_DEVCAP_DXFMT_UYVY                        = 141,
+   SVGA3D_DEVCAP_DXFMT_YUY2                        = 142,
+   SVGA3D_DEVCAP_DXFMT_NV12                        = 143,
+   SVGA3D_DEVCAP_DXFMT_AYUV                        = 144,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS       = 145,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT           = 146,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT           = 147,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS          = 148,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT             = 149,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT              = 150,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT              = 151,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS       = 152,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT           = 153,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM          = 154,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT           = 155,
+   SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS             = 156,
+   SVGA3D_DEVCAP_DXFMT_R32G32_UINT                 = 157,
+   SVGA3D_DEVCAP_DXFMT_R32G32_SINT                 = 158,
+   SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS           = 159,
+   SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT        = 160,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS    = 161,
+   SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT     = 162,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS        = 163,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT            = 164,
+   SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT             = 165,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS           = 166,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM              = 167,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB         = 168,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT               = 169,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT               = 170,
+   SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS             = 171,
+   SVGA3D_DEVCAP_DXFMT_R16G16_UINT                 = 172,
+   SVGA3D_DEVCAP_DXFMT_R16G16_SINT                 = 173,
+   SVGA3D_DEVCAP_DXFMT_R32_TYPELESS                = 174,
+   SVGA3D_DEVCAP_DXFMT_D32_FLOAT                   = 175,
+   SVGA3D_DEVCAP_DXFMT_R32_UINT                    = 176,
+   SVGA3D_DEVCAP_DXFMT_R32_SINT                    = 177,
+   SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS              = 178,
+   SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT           = 179,
+   SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS       = 180,
+   SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT        = 181,
+   SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS               = 182,
+   SVGA3D_DEVCAP_DXFMT_R8G8_UNORM                  = 183,
+   SVGA3D_DEVCAP_DXFMT_R8G8_UINT                   = 184,
+   SVGA3D_DEVCAP_DXFMT_R8G8_SINT                   = 185,
+   SVGA3D_DEVCAP_DXFMT_R16_TYPELESS                = 186,
+   SVGA3D_DEVCAP_DXFMT_R16_UNORM                   = 187,
+   SVGA3D_DEVCAP_DXFMT_R16_UINT                    = 188,
+   SVGA3D_DEVCAP_DXFMT_R16_SNORM                   = 189,
+   SVGA3D_DEVCAP_DXFMT_R16_SINT                    = 190,
+   SVGA3D_DEVCAP_DXFMT_R8_TYPELESS                 = 191,
+   SVGA3D_DEVCAP_DXFMT_R8_UNORM                    = 192,
+   SVGA3D_DEVCAP_DXFMT_R8_UINT                     = 193,
+   SVGA3D_DEVCAP_DXFMT_R8_SNORM                    = 194,
+   SVGA3D_DEVCAP_DXFMT_R8_SINT                     = 195,
+   SVGA3D_DEVCAP_DXFMT_P8                          = 196,
+   SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP          = 197,
+   SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM             = 198,
+   SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM             = 199,
+   SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS                = 200,
+   SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB              = 201,
+   SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS                = 202,
+   SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB              = 203,
+   SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS                = 204,
+   SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB              = 205,
+   SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS                = 206,
+   SVGA3D_DEVCAP_DXFMT_ATI1                        = 207,
+   SVGA3D_DEVCAP_DXFMT_BC4_SNORM                   = 208,
+   SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS                = 209,
+   SVGA3D_DEVCAP_DXFMT_ATI2                        = 210,
+   SVGA3D_DEVCAP_DXFMT_BC5_SNORM                   = 211,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM  = 212,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS           = 213,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB         = 214,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS           = 215,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB         = 216,
+   SVGA3D_DEVCAP_DXFMT_Z_DF16                      = 217,
+   SVGA3D_DEVCAP_DXFMT_Z_DF24                      = 218,
+   SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT                 = 219,
+   SVGA3D_DEVCAP_DXFMT_YV12                        = 220,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT          = 221,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT          = 222,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM          = 223,
+   SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT                = 224,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM           = 225,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM              = 226,
+   SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT                = 227,
+   SVGA3D_DEVCAP_DXFMT_R16G16_UNORM                = 228,
+   SVGA3D_DEVCAP_DXFMT_R16G16_SNORM                = 229,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT                   = 230,
+   SVGA3D_DEVCAP_DXFMT_R8G8_SNORM                  = 231,
+   SVGA3D_DEVCAP_DXFMT_R16_FLOAT                   = 232,
+   SVGA3D_DEVCAP_DXFMT_D16_UNORM                   = 233,
+   SVGA3D_DEVCAP_DXFMT_A8_UNORM                    = 234,
+   SVGA3D_DEVCAP_DXFMT_BC1_UNORM                   = 235,
+   SVGA3D_DEVCAP_DXFMT_BC2_UNORM                   = 236,
+   SVGA3D_DEVCAP_DXFMT_BC3_UNORM                   = 237,
+   SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM                = 238,
+   SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM              = 239,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM              = 240,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM              = 241,
+   SVGA3D_DEVCAP_DXFMT_BC4_UNORM                   = 242,
+   SVGA3D_DEVCAP_DXFMT_BC5_UNORM                   = 243,
+
+   SVGA3D_DEVCAP_MAX                       /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+/*
+ * Bit definitions for DXFMT devcaps
+ *
+ * SUPPORTED: Can the format be defined?
+ * SHADER_SAMPLE: Can the format be sampled from a shader?
+ * COLOR_RENDERTARGET: Can the format be a color render target?
+ * DEPTH_RENDERTARGET: Can the format be a depth render target?
+ * BLENDABLE: Is the format blendable?
+ * MIPS: Does the format support mip levels?
+ * ARRAY: Does the format support texture arrays?
+ * VOLUME: Does the format support having volume?
+ * MULTISAMPLE_2: Does the format support 2x multisample?
+ * MULTISAMPLE_4: Does the format support 4x multisample?
+ * MULTISAMPLE_8: Does the format support 8x multisample?
+ */
+#define SVGA3D_DXFMT_SUPPORTED                (1 <<  0)
+#define SVGA3D_DXFMT_SHADER_SAMPLE            (1 <<  1)
+#define SVGA3D_DXFMT_COLOR_RENDERTARGET       (1 <<  2)
+#define SVGA3D_DXFMT_DEPTH_RENDERTARGET       (1 <<  3)
+#define SVGA3D_DXFMT_BLENDABLE                (1 <<  4)
+#define SVGA3D_DXFMT_MIPS                     (1 <<  5)
+#define SVGA3D_DXFMT_ARRAY                    (1 <<  6)
+#define SVGA3D_DXFMT_VOLUME                   (1 <<  7)
+#define SVGA3D_DXFMT_DX_VERTEX_BUFFER         (1 <<  8)
+#define SVGADX_DXFMT_MULTISAMPLE_2            (1 <<  9)
+#define SVGADX_DXFMT_MULTISAMPLE_4            (1 << 10)
+#define SVGADX_DXFMT_MULTISAMPLE_8            (1 << 11)
+#define SVGADX_DXFMT_MAX                      (1 << 12)
+
+/*
+ * Convenience mask for any multisample capability.
+ *
+ * The multisample bits imply both load and render capability.
+ */
+#define SVGA3D_DXFMT_MULTISAMPLE ( \
+           SVGADX_DXFMT_MULTISAMPLE_2 | \
+           SVGADX_DXFMT_MULTISAMPLE_4 | \
+           SVGADX_DXFMT_MULTISAMPLE_8 )
+
+typedef union {
+   Bool   b;
+   uint32 u;
+   int32  i;
+   float  f;
+} SVGA3dDevCapResult;
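+
+/*
+ * Usage sketch (editorial): a devcap result is read through the union
+ * member matching the cap's documented type, and DXFMT caps are bit
+ * masks tested against the SVGA3D_DXFMT_* flags.  query_devcap() is a
+ * hypothetical helper.
+ *
+ *    SVGA3dDevCapResult r;
+ *    query_devcap(SVGA3D_DEVCAP_MAX_LINE_WIDTH, &r);
+ *    float maxLineWidth = r.f;
+ *
+ *    query_devcap(SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM, &r);
+ *    Bool renderable = (r.u & SVGA3D_DXFMT_COLOR_RENDERTARGET) != 0;
+ *    Bool msaa       = (r.u & SVGA3D_DXFMT_MULTISAMPLE) != 0;
+ */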
+
+#endif /* _SVGA3D_DEVCAPS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
new file mode 100644
index 000000000000..8c5ae608cfb4
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -0,0 +1,1487 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_dx.h --
+ *
+ *       SVGA 3d hardware definitions for DX10 support.
+ */
+
+#ifndef _SVGA3D_DX_H_
+#define _SVGA3D_DX_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga3d_limits.h"
+
+#define SVGA3D_INPUT_MIN               0
+#define SVGA3D_INPUT_PER_VERTEX_DATA   0
+#define SVGA3D_INPUT_PER_INSTANCE_DATA 1
+#define SVGA3D_INPUT_MAX               2
+typedef uint32 SVGA3dInputClassification;
+
+#define SVGA3D_RESOURCE_TYPE_MIN      1
+#define SVGA3D_RESOURCE_BUFFER        1
+#define SVGA3D_RESOURCE_TEXTURE1D     2
+#define SVGA3D_RESOURCE_TEXTURE2D     3
+#define SVGA3D_RESOURCE_TEXTURE3D     4
+#define SVGA3D_RESOURCE_TEXTURECUBE   5
+#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6
+#define SVGA3D_RESOURCE_BUFFEREX      6
+#define SVGA3D_RESOURCE_TYPE_MAX      7
+typedef uint32 SVGA3dResourceType;
+
+#define SVGA3D_DEPTH_WRITE_MASK_ZERO   0
+#define SVGA3D_DEPTH_WRITE_MASK_ALL    1
+typedef uint8 SVGA3dDepthWriteMask;
+
+#define SVGA3D_FILTER_MIP_LINEAR  (1 << 0)
+#define SVGA3D_FILTER_MAG_LINEAR  (1 << 2)
+#define SVGA3D_FILTER_MIN_LINEAR  (1 << 4)
+#define SVGA3D_FILTER_ANISOTROPIC (1 << 6)
+#define SVGA3D_FILTER_COMPARE     (1 << 7)
+typedef uint32 SVGA3dFilter;
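+
+/*
+ * Composition sketch (editorial): SVGA3dFilter is a bit mask, so a
+ * trilinear sampler sets all three LINEAR bits and anisotropic
+ * filtering adds its own flag on top.
+ *
+ *    SVGA3dFilter trilinear = SVGA3D_FILTER_MIP_LINEAR |
+ *                             SVGA3D_FILTER_MAG_LINEAR |
+ *                             SVGA3D_FILTER_MIN_LINEAR;
+ *    SVGA3dFilter aniso     = trilinear | SVGA3D_FILTER_ANISOTROPIC;
+ */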
+
+#define SVGA3D_CULL_INVALID 0
+#define SVGA3D_CULL_MIN     1
+#define SVGA3D_CULL_NONE    1
+#define SVGA3D_CULL_FRONT   2
+#define SVGA3D_CULL_BACK    3
+#define SVGA3D_CULL_MAX     4
+typedef uint8 SVGA3dCullMode;
+
+#define SVGA3D_COMPARISON_INVALID         0
+#define SVGA3D_COMPARISON_MIN             1
+#define SVGA3D_COMPARISON_NEVER           1
+#define SVGA3D_COMPARISON_LESS            2
+#define SVGA3D_COMPARISON_EQUAL           3
+#define SVGA3D_COMPARISON_LESS_EQUAL      4
+#define SVGA3D_COMPARISON_GREATER         5
+#define SVGA3D_COMPARISON_NOT_EQUAL       6
+#define SVGA3D_COMPARISON_GREATER_EQUAL   7
+#define SVGA3D_COMPARISON_ALWAYS          8
+#define SVGA3D_COMPARISON_MAX             9
+typedef uint8 SVGA3dComparisonFunc;
+
+#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_SOTARGETS 4
+#define SVGA3D_DX_MAX_SRVIEWS 128
+#define SVGA3D_DX_MAX_CONSTBUFFERS 16
+#define SVGA3D_DX_MAX_SAMPLERS 16
+
+/* Id limits */
+static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
+static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
+
+typedef uint32 SVGA3dSurfaceId;
+typedef uint32 SVGA3dShaderResourceViewId;
+typedef uint32 SVGA3dRenderTargetViewId;
+typedef uint32 SVGA3dDepthStencilViewId;
+
+typedef uint32 SVGA3dShaderId;
+typedef uint32 SVGA3dElementLayoutId;
+typedef uint32 SVGA3dSamplerId;
+typedef uint32 SVGA3dBlendStateId;
+typedef uint32 SVGA3dDepthStencilStateId;
+typedef uint32 SVGA3dRasterizerStateId;
+typedef uint32 SVGA3dQueryId;
+typedef uint32 SVGA3dStreamOutputId;
+
+typedef union {
+   struct {
+      float r;
+      float g;
+      float b;
+      float a;
+   };
+
+   float value[4];
+} SVGA3dRGBAFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableDXContextEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineContext;   /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyContext;   /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */
+
+/*
+ * Bind a DX context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindContext {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindContext;   /* SVGA_3D_CMD_DX_BIND_CONTEXT */
+
+/*
+ * Readback a DX context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackContext;   /* SVGA_3D_CMD_DX_READBACK_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateContext;   /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dReplyFormatData {
+   uint32 formatSupport;
+   uint32 msaa2xQualityLevels:5;
+   uint32 msaa4xQualityLevels:5;
+   uint32 msaa8xQualityLevels:5;
+   uint32 msaa16xQualityLevels:5;
+   uint32 msaa32xQualityLevels:5;
+   uint32 pad:7;
+}
+#include "vmware_pack_end.h"
+SVGA3dReplyFormatData;
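+
+/*
+ * Decoding sketch (editorial): each msaaNxQualityLevels bitfield holds
+ * the number of quality levels available at that sample count; zero
+ * means the sample count is unsupported for the format.
+ * use_4x_msaa() is a hypothetical helper.
+ *
+ *    if (reply.formatSupport && reply.msaa4xQualityLevels > 0)
+ *       use_4x_msaa();
+ */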
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSingleConstantBuffer {
+   uint32 slot;
+   SVGA3dShaderType type;
+   SVGA3dSurfaceId sid;
+   uint32 offsetInBytes;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSingleConstantBuffer;
+/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShaderResources {
+   uint32 startView;
+   SVGA3dShaderType type;
+
+   /*
+    * Followed by a variable number of SVGA3dShaderResourceViewId's.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShader {
+   SVGA3dShaderId shaderId;
+   SVGA3dShaderType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSamplers {
+   uint32 startSampler;
+   SVGA3dShaderType type;
+
+   /*
+    * Followed by a variable number of SVGA3dSamplerId's.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDraw {
+   uint32 vertexCount;
+   uint32 startVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexed {
+   uint32 indexCount;
+   uint32 startIndexLocation;
+   int32  baseVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawInstanced {
+   uint32 vertexCountPerInstance;
+   uint32 instanceCount;
+   uint32 startVertexLocation;
+   uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexedInstanced {
+   uint32 indexCountPerInstance;
+   uint32 instanceCount;
+   uint32 startIndexLocation;
+   int32  baseVertexLocation;
+   uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawAuto {
+   uint32 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetInputLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexBuffer {
+   SVGA3dSurfaceId sid;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexBuffer;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetVertexBuffers {
+   uint32 startBuffer;
+   /* Followed by a variable number of SVGA3dVertexBuffer's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetIndexBuffer {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetTopology {
+   SVGA3dPrimitiveType topology;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRenderTargets {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+   /* Followed by a variable number of SVGA3dRenderTargetViewId's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetBlendState {
+   SVGA3dBlendStateId blendId;
+   float blendFactor[4];
+   uint32 sampleMask;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+   uint32 stencilRef;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */
+
+#define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0)
+typedef uint32 SVGA3dDXQueryFlags;
+
+/*
+ * The SVGADXQueryDeviceState and SVGADXQueryDeviceBits are used by the device
+ * to track query state transitions, but are not intended to be used by the
+ * driver.
+ */
+#define SVGADX_QDSTATE_INVALID   ((uint8)-1) /* Query has no state */
+#define SVGADX_QDSTATE_MIN       0
+#define SVGADX_QDSTATE_IDLE      0   /* Query hasn't started yet */
+#define SVGADX_QDSTATE_ACTIVE    1   /* Query is actively gathering data */
+#define SVGADX_QDSTATE_PENDING   2   /* Query is waiting for results */
+#define SVGADX_QDSTATE_FINISHED  3   /* Query has completed */
+#define SVGADX_QDSTATE_MAX       4
+typedef uint8 SVGADXQueryDeviceState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dQueryTypeUint8 type;
+   uint16 pad0;
+   SVGADXQueryDeviceState state;
+   SVGA3dDXQueryFlags flags;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXQueryEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineQuery {
+   SVGA3dQueryId queryId;
+   SVGA3dQueryType type;
+   SVGA3dDXQueryFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindQuery {
+   SVGA3dQueryId queryId;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetQueryOffset {
+   SVGA3dQueryId queryId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBeginQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXEndQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXMoveQuery {
+   SVGA3dQueryId queryId;
+   SVGAMobId mobid;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllQuery {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackAllQuery {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetPredication {
+   SVGA3dQueryId queryId;
+   uint32 predicateValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSOState {
+   uint32 offset;       /* Starting offset */
+   uint32 intOffset;    /* Internal offset */
+   uint32 vertexCount;  /* Vertices written */
+   uint32 sizeInBytes;  /* Max bytes to write */
+}
+#include "vmware_pack_end.h"
+SVGA3dDXSOState;
+
+/* Set the offset field to this value to append SO values to the buffer */
+#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSoTarget {
+   SVGA3dSurfaceId sid;
+   uint32 offset;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dSoTarget;
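+
+/*
+ * Usage sketch (editorial): binding a stream-output buffer so that new
+ * vertices append after those written by a previous draw.
+ *
+ *    SVGA3dSoTarget target;
+ *    target.sid         = so_buffer_sid;
+ *    target.offset      = SVGA3D_DX_SO_OFFSET_APPEND;
+ *    target.sizeInBytes = so_buffer_bytes;
+ */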
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSOTargets {
+   uint32 pad0;
+   /* Followed by a variable number of SVGA3dSoTarget's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dViewport
+{
+   float x;
+   float y;
+   float width;
+   float height;
+   float minDepth;
+   float maxDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dViewport;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetViewports {
+   uint32 pad0;
+   /* Followed by a variable number of SVGA3dViewport's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */
+
+#define SVGA3D_DX_MAX_VIEWPORTS  16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetScissorRects {
+   uint32 pad0;
+   /* Followed by a variable number of SVGASignedRect's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */
+
+#define SVGA3D_DX_MAX_SCISSORRECTS  16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+   SVGA3dRGBAFloat rgba;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearDepthStencilView {
+   uint16 flags;
+   uint16 stencil;
+   SVGA3dDepthStencilViewId depthStencilViewId;
+   float depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopyRegion {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopyRegion;
+/* SVGA_3D_CMD_DX_PRED_COPY_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopy {
+   SVGA3dSurfaceId dstSid;
+   SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferCopy {
+   SVGA3dSurfaceId dest;
+   SVGA3dSurfaceId src;
+   uint32 destX;
+   uint32 srcX;
+   uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferCopy;
+/* SVGA_3D_CMD_DX_BUFFER_COPY */
+
+typedef uint32 SVGA3dDXStretchBltMode;
+#define SVGADX_STRETCHBLT_LINEAR         (1 << 0)
+#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXStretchBlt {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dSurfaceId dstSid;
+   uint32 destSubResource;
+   SVGA3dBox boxSrc;
+   SVGA3dBox boxDest;
+   SVGA3dDXStretchBltMode mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGenMips {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
+
+/*
+ * Defines a resource/DX surface.  Resources share the surfaceId namespace.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+   uint32 sid;
+   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   uint32 arraySize;
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
+
+/*
+ * Update a sub-resource in a guest-backed resource.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXUpdateSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+   SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXUpdateSubResource;   /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */
+
+/*
+ * Readback a subresource in a guest-backed resource.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackSubResource;   /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateSubResource;   /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
+
+
+/*
+ * Raw byte-wise transfer from a buffer surface into another surface
+ * of the requested box.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTransferFromBuffer {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcOffset;
+   uint32 srcPitch;
+   uint32 srcSlicePitch;
+   SVGA3dSurfaceId destSid;
+   uint32 destSubResource;
+   SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTransferFromBuffer;   /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
+
+
+/*
+ * Raw byte-wise transfer from a buffer surface into another surface
+ * of the requested box.  Supported if SVGA3D_DEVCAP_DXCONTEXT is set.
+ * The context is implied from the command buffer header.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredTransferFromBuffer {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcOffset;
+   uint32 srcPitch;
+   uint32 srcSlicePitch;
+   SVGA3dSurfaceId destSid;
+   uint32 destSubResource;
+   SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredTransferFromBuffer;
+/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSurfaceCopyAndReadback {
+   SVGA3dSurfaceId srcSid;
+   SVGA3dSurfaceId destSid;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSurfaceCopyAndReadback;
+/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   union {
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+         uint32 pad0;
+         uint32 pad1;
+      } buffer;
+      struct {
+         uint32 mostDetailedMip;
+         uint32 firstArraySlice;
+         uint32 mipLevels;
+         uint32 arraySize;
+      } tex;
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+         uint32 flags;
+         uint32 pad0;
+      } bufferex;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderResourceViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   SVGA3dShaderResourceViewDesc desc;
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSRViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShaderResourceView {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+
+   SVGA3dShaderResourceViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShaderResourceView;
+/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShaderResourceView {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShaderResourceView;
+/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRenderTargetViewDesc {
+   union {
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+      } buffer;
+      struct {
+         uint32 mipSlice;
+         uint32 firstArraySlice;
+         uint32 arraySize;
+      } tex;                    /* 1d, 2d, cube */
+      struct {
+         uint32 mipSlice;
+         uint32 firstW;
+         uint32 wSize;
+      } tex3D;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderTargetViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   SVGA3dRenderTargetViewDesc desc;
+   uint32 pad[2];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRTViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+
+   SVGA3dRenderTargetViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRenderTargetView;
+/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRenderTargetView;
+/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */
+
+/*
+ * Flags for creating a depth/stencil view.
+ */
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH   0x01
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02
+#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK         0x03
+typedef uint8 SVGA3DCreateDSViewFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   uint32 mipSlice;
+   uint32 firstArraySlice;
+   uint32 arraySize;
+   SVGA3DCreateDSViewFlags flags;
+   uint8 pad0;
+   uint16 pad1;
+   uint32 pad2;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDSViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilView {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   uint32 mipSlice;
+   uint32 firstArraySlice;
+   uint32 arraySize;
+   SVGA3DCreateDSViewFlags flags;
+   uint8 pad0;
+   uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilView;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilView {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilView;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dInputElementDesc {
+   uint32 inputSlot;
+   uint32 alignedByteOffset;
+   SVGA3dSurfaceFormat format;
+   SVGA3dInputClassification inputSlotClass;
+   uint32 instanceDataStepRate;
+   uint32 inputRegister;
+}
+#include "vmware_pack_end.h"
+SVGA3dInputElementDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * XXX: How many of these can there be?
+    */
+   uint32 elid;
+   uint32 numDescs;
+   SVGA3dInputElementDesc desc[32];
+   uint32 pad[62];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXElementLayoutEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineElementLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+   /* Followed by a variable number of SVGA3dInputElementDesc's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineElementLayout;
+/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyElementLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyElementLayout;
+/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */
+
+
+#define SVGA3D_DX_MAX_RENDER_TARGETS 8
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXBlendStatePerRT {
+      uint8 blendEnable;
+      uint8 srcBlend;
+      uint8 destBlend;
+      uint8 blendOp;
+      uint8 srcBlendAlpha;
+      uint8 destBlendAlpha;
+      uint8 blendOpAlpha;
+      uint8 renderTargetWriteMask;
+      uint8 logicOpEnable;
+      uint8 logicOp;
+      uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXBlendStatePerRT;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 alphaToCoverageEnable;
+   uint8 independentBlendEnable;
+   uint16 pad0;
+   SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+   uint32 pad1[7];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXBlendStateEntry;
+
+/*
+ * Define a blend state.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineBlendState {
+   SVGA3dBlendStateId blendId;
+   uint8 alphaToCoverageEnable;
+   uint8 independentBlendEnable;
+   uint16 pad0;
+   SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyBlendState {
+   SVGA3dBlendStateId blendId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 depthEnable;
+   SVGA3dDepthWriteMask depthWriteMask;
+   SVGA3dComparisonFunc depthFunc;
+   uint8 stencilEnable;
+   uint8 frontEnable;
+   uint8 backEnable;
+   uint8 stencilReadMask;
+   uint8 stencilWriteMask;
+
+   uint8 frontStencilFailOp;
+   uint8 frontStencilDepthFailOp;
+   uint8 frontStencilPassOp;
+   SVGA3dComparisonFunc frontStencilFunc;
+
+   uint8 backStencilFailOp;
+   uint8 backStencilDepthFailOp;
+   uint8 backStencilPassOp;
+   SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDepthStencilEntry;
+
+/*
+ * Define a depth/stencil state.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+
+   uint8 depthEnable;
+   SVGA3dDepthWriteMask depthWriteMask;
+   SVGA3dComparisonFunc depthFunc;
+   uint8 stencilEnable;
+   uint8 frontEnable;
+   uint8 backEnable;
+   uint8 stencilReadMask;
+   uint8 stencilWriteMask;
+
+   uint8 frontStencilFailOp;
+   uint8 frontStencilDepthFailOp;
+   uint8 frontStencilPassOp;
+   SVGA3dComparisonFunc frontStencilFunc;
+
+   uint8 backStencilFailOp;
+   uint8 backStencilDepthFailOp;
+   uint8 backStencilPassOp;
+   SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilState;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilState;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 fillMode;
+   SVGA3dCullMode cullMode;
+   uint8 frontCounterClockwise;
+   uint8 provokingVertexLast;
+   int32 depthBias;
+   float depthBiasClamp;
+   float slopeScaledDepthBias;
+   uint8 depthClipEnable;
+   uint8 scissorEnable;
+   uint8 multisampleEnable;
+   uint8 antialiasedLineEnable;
+   float lineWidth;
+   uint8 lineStippleEnable;
+   uint8 lineStippleFactor;
+   uint16 lineStipplePattern;
+   uint32 forcedSampleCount;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRasterizerStateEntry;
+
+/*
+ * Define a rasterizer state.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+
+   uint8 fillMode;
+   SVGA3dCullMode cullMode;
+   uint8 frontCounterClockwise;
+   uint8 provokingVertexLast;
+   int32 depthBias;
+   float depthBiasClamp;
+   float slopeScaledDepthBias;
+   uint8 depthClipEnable;
+   uint8 scissorEnable;
+   uint8 multisampleEnable;
+   uint8 antialiasedLineEnable;
+   float lineWidth;
+   uint8 lineStippleEnable;
+   uint8 lineStippleFactor;
+   uint16 lineStipplePattern;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRasterizerState;
+/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRasterizerState;
+/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dFilter filter;
+   uint8 addressU;
+   uint8 addressV;
+   uint8 addressW;
+   uint8 pad0;
+   float mipLODBias;
+   uint8 maxAnisotropy;
+   SVGA3dComparisonFunc comparisonFunc;
+   uint16 pad1;
+   SVGA3dRGBAFloat borderColor;
+   float minLOD;
+   float maxLOD;
+   uint32 pad2[6];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSamplerEntry;
+
+/*
+ * Define a sampler state.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineSamplerState {
+   SVGA3dSamplerId samplerId;
+   SVGA3dFilter filter;
+   uint8 addressU;
+   uint8 addressV;
+   uint8 addressW;
+   uint8 pad0;
+   float mipLODBias;
+   uint8 maxAnisotropy;
+   SVGA3dComparisonFunc comparisonFunc;
+   uint16 pad1;
+   SVGA3dRGBAFloat borderColor;
+   float minLOD;
+   float maxLOD;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroySamplerState {
+   SVGA3dSamplerId samplerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
+
+/*
+ * A single shader signature entry.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSignatureEntry {
+   uint8 systemValue;
+   uint8 reg;                 /* register is a reserved word */
+   uint16 mask;
+   uint8 registerComponentType;
+   uint8 minPrecision;
+   uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dSignatureEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShader {
+   SVGA3dShaderId shaderId;
+   SVGA3dShaderType type;
+   uint32 sizeInBytes; /* Number of bytes of shader text. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXShaderEntry {
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+   uint32 offsetInBytes;
+   SVGAMobId mobid;
+   uint32 numInputSignatureEntries;
+   uint32 numOutputSignatureEntries;
+
+   uint32 numPatchConstantSignatureEntries;
+
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXShaderEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShader {
+   SVGA3dShaderId shaderId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindShader {
+   uint32 cid;
+   uint32 shid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindShader;   /* SVGA_3D_CMD_DX_BIND_SHADER */
+
+/*
+ * The maximum number of stream output declarations in each stream
+ * output entry.
+ */
+#define SVGA3D_MAX_STREAMOUT_DECLS 64
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dStreamOutputDeclarationEntry {
+   uint32 outputSlot;
+   uint32 registerIndex;
+   uint8  registerMask;
+   uint8  pad0;
+   uint16 pad1;
+   uint32 stream;
+}
+#include "vmware_pack_end.h"
+SVGA3dStreamOutputDeclarationEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXStreamOutputEntry {
+   uint32 numOutputStreamEntries;
+   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+   uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+   uint32 rasterizedStream;
+   uint32 pad[250];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXStreamOutputEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineStreamOutput {
+   SVGA3dStreamOutputId soid;
+   uint32 numOutputStreamEntries;
+   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+   uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+   uint32 rasterizedStream;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyStreamOutput {
+   SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetStreamOutput {
+   SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 value;
+   uint32 mobId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMobFence64;  /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
+
+/*
+ * SVGA3dCmdDXSetCOTable --
+ *
+ * This command allows the guest to bind a mob to a context-object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetCOTable {
+   uint32 cid;
+   uint32 mobid;
+   SVGACOTableType type;
+   uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackCOTable {
+   uint32 cid;
+   SVGACOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCOTableData {
+   uint32 mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCOTableData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dBufferBinding {
+   uint32 bufferId;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dConstantBufferBinding {
+   uint32 sid;
+   uint32 offsetInBytes;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dConstantBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXInputAssemblyMobFormat {
+   uint32 layoutId;
+   SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+   uint32 indexBufferSid;
+   uint32 pad;
+   uint32 indexBufferOffset;
+   uint32 indexBufferFormat;
+   uint32 topology;
+}
+#include "vmware_pack_end.h"
+SVGADXInputAssemblyMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXContextMobFormat {
+   SVGADXInputAssemblyMobFormat inputAssembly;
+
+   struct {
+      uint32 blendStateId;
+      uint32 blendFactor[4];
+      uint32 sampleMask;
+      uint32 depthStencilStateId;
+      uint32 stencilRef;
+      uint32 rasterizerStateId;
+      uint32 depthStencilViewId;
+      uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
+      uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
+   } renderState;
+
+   struct {
+      uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
+      uint32 soid;
+   } streamOut;
+   uint32 pad0[11];
+
+   uint8 numViewports;
+   uint8 numScissorRects;
+   uint16 pad1[1];
+
+   uint32 pad2[3];
+
+   SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
+   uint32 pad3[32];
+
+   SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
+   uint32 pad4[64];
+
+   struct {
+      uint32 queryID;
+      uint32 value;
+   } predication;
+   uint32 pad5[2];
+
+   struct {
+      uint32 shaderId;
+      SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+      uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS];
+      uint32 samplers[SVGA3D_DX_MAX_SAMPLERS];
+   } shaderState[SVGA3D_NUM_SHADERTYPE];
+   uint32 pad6[26];
+
+   SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
+
+   SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
+   uint32 pad7[381];
+}
+#include "vmware_pack_end.h"
+SVGADXContextMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTempSetContext {
+   uint32 dxcid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */
+
+#endif /* _SVGA3D_DX_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
new file mode 100644
index 000000000000..a1c36877ad55
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -0,0 +1,99 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_limits.h --
+ *
+ *       SVGA 3d hardware limits
+ */
+
+#ifndef _SVGA3D_LIMITS_H_
+#define _SVGA3D_LIMITS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#define SVGA3D_NUM_CLIPPLANES                   6
+#define SVGA3D_MAX_RENDER_TARGETS               8
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  (SVGA3D_MAX_RENDER_TARGETS)
+#define SVGA3D_MAX_UAVIEWS                      8
+#define SVGA3D_MAX_CONTEXT_IDS                  256
+#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
+
+/*
+ * Maximum ID a shader can be assigned on a given context.
+ */
+#define SVGA3D_MAX_SHADERIDS                    5000
+/*
+ * Maximum number of shaders of a given type that can be defined
+ * (including all contexts).
+ */
+#define SVGA3D_MAX_SIMULTANEOUS_SHADERS         20000
+
+#define SVGA3D_NUM_TEXTURE_UNITS                32
+#define SVGA3D_NUM_LIGHTS                       8
+
+/*
+ * Maximum size in dwords of shader text the SVGA device will allow.
+ * Currently 8 MB.
+ */
+#define SVGA3D_MAX_SHADER_MEMORY  (8 * 1024 * 1024 / sizeof(uint32))
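+/* (8 * 1024 * 1024) / 4 == 2097152, i.e. 2M dwords, assuming 4-byte uint32. */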
+
+#define SVGA3D_MAX_CLIP_PLANES    6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Number of faces in a cubemap.
+ */
+#define SVGA3D_MAX_SURFACE_FACES 6
+
+/*
+ * Maximum number of array indexes in a GB surface (with DX enabled).
+ */
+#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS   32
+
+/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+#endif /* _SVGA3D_LIMITS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
new file mode 100644
index 000000000000..b44ce648f592
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -0,0 +1,50 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ *       SVGA 3d hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#include "svga3d_types.h"
+#include "svga3d_limits.h"
+#include "svga3d_cmd.h"
+#include "svga3d_dx.h"
+#include "svga3d_devcaps.h"
+
+
+#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..58704f0a4607
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -0,0 +1,1204 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
+#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ *    1. Red, bump W, luminance and depth are stored in the first channel.
+ *    2. Green, bump V and stencil are stored in the second channel.
+ *    3. Blue and bump U are stored in the third channel.
+ *    4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ *    1. For compressed formats, only the data channel is used and its size
+ *       is equal to that of a singular block in the compression scheme.
+ *    2. For buffer formats, only the data channel is used and its size is
+ *       exactly one byte in length.
+ *    3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+	SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
+	SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with blue channel
+						    data */
+	SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
+						    data */
+	SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
+						    U and V */
+	SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
+						    data */
+	SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
+						    data */
+	SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
+						    channel */
+	SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with red channel
+						    data */
+	SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
+						    data */
+	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
+						    data */
+	SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
+						    data */
+	SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
+	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
+						    channel */
+	SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
+						    data */
+	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
+						    data */
+	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
+						    data depending on the
+						    compression method used */
+	SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
+						    floating point
+						    representation in
+						    all channels */
+	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store
+						    data. */
+	SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
+	SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
+	SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
+	SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
+	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
+						    e.g., NV12. */
+	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
+						    Y, U, V, e.g., YV12. */
+
+	SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
+	SVGA3DBLOCKDESC_GREEN,
+	SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
+	SVGA3DBLOCKDESC_BLUE,
+	SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
+	SVGA3DBLOCKDESC_V,
+	SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
+	SVGA3DBLOCKDESC_LUMINANCE,
+	SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
+	SVGA3DBLOCKDESC_W,
+	SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
+	SVGA3DBLOCKDESC_V |
+	SVGA3DBLOCKDESC_W |
+	SVGA3DBLOCKDESC_Q,
+	SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
+	SVGA3DBLOCKDESC_IEEE_FP,
+	SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
+	SVGA3DBLOCKDESC_GREEN,
+	SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
+	SVGA3DBLOCKDESC_BLUE,
+	SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
+	SVGA3DBLOCKDESC_STENCIL,
+	SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
+	SVGA3DBLOCKDESC_Y,
+	SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
+	SVGA3DBLOCKDESC_Y |
+	SVGA3DBLOCKDESC_U_VIDEO |
+	SVGA3DBLOCKDESC_V_VIDEO,
+	SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_EXP,
+	SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+	SVGA3DBLOCKDESC_2PLANAR_YUV,
+	SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+	SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
+
+/*
+ * struct svga3d_surface_desc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ *    1. Block description.
+ *    2. Dimensions of a block in the surface.
+ *    3. Size of block in bytes.
+ *    4. Bit depth of the pixel data.
+ *    5. Channel bit depths and masks (if applicable).
+ */
+struct svga3d_channel_def {
+	union {
+		u8 blue;
+		u8 u;
+		u8 uv_video;
+		u8 u_video;
+	};
+	union {
+		u8 green;
+		u8 v;
+		u8 stencil;
+		u8 v_video;
+	};
+	union {
+		u8 red;
+		u8 w;
+		u8 luminance;
+		u8 y;
+		u8 depth;
+		u8 data;
+	};
+	union {
+		u8 alpha;
+		u8 q;
+		u8 exp;
+	};
+};
+
+struct svga3d_surface_desc {
+	SVGA3dSurfaceFormat format;
+	enum svga3d_block_desc block_desc;
+	surf_size_struct block_size;
+	u32 bytes_per_block;
+	u32 pitch_bytes_per_block;
+
+	u32 total_bit_depth;
+	struct svga3d_channel_def bit_depth;
+	struct svga3d_channel_def bit_offset;
+};
+
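+/*
+ * Worked example: the SVGA3D_A8R8G8B8 entry below describes 1x1x1-pixel
+ * blocks of 4 bytes (32 bits total), with 8-bit blue, green, red and alpha
+ * channels at bit offsets 0, 8, 16 and 24 respectively.
+ */
+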
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+   {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
+      {1, 1, 1},  0, 0,
+      0, {{0}, {0}, {0}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {6}, {5}, {0}},
+      {{0}, {5}, {11}, {0}}},
+
+   {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  2, 2,
+      15, {{5}, {5}, {5}, {0}},
+      {{0}, {5}, {10}, {0}}},
+
+   {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {5}, {5}, {1}},
+      {{0}, {5}, {10}, {15}}},
+
+   {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  2, 2,
+      16, {{4}, {4}, {4}, {4}},
+      {{0}, {4}, {8}, {12}}},
+
+   {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {1}, {15}, {0}},
+      {{0}, {15}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {4}, {4}},
+      {{0}, {0}, {0}, {4}}},
+
+   {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {8}, {8}},
+      {{0}, {0}, {0}, {8}}},
+
+   {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {8}, {8}},
+      {{0}, {0}, {0}, {8}}},
+
+   {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {5}, {6}, {0}},
+      {{11}, {6}, {0}, {0}}},
+
+   {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  3, 3,
+      24, {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {8}, {0}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{24}, {16}, {8}, {0}}},
+
+   {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {8}, {0}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {0}, {8}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  4, 4,
+      32, {{16}, {16}, {0}, {0}},
+      {{16}, {0}, {0}, {0}}},
+
+   {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {0}, {16}, {0}}},
+
+   {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}}},
+
+   {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {0}, {8}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
+      {2, 2, 1},  6, 2,
+      48, {{0}, {0}, {48}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {8}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {8}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {8}, {0}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {11}, {11}, {0}},
+      {{0}, {10}, {21}, {0}}},
+
+   {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {24}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {0}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
+      {1, 1, 1},  4, 4,
+      32, {{9}, {9}, {9}, {5}},
+      {{18}, {9}, {0}, {27}}},
+
+   {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
+      {2, 2, 1},  6, 2,
+      48, {{0}, {0}, {48}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{24}, {16}, {8}, {0}}},
+
+   {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {0}, {16}, {0}}},
+
+   {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{16}, {16}, {0}, {0}},
+      {{16}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {8}, {0}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {0}, {8}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {6}, {5}, {0}},
+      {{0}, {5}, {11}, {0}}},
+
+   {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {5}, {5}, {1}},
+      {{0}, {5}, {10}, {15}}},
+
+   {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+	uint64_t tmp = (uint64_t) a * b;
+	return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
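+
+/*
+ * Worked example: clamped_umul32(0x10000, 0x10000) would mathematically be
+ * 0x100000000, which does not fit in 32 bits, so the result is clamped to
+ * 0xffffffff.
+ */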
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+	if (format < ARRAY_SIZE(svga3d_surface_descs))
+		return &svga3d_surface_descs[format];
+
+	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ *      Given a base level size and the mip level, compute the size of
+ *      the mip level.
+ *
+ * Results:
+ *      See above.
+ *
+ * Side effects:
+ *      None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+	surf_size_struct size;
+
+	size.width = max_t(u32, base_level.width >> mip_level, 1);
+	size.height = max_t(u32, base_level.height >> mip_level, 1);
+	size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+	return size;
+}
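+
+/*
+ * Worked example: for a 256x128x1 base level, mip level 3 is
+ * max(256 >> 3, 1) x max(128 >> 3, 1) x max(1 >> 3, 1) == 32x16x1.
+ */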
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+				 const surf_size_struct *pixel_size,
+				 surf_size_struct *block_size)
+{
+	block_size->width = DIV_ROUND_UP(pixel_size->width,
+					 desc->block_size.width);
+	block_size->height = DIV_ROUND_UP(pixel_size->height,
+					  desc->block_size.height);
+	block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+					 desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+	return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+			      const surf_size_struct *size)
+{
+	u32 pitch;
+	surf_size_struct blocks;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+	pitch = blocks.width * desc->pitch_bytes_per_block;
+
+	return pitch;
+}
+
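+/*
+ * Illustrative sketch (editor's example, hypothetical helper): for
+ * SVGA3D_BC1_UNORM (4x4 blocks, 8 pitch bytes per block) a
+ * 256-pixel-wide image spans 64 blocks, giving a pitch of
+ * 64 * 8 = 512 bytes.
+ */
+static inline u32 example_bc1_pitch(void)
+{
+	const struct svga3d_surface_desc *desc =
+		svga3dsurface_get_desc(SVGA3D_BC1_UNORM);
+	surf_size_struct size = { .width = 256, .height = 256, .depth = 1 };
+
+	return svga3dsurface_calculate_pitch(desc, &size); /* 512 */
+}
+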
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ *      Return the number of bytes of buffer space required to store
+ *      one image of a surface, optionally using the specified pitch.
+ *
+ *      If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ *      This function is overflow-safe. If the result would have
+ *      overflowed, MAX_UINT32 is returned instead.
+ *
+ * Results:
+ *      Byte count.
+ *
+ * Side effects:
+ *      None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+				    const surf_size_struct *size,
+				    u32 pitch)
+{
+	surf_size_struct image_blocks;
+	u32 slice_size, total_size;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+	if (svga3dsurface_is_planar_surface(desc)) {
+		total_size = clamped_umul32(image_blocks.width,
+					    image_blocks.height);
+		total_size = clamped_umul32(total_size, image_blocks.depth);
+		total_size = clamped_umul32(total_size, desc->bytes_per_block);
+		return total_size;
+	}
+
+	if (pitch == 0)
+		pitch = svga3dsurface_calculate_pitch(desc, size);
+
+	slice_size = clamped_umul32(image_blocks.height, pitch);
+	total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+	return total_size;
+}
+
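+/*
+ * Illustrative sketch (editor's example, hypothetical helper):
+ * SVGA3D_BC3_UNORM uses 4x4 blocks of 16 bytes, so a 256x256 image is
+ * 64x64 blocks with a tightly packed pitch of 1024 bytes, for
+ * 64 * 1024 = 65536 bytes in total.
+ */
+static inline u32 example_bc3_image_size(void)
+{
+	const struct svga3d_surface_desc *desc =
+		svga3dsurface_get_desc(SVGA3D_BC3_UNORM);
+	surf_size_struct size = { .width = 256, .height = 256, .depth = 1 };
+
+	return svga3dsurface_get_image_buffer_size(desc, &size, 0);
+}
+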
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+				  surf_size_struct base_level_size,
+				  u32 num_mip_levels,
+				  u32 num_layers)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	u32 total_size = 0;
+	u32 mip;
+
+	for (mip = 0; mip < num_mip_levels; mip++) {
+		surf_size_struct size =
+			svga3dsurface_get_mip_size(base_level_size, mip);
+		total_size += svga3dsurface_get_image_buffer_size(desc,
+								  &size, 0);
+	}
+
+	return total_size * num_layers;
+}
+
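+/*
+ * Illustrative sketch (editor's example, hypothetical helper): for
+ * SVGA3D_A8_UNORM (one byte per pixel) a 16x16 surface with a full
+ * five-level mip chain serializes to 256 + 64 + 16 + 4 + 1 = 341 bytes
+ * per layer.
+ */
+static inline u32 example_a8_serialized_size(void)
+{
+	surf_size_struct base = { .width = 16, .height = 16, .depth = 1 };
+
+	return svga3dsurface_get_serialized_size(SVGA3D_A8_UNORM, base, 5, 1);
+}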
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: The x coordinate of the pixel within the image.
+ * @y: The y coordinate of the pixel within the image.
+ * @z: The z coordinate of the pixel (zero for non-volume images).
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+			       u32 width, u32 height,
+			       u32 x, u32 y, u32 z)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+	const u32 bd = desc->block_size.depth;
+	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+	const u32 offset = (z / bd * imgstride +
+			    y / bh * rowstride +
+			    x / bw * desc->bytes_per_block);
+	return offset;
+}
+
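+/*
+ * Illustrative sketch (editor's example, hypothetical helper): for
+ * SVGA3D_BC1_UNORM in a 256-pixel-wide image the rowstride is
+ * 64 blocks * 8 bytes = 512, so the block holding pixel (17, 9, 0)
+ * starts at 9/4 * 512 + 17/4 * 8 = 1056 bytes.
+ */
+static inline u32 example_bc1_pixel_offset(void)
+{
+	return svga3dsurface_get_pixel_offset(SVGA3D_BC1_UNORM,
+					      256, 256, 17, 9, 0);
+}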
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+			       surf_size_struct baseLevelSize,
+			       u32 numMipLevels,
+			       u32 face,
+			       u32 mip)
+
+{
+	u32 offset;
+	u32 mipChainBytes;
+	u32 mipChainBytesToLevel;
+	u32 i;
+	const struct svga3d_surface_desc *desc;
+	surf_size_struct mipSize;
+	u32 bytes;
+
+	desc = svga3dsurface_get_desc(format);
+
+	mipChainBytes = 0;
+	mipChainBytesToLevel = 0;
+	for (i = 0; i < numMipLevels; i++) {
+		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+		mipChainBytes += bytes;
+		if (i < mip)
+			mipChainBytesToLevel += bytes;
+	}
+
+	offset = mipChainBytes * face + mipChainBytesToLevel;
+
+	return offset;
+}
+
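+/*
+ * Illustrative sketch (editor's example, hypothetical helper): with the
+ * 341-byte mip chain from the example above (SVGA3D_A8_UNORM, 16x16,
+ * five mips), face 2 mip 1 of a cube map starts at
+ * 2 * 341 + 256 = 938 bytes.
+ */
+static inline u32 example_cubemap_image_offset(void)
+{
+	surf_size_struct base = { .width = 16, .height = 16, .depth = 1 };
+
+	return svga3dsurface_get_image_offset(SVGA3D_A8_UNORM, base, 5, 2, 1);
+}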
+
+/**
+ * svga3dsurface_is_gb_screen_target_format - Is the specified format usable as
+ *                                            a ScreenTarget?
+ *                                            (with just the GBObjects cap-bit
+ *                                             set)
+ * @format: format to be queried
+ *
+ * RETURNS:
+ * true if queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	return (format == SVGA3D_X8R8G8B8 ||
+		format == SVGA3D_A8R8G8B8 ||
+		format == SVGA3D_R5G6B5   ||
+		format == SVGA3D_X1R5G5B5 ||
+		format == SVGA3D_A1R5G5B5 ||
+		format == SVGA3D_P8);
+}
+
+
+/**
+ * svga3dsurface_is_dx_screen_target_format - Is the specified format usable as
+ *                                            a ScreenTarget?
+ *                                            (with DX10 enabled)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	return (format == SVGA3D_R8G8B8A8_UNORM ||
+		format == SVGA3D_B8G8R8A8_UNORM ||
+		format == SVGA3D_B8G8R8X8_UNORM);
+}
+
+
+/**
+ * svga3dsurface_is_screen_target_format - Is the specified format usable as a
+ *                                         ScreenTarget?
+ *                                         (for some combination of caps)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	if (svga3dsurface_is_gb_screen_target_format(format)) {
+		return true;
+	}
+	return svga3dsurface_is_dx_screen_target_format(format);
+}
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
new file mode 100644
index 000000000000..27b33ba88430
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -0,0 +1,1633 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_types.h --
+ *
+ *       SVGA 3d hardware definitions for basic types
+ */
+
+#ifndef _SVGA3D_TYPES_H_
+#define _SVGA3D_TYPES_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * Generic Types
+ */
+
+#define SVGA3D_INVALID_ID         ((uint32)-1)
+
+typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
+typedef uint32 SVGA3dColor; /* a, r, g, b */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyRect {
+   uint32               x;
+   uint32               y;
+   uint32               w;
+   uint32               h;
+   uint32               srcx;
+   uint32               srcy;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyBox {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+   uint32               w;
+   uint32               h;
+   uint32               d;
+   uint32               srcx;
+   uint32               srcy;
+   uint32               srcz;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRect {
+   uint32               x;
+   uint32               y;
+   uint32               w;
+   uint32               h;
+}
+#include "vmware_pack_end.h"
+SVGA3dRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+   uint32               w;
+   uint32               h;
+   uint32               d;
+}
+#include "vmware_pack_end.h"
+SVGA3dBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+}
+#include "vmware_pack_end.h"
+SVGA3dPoint;
+
+/*
+ * Surface formats.
+ */
+typedef enum SVGA3dSurfaceFormat {
+   SVGA3D_FORMAT_INVALID               = 0,
+
+   SVGA3D_X8R8G8B8                     = 1,
+   SVGA3D_FORMAT_MIN                   = 1,
+
+   SVGA3D_A8R8G8B8                     = 2,
+
+   SVGA3D_R5G6B5                       = 3,
+   SVGA3D_X1R5G5B5                     = 4,
+   SVGA3D_A1R5G5B5                     = 5,
+   SVGA3D_A4R4G4B4                     = 6,
+
+   SVGA3D_Z_D32                        = 7,
+   SVGA3D_Z_D16                        = 8,
+   SVGA3D_Z_D24S8                      = 9,
+   SVGA3D_Z_D15S1                      = 10,
+
+   SVGA3D_LUMINANCE8                   = 11,
+   SVGA3D_LUMINANCE4_ALPHA4            = 12,
+   SVGA3D_LUMINANCE16                  = 13,
+   SVGA3D_LUMINANCE8_ALPHA8            = 14,
+
+   SVGA3D_DXT1                         = 15,
+   SVGA3D_DXT2                         = 16,
+   SVGA3D_DXT3                         = 17,
+   SVGA3D_DXT4                         = 18,
+   SVGA3D_DXT5                         = 19,
+
+   SVGA3D_BUMPU8V8                     = 20,
+   SVGA3D_BUMPL6V5U5                   = 21,
+   SVGA3D_BUMPX8L8V8U8                 = 22,
+   SVGA3D_BUMPL8V8U8                   = 23,
+
+   SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
+   SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
+
+   SVGA3D_A2R10G10B10                  = 26,
+
+   /* signed formats */
+   SVGA3D_V8U8                         = 27,
+   SVGA3D_Q8W8V8U8                     = 28,
+   SVGA3D_CxV8U8                       = 29,
+
+   /* mixed formats */
+   SVGA3D_X8L8V8U8                     = 30,
+   SVGA3D_A2W10V10U10                  = 31,
+
+   SVGA3D_ALPHA8                       = 32,
+
+   /* Single- and dual-component floating point formats */
+   SVGA3D_R_S10E5                      = 33,
+   SVGA3D_R_S23E8                      = 34,
+   SVGA3D_RG_S10E5                     = 35,
+   SVGA3D_RG_S23E8                     = 36,
+
+   SVGA3D_BUFFER                       = 37,
+
+   SVGA3D_Z_D24X8                      = 38,
+
+   SVGA3D_V16U16                       = 39,
+
+   SVGA3D_G16R16                       = 40,
+   SVGA3D_A16B16G16R16                 = 41,
+
+   /* Packed Video formats */
+   SVGA3D_UYVY                         = 42,
+   SVGA3D_YUY2                         = 43,
+
+   /* Planar video formats */
+   SVGA3D_NV12                         = 44,
+
+   /* Video format with alpha */
+   SVGA3D_AYUV                         = 45,
+
+   SVGA3D_R32G32B32A32_TYPELESS        = 46,
+   SVGA3D_R32G32B32A32_UINT            = 47,
+   SVGA3D_R32G32B32A32_SINT            = 48,
+   SVGA3D_R32G32B32_TYPELESS           = 49,
+   SVGA3D_R32G32B32_FLOAT              = 50,
+   SVGA3D_R32G32B32_UINT               = 51,
+   SVGA3D_R32G32B32_SINT               = 52,
+   SVGA3D_R16G16B16A16_TYPELESS        = 53,
+   SVGA3D_R16G16B16A16_UINT            = 54,
+   SVGA3D_R16G16B16A16_SNORM           = 55,
+   SVGA3D_R16G16B16A16_SINT            = 56,
+   SVGA3D_R32G32_TYPELESS              = 57,
+   SVGA3D_R32G32_UINT                  = 58,
+   SVGA3D_R32G32_SINT                  = 59,
+   SVGA3D_R32G8X24_TYPELESS            = 60,
+   SVGA3D_D32_FLOAT_S8X24_UINT         = 61,
+   SVGA3D_R32_FLOAT_X8X24_TYPELESS     = 62,
+   SVGA3D_X32_TYPELESS_G8X24_UINT      = 63,
+   SVGA3D_R10G10B10A2_TYPELESS         = 64,
+   SVGA3D_R10G10B10A2_UINT             = 65,
+   SVGA3D_R11G11B10_FLOAT              = 66,
+   SVGA3D_R8G8B8A8_TYPELESS            = 67,
+   SVGA3D_R8G8B8A8_UNORM               = 68,
+   SVGA3D_R8G8B8A8_UNORM_SRGB          = 69,
+   SVGA3D_R8G8B8A8_UINT                = 70,
+   SVGA3D_R8G8B8A8_SINT                = 71,
+   SVGA3D_R16G16_TYPELESS              = 72,
+   SVGA3D_R16G16_UINT                  = 73,
+   SVGA3D_R16G16_SINT                  = 74,
+   SVGA3D_R32_TYPELESS                 = 75,
+   SVGA3D_D32_FLOAT                    = 76,
+   SVGA3D_R32_UINT                     = 77,
+   SVGA3D_R32_SINT                     = 78,
+   SVGA3D_R24G8_TYPELESS               = 79,
+   SVGA3D_D24_UNORM_S8_UINT            = 80,
+   SVGA3D_R24_UNORM_X8_TYPELESS        = 81,
+   SVGA3D_X24_TYPELESS_G8_UINT         = 82,
+   SVGA3D_R8G8_TYPELESS                = 83,
+   SVGA3D_R8G8_UNORM                   = 84,
+   SVGA3D_R8G8_UINT                    = 85,
+   SVGA3D_R8G8_SINT                    = 86,
+   SVGA3D_R16_TYPELESS                 = 87,
+   SVGA3D_R16_UNORM                    = 88,
+   SVGA3D_R16_UINT                     = 89,
+   SVGA3D_R16_SNORM                    = 90,
+   SVGA3D_R16_SINT                     = 91,
+   SVGA3D_R8_TYPELESS                  = 92,
+   SVGA3D_R8_UNORM                     = 93,
+   SVGA3D_R8_UINT                      = 94,
+   SVGA3D_R8_SNORM                     = 95,
+   SVGA3D_R8_SINT                      = 96,
+   SVGA3D_P8                           = 97,
+   SVGA3D_R9G9B9E5_SHAREDEXP           = 98,
+   SVGA3D_R8G8_B8G8_UNORM              = 99,
+   SVGA3D_G8R8_G8B8_UNORM              = 100,
+   SVGA3D_BC1_TYPELESS                 = 101,
+   SVGA3D_BC1_UNORM_SRGB               = 102,
+   SVGA3D_BC2_TYPELESS                 = 103,
+   SVGA3D_BC2_UNORM_SRGB               = 104,
+   SVGA3D_BC3_TYPELESS                 = 105,
+   SVGA3D_BC3_UNORM_SRGB               = 106,
+   SVGA3D_BC4_TYPELESS                 = 107,
+   SVGA3D_ATI1                         = 108,   /* DX9-specific BC4_UNORM */
+   SVGA3D_BC4_SNORM                    = 109,
+   SVGA3D_BC5_TYPELESS                 = 110,
+   SVGA3D_ATI2                         = 111,   /* DX9-specific BC5_UNORM */
+   SVGA3D_BC5_SNORM                    = 112,
+   SVGA3D_R10G10B10_XR_BIAS_A2_UNORM   = 113,
+   SVGA3D_B8G8R8A8_TYPELESS            = 114,
+   SVGA3D_B8G8R8A8_UNORM_SRGB          = 115,
+   SVGA3D_B8G8R8X8_TYPELESS            = 116,
+   SVGA3D_B8G8R8X8_UNORM_SRGB          = 117,
+
+   /* Advanced depth formats. */
+   SVGA3D_Z_DF16                       = 118,
+   SVGA3D_Z_DF24                       = 119,
+   SVGA3D_Z_D24S8_INT                  = 120,
+
+   /* Planar video formats. */
+   SVGA3D_YV12                         = 121,
+
+   SVGA3D_R32G32B32A32_FLOAT           = 122,
+   SVGA3D_R16G16B16A16_FLOAT           = 123,
+   SVGA3D_R16G16B16A16_UNORM           = 124,
+   SVGA3D_R32G32_FLOAT                 = 125,
+   SVGA3D_R10G10B10A2_UNORM            = 126,
+   SVGA3D_R8G8B8A8_SNORM               = 127,
+   SVGA3D_R16G16_FLOAT                 = 128,
+   SVGA3D_R16G16_UNORM                 = 129,
+   SVGA3D_R16G16_SNORM                 = 130,
+   SVGA3D_R32_FLOAT                    = 131,
+   SVGA3D_R8G8_SNORM                   = 132,
+   SVGA3D_R16_FLOAT                    = 133,
+   SVGA3D_D16_UNORM                    = 134,
+   SVGA3D_A8_UNORM                     = 135,
+   SVGA3D_BC1_UNORM                    = 136,
+   SVGA3D_BC2_UNORM                    = 137,
+   SVGA3D_BC3_UNORM                    = 138,
+   SVGA3D_B5G6R5_UNORM                 = 139,
+   SVGA3D_B5G5R5A1_UNORM               = 140,
+   SVGA3D_B8G8R8A8_UNORM               = 141,
+   SVGA3D_B8G8R8X8_UNORM               = 142,
+   SVGA3D_BC4_UNORM                    = 143,
+   SVGA3D_BC5_UNORM                    = 144,
+
+   SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+typedef enum SVGA3dSurfaceFlags {
+   SVGA3D_SURFACE_CUBEMAP               = (1 << 0),
+
+   /*
+    * HINT flags are not enforced by the device but are useful for
+    * performance.
+    */
+   SVGA3D_SURFACE_HINT_STATIC           = (1 << 1),
+   SVGA3D_SURFACE_HINT_DYNAMIC          = (1 << 2),
+   SVGA3D_SURFACE_HINT_INDEXBUFFER      = (1 << 3),
+   SVGA3D_SURFACE_HINT_VERTEXBUFFER     = (1 << 4),
+   SVGA3D_SURFACE_HINT_TEXTURE          = (1 << 5),
+   SVGA3D_SURFACE_HINT_RENDERTARGET     = (1 << 6),
+   SVGA3D_SURFACE_HINT_DEPTHSTENCIL     = (1 << 7),
+   SVGA3D_SURFACE_HINT_WRITEONLY        = (1 << 8),
+   SVGA3D_SURFACE_MASKABLE_ANTIALIAS    = (1 << 9),
+   SVGA3D_SURFACE_AUTOGENMIPMAPS        = (1 << 10),
+   SVGA3D_SURFACE_DECODE_RENDERTARGET   = (1 << 11),
+
+   /*
+    * Is this surface using a base-level pitch for its mob backing?
+    *
+    * This flag is not intended to be set by guest-drivers, but is instead
+    * set by the device when the surface is bound to a mob with a specified
+    * pitch.
+    */
+   SVGA3D_SURFACE_MOB_PITCH             = (1 << 12),
+
+   SVGA3D_SURFACE_INACTIVE              = (1 << 13),
+   SVGA3D_SURFACE_HINT_RT_LOCKABLE      = (1 << 14),
+   SVGA3D_SURFACE_VOLUME                = (1 << 15),
+
+   /*
+    * Required to be set on a surface to bind it to a screen target.
+    */
+   SVGA3D_SURFACE_SCREENTARGET          = (1 << 16),
+
+   /*
+    * Align images in the guest-backing mob to 16-byte boundaries.
+    */
+   SVGA3D_SURFACE_ALIGN16               = (1 << 17),
+
+   SVGA3D_SURFACE_1D                    = (1 << 18),
+   SVGA3D_SURFACE_ARRAY                 = (1 << 19),
+
+   /*
+    * Bind flags.
+    * These are enforced for any surface defined with DefineGBSurface_v2.
+    */
+   SVGA3D_SURFACE_BIND_VERTEX_BUFFER    = (1 << 20),
+   SVGA3D_SURFACE_BIND_INDEX_BUFFER     = (1 << 21),
+   SVGA3D_SURFACE_BIND_CONSTANT_BUFFER  = (1 << 22),
+   SVGA3D_SURFACE_BIND_SHADER_RESOURCE  = (1 << 23),
+   SVGA3D_SURFACE_BIND_RENDER_TARGET    = (1 << 24),
+   SVGA3D_SURFACE_BIND_DEPTH_STENCIL    = (1 << 25),
+   SVGA3D_SURFACE_BIND_STREAM_OUTPUT    = (1 << 26),
+
+   /*
+    * A note on staging flags:
+    *
+    * The STAGING flags note that the surface will not be used directly by the
+    * drawing pipeline, i.e. that it will not be bound to any bind point.
+    * Staging surfaces may be used by copy operations to move data in and out
+    * of other surfaces.
+    *
+    * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+    * updates indirectly, i.e. the surface will not be updated directly, but
+    * will receive copies from staging surfaces.
+    */
+   SVGA3D_SURFACE_STAGING_UPLOAD        = (1 << 27),
+   SVGA3D_SURFACE_STAGING_DOWNLOAD      = (1 << 28),
+   SVGA3D_SURFACE_HINT_INDIRECT_UPDATE  = (1 << 29),
+
+   /*
+    * Setting this flag allows this surface to be used with the
+    * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command.  It is only valid for
+    * buffer surfaces, and no bind flags are allowed to be set on surfaces
+    * with this flag.
+    */
+   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  = (1 << 30),
+
+   /*
+    * Marker for the last defined bit.
+    */
+   SVGA3D_SURFACE_FLAG_MAX              = (1 << 31),
+} SVGA3dSurfaceFlags;
+
+#define SVGA3D_SURFACE_HB_DISALLOWED_MASK        \
+        (  SVGA3D_SURFACE_MOB_PITCH    |         \
+           SVGA3D_SURFACE_SCREENTARGET |         \
+           SVGA3D_SURFACE_ALIGN16 |              \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |   \
+           SVGA3D_SURFACE_STAGING_UPLOAD |       \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |     \
+           SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER   \
+        )
+
+#define SVGA3D_SURFACE_2D_DISALLOWED_MASK           \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_MASKABLE_ANTIALIAS |      \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_DECODE_RENDERTARGET |     \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_ARRAY |                   \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER      \
+        )
+
+#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_DECODE_RENDERTARGET |     \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
+           SVGA3D_SURFACE_INACTIVE |                \
+           SVGA3D_SURFACE_STAGING_UPLOAD |          \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |        \
+           SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |    \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER      \
+        )
+
+#define SVGA3D_SURFACE_DX_ONLY_MASK             \
+        (  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |  \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  \
+        )
+
+#define SVGA3D_SURFACE_STAGING_MASK             \
+        (  SVGA3D_SURFACE_STAGING_UPLOAD |      \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD      \
+        )
+
+#define SVGA3D_SURFACE_BIND_MASK                  \
+        (  SVGA3D_SURFACE_BIND_VERTEX_BUFFER   |  \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER    |  \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |  \
+           SVGA3D_SURFACE_BIND_SHADER_RESOURCE |  \
+           SVGA3D_SURFACE_BIND_RENDER_TARGET   |  \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL   |  \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT      \
+        )
+
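+/*
+ * Illustrative sketch (editor's example; the check below is hypothetical
+ * and not the driver's actual validation path): the disallowed masks are
+ * meant to be ANDed against a surface's flag word, e.g. to vet a
+ * screen-target definition.
+ */
+static inline bool example_screentarget_flags_ok(SVGA3dSurfaceFlags flags)
+{
+	return (flags & SVGA3D_SURFACE_SCREENTARGET) != 0 &&
+	       (flags & SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK) == 0;
+}
+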
+typedef enum {
+   SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
+   SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
+   SVGA3DFORMAT_OP_CUBETEXTURE                           = 0x00000004,
+   SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET                = 0x00000008,
+   SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET              = 0x00000010,
+   SVGA3DFORMAT_OP_ZSTENCIL                              = 0x00000040,
+   SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH   = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * is the same depth if the alpha channel is ignored. e.g. if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+   SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET  = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip).  This flag
+ * should not be set on alpha formats.
+ */
+   SVGA3DFORMAT_OP_DISPLAYMODE                           = 0x00000400,
+
+/*
+ * The rasterizer can provide some level of Direct3D support for this format,
+ * implying that the driver can create a context in this mode (for some
+ * render target format).  When this flag is set, the
+ * SVGA3DFORMAT_OP_DISPLAYMODE flag must also be set.
+ */
+   SVGA3DFORMAT_OP_3DACCELERATION                        = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+   SVGA3DFORMAT_OP_PIXELSIZE                             = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ */
+   SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+   SVGA3DFORMAT_OP_OFFSCREENPLAIN                        = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an SRGB texture (meaning that the
+ * sampler will linearize the looked-up data)
+ */
+   SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions
+ */
+   SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler
+ */
+   SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering
+ */
+   SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+   SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                    = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an SRGB target
+ * (meaning that the pixel pipe will de-linearize data on output to the format)
+ */
+   SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending
+ */
+   SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format
+ */
+   SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
+
+/*
+ * Indicates that this format can be used by vertex texture sampler
+ */
+   SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate
+ * wrap modes, nor mipmapping.
+ */
+   SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                  = 0x01000000
+} SVGA3dFormatOp;
+
+#define SVGA3D_FORMAT_POSITIVE                             \
+   (SVGA3DFORMAT_OP_TEXTURE                              | \
+    SVGA3DFORMAT_OP_VOLUMETEXTURE                        | \
+    SVGA3DFORMAT_OP_CUBETEXTURE                          | \
+    SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET               | \
+    SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET             | \
+    SVGA3DFORMAT_OP_ZSTENCIL                             | \
+    SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH  | \
+    SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \
+    SVGA3DFORMAT_OP_DISPLAYMODE                          | \
+    SVGA3DFORMAT_OP_3DACCELERATION                       | \
+    SVGA3DFORMAT_OP_PIXELSIZE                            | \
+    SVGA3DFORMAT_OP_CONVERT_TO_ARGB                      | \
+    SVGA3DFORMAT_OP_OFFSCREENPLAIN                       | \
+    SVGA3DFORMAT_OP_SRGBREAD                             | \
+    SVGA3DFORMAT_OP_BUMPMAP                              | \
+    SVGA3DFORMAT_OP_DMAP                                 | \
+    SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                   | \
+    SVGA3DFORMAT_OP_SRGBWRITE                            | \
+    SVGA3DFORMAT_OP_AUTOGENMIPMAP                        | \
+    SVGA3DFORMAT_OP_VERTEXTEXTURE)
+
+#define SVGA3D_FORMAT_NEGATIVE               \
+   (SVGA3DFORMAT_OP_NOFILTER               | \
+    SVGA3DFORMAT_OP_NOALPHABLEND           | \
+    SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP)
+
+/*
+ * This structure is a bitfield conversion of SVGA3DFORMAT_OP_*.
+ * Entries must be located at the same bit positions.
+ */
+typedef union {
+   uint32 value;
+   struct {
+      uint32 texture : 1;
+      uint32 volumeTexture : 1;
+      uint32 cubeTexture : 1;
+      uint32 offscreenRenderTarget : 1;
+      uint32 sameFormatRenderTarget : 1;
+      uint32 unknown1 : 1;
+      uint32 zStencil : 1;
+      uint32 zStencilArbitraryDepth : 1;
+      uint32 sameFormatUpToAlpha : 1;
+      uint32 unknown2 : 1;
+      uint32 displayMode : 1;
+      uint32 acceleration3d : 1;
+      uint32 pixelSize : 1;
+      uint32 convertToARGB : 1;
+      uint32 offscreenPlain : 1;
+      uint32 sRGBRead : 1;
+      uint32 bumpMap : 1;
+      uint32 dmap : 1;
+      uint32 noFilter : 1;
+      uint32 memberOfGroupARGB : 1;
+      uint32 sRGBWrite : 1;
+      uint32 noAlphaBlend : 1;
+      uint32 autoGenMipMap : 1;
+      uint32 vertexTexture : 1;
+      uint32 noTexCoordWrapNorMip : 1;
+   };
+} SVGA3dSurfaceFormatCaps;
+
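+/*
+ * Illustrative sketch (editor's example, hypothetical helper): because
+ * the bitfield mirrors the SVGA3DFORMAT_OP_* bit positions, an op mask
+ * assigned to .value can be read back through the named fields.
+ */
+static inline bool example_format_caps_roundtrip(void)
+{
+	SVGA3dSurfaceFormatCaps caps;
+
+	caps.value = SVGA3DFORMAT_OP_TEXTURE | SVGA3DFORMAT_OP_SRGBREAD;
+	return caps.texture == 1 && caps.sRGBRead == 1;
+}
+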
+/*
+ * SVGA_3D_CMD_SETRENDERSTATE Types.  All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+   SVGA3D_RS_INVALID                   = 0,
+   SVGA3D_RS_MIN                       = 1,
+   SVGA3D_RS_ZENABLE                   = 1,     /* SVGA3dBool */
+   SVGA3D_RS_ZWRITEENABLE              = 2,     /* SVGA3dBool */
+   SVGA3D_RS_ALPHATESTENABLE           = 3,     /* SVGA3dBool */
+   SVGA3D_RS_DITHERENABLE              = 4,     /* SVGA3dBool */
+   SVGA3D_RS_BLENDENABLE               = 5,     /* SVGA3dBool */
+   SVGA3D_RS_FOGENABLE                 = 6,     /* SVGA3dBool */
+   SVGA3D_RS_SPECULARENABLE            = 7,     /* SVGA3dBool */
+   SVGA3D_RS_STENCILENABLE             = 8,     /* SVGA3dBool */
+   SVGA3D_RS_LIGHTINGENABLE            = 9,     /* SVGA3dBool */
+   SVGA3D_RS_NORMALIZENORMALS          = 10,    /* SVGA3dBool */
+   SVGA3D_RS_POINTSPRITEENABLE         = 11,    /* SVGA3dBool */
+   SVGA3D_RS_POINTSCALEENABLE          = 12,    /* SVGA3dBool */
+   SVGA3D_RS_STENCILREF                = 13,    /* uint32 */
+   SVGA3D_RS_STENCILMASK               = 14,    /* uint32 */
+   SVGA3D_RS_STENCILWRITEMASK          = 15,    /* uint32 */
+   SVGA3D_RS_FOGSTART                  = 16,    /* float */
+   SVGA3D_RS_FOGEND                    = 17,    /* float */
+   SVGA3D_RS_FOGDENSITY                = 18,    /* float */
+   SVGA3D_RS_POINTSIZE                 = 19,    /* float */
+   SVGA3D_RS_POINTSIZEMIN              = 20,    /* float */
+   SVGA3D_RS_POINTSIZEMAX              = 21,    /* float */
+   SVGA3D_RS_POINTSCALE_A              = 22,    /* float */
+   SVGA3D_RS_POINTSCALE_B              = 23,    /* float */
+   SVGA3D_RS_POINTSCALE_C              = 24,    /* float */
+   SVGA3D_RS_FOGCOLOR                  = 25,    /* SVGA3dColor */
+   SVGA3D_RS_AMBIENT                   = 26,    /* SVGA3dColor */
+   SVGA3D_RS_CLIPPLANEENABLE           = 27,    /* SVGA3dClipPlanes */
+   SVGA3D_RS_FOGMODE                   = 28,    /* SVGA3dFogMode */
+   SVGA3D_RS_FILLMODE                  = 29,    /* SVGA3dFillMode */
+   SVGA3D_RS_SHADEMODE                 = 30,    /* SVGA3dShadeMode */
+   SVGA3D_RS_LINEPATTERN               = 31,    /* SVGA3dLinePattern */
+   SVGA3D_RS_SRCBLEND                  = 32,    /* SVGA3dBlendOp */
+   SVGA3D_RS_DSTBLEND                  = 33,    /* SVGA3dBlendOp */
+   SVGA3D_RS_BLENDEQUATION             = 34,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_CULLMODE                  = 35,    /* SVGA3dFace */
+   SVGA3D_RS_ZFUNC                     = 36,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_ALPHAFUNC                 = 37,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_STENCILFUNC               = 38,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_STENCILFAIL               = 39,    /* SVGA3dStencilOp */
+   SVGA3D_RS_STENCILZFAIL              = 40,    /* SVGA3dStencilOp */
+   SVGA3D_RS_STENCILPASS               = 41,    /* SVGA3dStencilOp */
+   SVGA3D_RS_ALPHAREF                  = 42,    /* float (0.0 .. 1.0) */
+   SVGA3D_RS_FRONTWINDING              = 43,    /* SVGA3dFrontWinding */
+   SVGA3D_RS_COORDINATETYPE            = 44,    /* SVGA3dCoordinateType */
+   SVGA3D_RS_ZBIAS                     = 45,    /* float */
+   SVGA3D_RS_RANGEFOGENABLE            = 46,    /* SVGA3dBool */
+   SVGA3D_RS_COLORWRITEENABLE          = 47,    /* SVGA3dColorMask */
+   SVGA3D_RS_VERTEXMATERIALENABLE      = 48,    /* SVGA3dBool */
+   SVGA3D_RS_DIFFUSEMATERIALSOURCE     = 49,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_SPECULARMATERIALSOURCE    = 50,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_AMBIENTMATERIALSOURCE     = 51,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_EMISSIVEMATERIALSOURCE    = 52,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_TEXTUREFACTOR             = 53,    /* SVGA3dColor */
+   SVGA3D_RS_LOCALVIEWER               = 54,    /* SVGA3dBool */
+   SVGA3D_RS_SCISSORTESTENABLE         = 55,    /* SVGA3dBool */
+   SVGA3D_RS_BLENDCOLOR                = 56,    /* SVGA3dColor */
+   SVGA3D_RS_STENCILENABLE2SIDED       = 57,    /* SVGA3dBool */
+   SVGA3D_RS_CCWSTENCILFUNC            = 58,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_CCWSTENCILFAIL            = 59,    /* SVGA3dStencilOp */
+   SVGA3D_RS_CCWSTENCILZFAIL           = 60,    /* SVGA3dStencilOp */
+   SVGA3D_RS_CCWSTENCILPASS            = 61,    /* SVGA3dStencilOp */
+   SVGA3D_RS_VERTEXBLEND               = 62,    /* SVGA3dVertexBlendFlags */
+   SVGA3D_RS_SLOPESCALEDEPTHBIAS       = 63,    /* float */
+   SVGA3D_RS_DEPTHBIAS                 = 64,    /* float */
+
+
+   /*
+    * Output Gamma Level
+    *
+    * Output gamma affects the gamma curve of colors that are output from the
+    * rendering pipeline.  A value of 1.0 specifies a linear color space. If the
+    * value is <= 0.0, gamma correction is ignored and linear color space is
+    * used.
+    */
+
+   SVGA3D_RS_OUTPUTGAMMA               = 65,    /* float */
+   SVGA3D_RS_ZVISIBLE                  = 66,    /* SVGA3dBool */
+   SVGA3D_RS_LASTPIXEL                 = 67,    /* SVGA3dBool */
+   SVGA3D_RS_CLIPPING                  = 68,    /* SVGA3dBool */
+   SVGA3D_RS_WRAP0                     = 69,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP1                     = 70,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP2                     = 71,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP3                     = 72,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP4                     = 73,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP5                     = 74,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP6                     = 75,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP7                     = 76,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP8                     = 77,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP9                     = 78,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP10                    = 79,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP11                    = 80,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP12                    = 81,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP13                    = 82,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP14                    = 83,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP15                    = 84,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_MULTISAMPLEANTIALIAS      = 85,    /* SVGA3dBool */
+   SVGA3D_RS_MULTISAMPLEMASK           = 86,    /* uint32 */
+   SVGA3D_RS_INDEXEDVERTEXBLENDENABLE  = 87,    /* SVGA3dBool */
+   SVGA3D_RS_TWEENFACTOR               = 88,    /* float */
+   SVGA3D_RS_ANTIALIASEDLINEENABLE     = 89,    /* SVGA3dBool */
+   SVGA3D_RS_COLORWRITEENABLE1         = 90,    /* SVGA3dColorMask */
+   SVGA3D_RS_COLORWRITEENABLE2         = 91,    /* SVGA3dColorMask */
+   SVGA3D_RS_COLORWRITEENABLE3         = 92,    /* SVGA3dColorMask */
+   SVGA3D_RS_SEPARATEALPHABLENDENABLE  = 93,    /* SVGA3dBool */
+   SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
+   SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
+   SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
+   SVGA3D_RS_LINEWIDTH                 = 98,    /* float */
+   SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+   SVGA3D_TRANSPARENCYANTIALIAS_NORMAL            = 0,
+   SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE   = 1,
+   SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE       = 2,
+   SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
+   SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
+   SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
+   SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
+   SVGA3D_VERTEXMATERIAL_MAX      = 3,
+} SVGA3dVertexMaterial;
+
+typedef enum {
+   SVGA3D_FILLMODE_INVALID = 0,
+   SVGA3D_FILLMODE_MIN     = 1,
+   SVGA3D_FILLMODE_POINT   = 1,
+   SVGA3D_FILLMODE_LINE    = 2,
+   SVGA3D_FILLMODE_FILL    = 3,
+   SVGA3D_FILLMODE_MAX
+} SVGA3dFillModeType;
+
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint16   mode;       /* SVGA3dFillModeType */
+      uint16   face;       /* SVGA3dFace */
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dFillMode;
+
+typedef enum {
+   SVGA3D_SHADEMODE_INVALID = 0,
+   SVGA3D_SHADEMODE_FLAT    = 1,
+   SVGA3D_SHADEMODE_SMOOTH  = 2,
+   SVGA3D_SHADEMODE_PHONG   = 3,     /* Not supported */
+   SVGA3D_SHADEMODE_MAX
+} SVGA3dShadeMode;
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint16 repeat;
+      uint16 pattern;
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dLinePattern;
+
+typedef enum {
+   SVGA3D_BLENDOP_INVALID             = 0,
+   SVGA3D_BLENDOP_MIN                 = 1,
+   SVGA3D_BLENDOP_ZERO                = 1,
+   SVGA3D_BLENDOP_ONE                 = 2,
+   SVGA3D_BLENDOP_SRCCOLOR            = 3,
+   SVGA3D_BLENDOP_INVSRCCOLOR         = 4,
+   SVGA3D_BLENDOP_SRCALPHA            = 5,
+   SVGA3D_BLENDOP_INVSRCALPHA         = 6,
+   SVGA3D_BLENDOP_DESTALPHA           = 7,
+   SVGA3D_BLENDOP_INVDESTALPHA        = 8,
+   SVGA3D_BLENDOP_DESTCOLOR           = 9,
+   SVGA3D_BLENDOP_INVDESTCOLOR        = 10,
+   SVGA3D_BLENDOP_SRCALPHASAT         = 11,
+   SVGA3D_BLENDOP_BLENDFACTOR         = 12,
+   SVGA3D_BLENDOP_INVBLENDFACTOR      = 13,
+   SVGA3D_BLENDOP_SRC1COLOR           = 14,
+   SVGA3D_BLENDOP_INVSRC1COLOR        = 15,
+   SVGA3D_BLENDOP_SRC1ALPHA           = 16,
+   SVGA3D_BLENDOP_INVSRC1ALPHA        = 17,
+   SVGA3D_BLENDOP_BLENDFACTORALPHA    = 18,
+   SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19,
+   SVGA3D_BLENDOP_MAX
+} SVGA3dBlendOp;
+
+typedef enum {
+   SVGA3D_BLENDEQ_INVALID            = 0,
+   SVGA3D_BLENDEQ_MIN                = 1,
+   SVGA3D_BLENDEQ_ADD                = 1,
+   SVGA3D_BLENDEQ_SUBTRACT           = 2,
+   SVGA3D_BLENDEQ_REVSUBTRACT        = 3,
+   SVGA3D_BLENDEQ_MINIMUM            = 4,
+   SVGA3D_BLENDEQ_MAXIMUM            = 5,
+   SVGA3D_BLENDEQ_MAX
+} SVGA3dBlendEquation;
+
+typedef enum {
+   SVGA3D_DX11_LOGICOP_MIN           = 0,
+   SVGA3D_DX11_LOGICOP_CLEAR         = 0,
+   SVGA3D_DX11_LOGICOP_SET           = 1,
+   SVGA3D_DX11_LOGICOP_COPY          = 2,
+   SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3,
+   SVGA3D_DX11_LOGICOP_NOOP          = 4,
+   SVGA3D_DX11_LOGICOP_INVERT        = 5,
+   SVGA3D_DX11_LOGICOP_AND           = 6,
+   SVGA3D_DX11_LOGICOP_NAND          = 7,
+   SVGA3D_DX11_LOGICOP_OR            = 8,
+   SVGA3D_DX11_LOGICOP_NOR           = 9,
+   SVGA3D_DX11_LOGICOP_XOR           = 10,
+   SVGA3D_DX11_LOGICOP_EQUIV         = 11,
+   SVGA3D_DX11_LOGICOP_AND_REVERSE   = 12,
+   SVGA3D_DX11_LOGICOP_AND_INVERTED  = 13,
+   SVGA3D_DX11_LOGICOP_OR_REVERSE    = 14,
+   SVGA3D_DX11_LOGICOP_OR_INVERTED   = 15,
+   SVGA3D_DX11_LOGICOP_MAX
+} SVGA3dDX11LogicOp;
+
+typedef enum {
+   SVGA3D_FRONTWINDING_INVALID = 0,
+   SVGA3D_FRONTWINDING_CW      = 1,
+   SVGA3D_FRONTWINDING_CCW     = 2,
+   SVGA3D_FRONTWINDING_MAX
+} SVGA3dFrontWinding;
+
+typedef enum {
+   SVGA3D_FACE_INVALID  = 0,
+   SVGA3D_FACE_NONE     = 1,
+   SVGA3D_FACE_MIN      = 1,
+   SVGA3D_FACE_FRONT    = 2,
+   SVGA3D_FACE_BACK     = 3,
+   SVGA3D_FACE_FRONT_BACK = 4,
+   SVGA3D_FACE_MAX
+} SVGA3dFace;
+
+/*
+ * The order and the values should not be changed
+ */
+
+typedef enum {
+   SVGA3D_CMP_INVALID              = 0,
+   SVGA3D_CMP_NEVER                = 1,
+   SVGA3D_CMP_LESS                 = 2,
+   SVGA3D_CMP_EQUAL                = 3,
+   SVGA3D_CMP_LESSEQUAL            = 4,
+   SVGA3D_CMP_GREATER              = 5,
+   SVGA3D_CMP_NOTEQUAL             = 6,
+   SVGA3D_CMP_GREATEREQUAL         = 7,
+   SVGA3D_CMP_ALWAYS               = 8,
+   SVGA3D_CMP_MAX
+} SVGA3dCmpFunc;
+
+/*
+ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
+ * the fog factor to be specified in the alpha component of the specular
+ * (a.k.a. secondary) vertex color.
+ */
+typedef enum {
+   SVGA3D_FOGFUNC_INVALID          = 0,
+   SVGA3D_FOGFUNC_EXP              = 1,
+   SVGA3D_FOGFUNC_EXP2             = 2,
+   SVGA3D_FOGFUNC_LINEAR           = 3,
+   SVGA3D_FOGFUNC_PER_VERTEX       = 4
+} SVGA3dFogFunction;
+
+/*
+ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
+ * or per-pixel basis.
+ */
+typedef enum {
+   SVGA3D_FOGTYPE_INVALID          = 0,
+   SVGA3D_FOGTYPE_VERTEX           = 1,
+   SVGA3D_FOGTYPE_PIXEL            = 2,
+   SVGA3D_FOGTYPE_MAX              = 3
+} SVGA3dFogType;
+
+/*
+ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
+ * computed using the eye Z value of each pixel (or vertex), whereas range-
+ * based fog is computed using the actual distance (range) to the eye.
+ */
+typedef enum {
+   SVGA3D_FOGBASE_INVALID          = 0,
+   SVGA3D_FOGBASE_DEPTHBASED       = 1,
+   SVGA3D_FOGBASE_RANGEBASED       = 2,
+   SVGA3D_FOGBASE_MAX              = 3
+} SVGA3dFogBase;
+
+typedef enum {
+   SVGA3D_STENCILOP_INVALID        = 0,
+   SVGA3D_STENCILOP_MIN            = 1,
+   SVGA3D_STENCILOP_KEEP           = 1,
+   SVGA3D_STENCILOP_ZERO           = 2,
+   SVGA3D_STENCILOP_REPLACE        = 3,
+   SVGA3D_STENCILOP_INCRSAT        = 4,
+   SVGA3D_STENCILOP_DECRSAT        = 5,
+   SVGA3D_STENCILOP_INVERT         = 6,
+   SVGA3D_STENCILOP_INCR           = 7,
+   SVGA3D_STENCILOP_DECR           = 8,
+   SVGA3D_STENCILOP_MAX
+} SVGA3dStencilOp;
+
+typedef enum {
+   SVGA3D_CLIPPLANE_0              = (1 << 0),
+   SVGA3D_CLIPPLANE_1              = (1 << 1),
+   SVGA3D_CLIPPLANE_2              = (1 << 2),
+   SVGA3D_CLIPPLANE_3              = (1 << 3),
+   SVGA3D_CLIPPLANE_4              = (1 << 4),
+   SVGA3D_CLIPPLANE_5              = (1 << 5),
+} SVGA3dClipPlanes;
+
+typedef enum {
+   SVGA3D_CLEAR_COLOR              = 0x1,
+   SVGA3D_CLEAR_DEPTH              = 0x2,
+   SVGA3D_CLEAR_STENCIL            = 0x4,
+
+   /*
+    * Hint only, must be used together with SVGA3D_CLEAR_COLOR.  If the
+    * SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this
+    * bit will be ignored.
+    */
+   SVGA3D_CLEAR_COLORFILL          = 0x8
+} SVGA3dClearFlag;
+
+typedef enum {
+   SVGA3D_RT_DEPTH                 = 0,
+   SVGA3D_RT_MIN                   = 0,
+   SVGA3D_RT_STENCIL               = 1,
+   SVGA3D_RT_COLOR0                = 2,
+   SVGA3D_RT_COLOR1                = 3,
+   SVGA3D_RT_COLOR2                = 4,
+   SVGA3D_RT_COLOR3                = 5,
+   SVGA3D_RT_COLOR4                = 6,
+   SVGA3D_RT_COLOR5                = 7,
+   SVGA3D_RT_COLOR6                = 8,
+   SVGA3D_RT_COLOR7                = 9,
+   SVGA3D_RT_MAX,
+   SVGA3D_RT_INVALID               = ((uint32)-1),
+} SVGA3dRenderTargetType;
+
+#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint32  red   : 1;
+      uint32  green : 1;
+      uint32  blue  : 1;
+      uint32  alpha : 1;
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dColorMask;
+
+typedef enum {
+   SVGA3D_VBLEND_DISABLE            = 0,
+   SVGA3D_VBLEND_1WEIGHT            = 1,
+   SVGA3D_VBLEND_2WEIGHT            = 2,
+   SVGA3D_VBLEND_3WEIGHT            = 3,
+   SVGA3D_VBLEND_MAX                = 4,
+} SVGA3dVertexBlendFlags;
+
+typedef enum {
+   SVGA3D_WRAPCOORD_0   = 1 << 0,
+   SVGA3D_WRAPCOORD_1   = 1 << 1,
+   SVGA3D_WRAPCOORD_2   = 1 << 2,
+   SVGA3D_WRAPCOORD_3   = 1 << 3,
+   SVGA3D_WRAPCOORD_ALL = 0xF,
+} SVGA3dWrapFlags;
+
+/*
+ * SVGA_3D_CMD_TEXTURESTATE Types.  All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+   SVGA3D_TS_INVALID                    = 0,
+   SVGA3D_TS_MIN                        = 1,
+   SVGA3D_TS_BIND_TEXTURE               = 1,    /* SVGA3dSurfaceId */
+   SVGA3D_TS_COLOROP                    = 2,    /* SVGA3dTextureCombiner */
+   SVGA3D_TS_COLORARG1                  = 3,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_COLORARG2                  = 4,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAOP                    = 5,    /* SVGA3dTextureCombiner */
+   SVGA3D_TS_ALPHAARG1                  = 6,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAARG2                  = 7,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ADDRESSU                   = 8,    /* SVGA3dTextureAddress */
+   SVGA3D_TS_ADDRESSV                   = 9,    /* SVGA3dTextureAddress */
+   SVGA3D_TS_MIPFILTER                  = 10,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_MAGFILTER                  = 11,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_MINFILTER                  = 12,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_BORDERCOLOR                = 13,   /* SVGA3dColor */
+   SVGA3D_TS_TEXCOORDINDEX              = 14,   /* uint32 */
+   SVGA3D_TS_TEXTURETRANSFORMFLAGS      = 15,   /* SVGA3dTexTransformFlags */
+   SVGA3D_TS_TEXCOORDGEN                = 16,   /* SVGA3dTextureCoordGen */
+   SVGA3D_TS_BUMPENVMAT00               = 17,   /* float */
+   SVGA3D_TS_BUMPENVMAT01               = 18,   /* float */
+   SVGA3D_TS_BUMPENVMAT10               = 19,   /* float */
+   SVGA3D_TS_BUMPENVMAT11               = 20,   /* float */
+   SVGA3D_TS_TEXTURE_MIPMAP_LEVEL       = 21,   /* uint32 */
+   SVGA3D_TS_TEXTURE_LOD_BIAS           = 22,   /* float */
+   SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL  = 23,   /* uint32 */
+   SVGA3D_TS_ADDRESSW                   = 24,   /* SVGA3dTextureAddress */
+
+
+   /*
+    * Sampler Gamma Level
+    *
+    * Sampler gamma affects the color of samples taken from the sampler.  A
+    * value of 1.0 will produce linear samples.  If the value is <= 0.0 the
+    * gamma value is ignored and a linear space is used.
+    */
+
+   SVGA3D_TS_GAMMA                      = 25,   /* float */
+   SVGA3D_TS_BUMPENVLSCALE              = 26,   /* float */
+   SVGA3D_TS_BUMPENVLOFFSET             = 27,   /* float */
+   SVGA3D_TS_COLORARG0                  = 28,   /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAARG0                  = 29,   /* SVGA3dTextureArgData */
+   SVGA3D_TS_PREGB_MAX                  = 30,   /* Max value before GBObjects */
+   SVGA3D_TS_CONSTANT                   = 30,   /* SVGA3dColor */
+   SVGA3D_TS_COLOR_KEY_ENABLE           = 31,   /* SVGA3dBool */
+   SVGA3D_TS_COLOR_KEY                  = 32,   /* SVGA3dColor */
+   SVGA3D_TS_MAX
+} SVGA3dTextureStateName;
+
+typedef enum {
+   SVGA3D_TC_INVALID                   = 0,
+   SVGA3D_TC_DISABLE                   = 1,
+   SVGA3D_TC_SELECTARG1                = 2,
+   SVGA3D_TC_SELECTARG2                = 3,
+   SVGA3D_TC_MODULATE                  = 4,
+   SVGA3D_TC_ADD                       = 5,
+   SVGA3D_TC_ADDSIGNED                 = 6,
+   SVGA3D_TC_SUBTRACT                  = 7,
+   SVGA3D_TC_BLENDTEXTUREALPHA         = 8,
+   SVGA3D_TC_BLENDDIFFUSEALPHA         = 9,
+   SVGA3D_TC_BLENDCURRENTALPHA         = 10,
+   SVGA3D_TC_BLENDFACTORALPHA          = 11,
+   SVGA3D_TC_MODULATE2X                = 12,
+   SVGA3D_TC_MODULATE4X                = 13,
+   SVGA3D_TC_DSDT                      = 14,
+   SVGA3D_TC_DOTPRODUCT3               = 15,
+   SVGA3D_TC_BLENDTEXTUREALPHAPM       = 16,
+   SVGA3D_TC_ADDSIGNED2X               = 17,
+   SVGA3D_TC_ADDSMOOTH                 = 18,
+   SVGA3D_TC_PREMODULATE               = 19,
+   SVGA3D_TC_MODULATEALPHA_ADDCOLOR    = 20,
+   SVGA3D_TC_MODULATECOLOR_ADDALPHA    = 21,
+   SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
+   SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
+   SVGA3D_TC_BUMPENVMAPLUMINANCE       = 24,
+   SVGA3D_TC_MULTIPLYADD               = 25,
+   SVGA3D_TC_LERP                      = 26,
+   SVGA3D_TC_MAX
+} SVGA3dTextureCombiner;
+
+#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
+
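+/*
+ * Illustrative sketch (editor's example, hypothetical helper): the macro
+ * maps a combiner value onto a capability bit, e.g.
+ * SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE) is (1 << 3) == 8, while
+ * SVGA3D_TC_INVALID maps to 0.
+ */
+static inline u32 example_tc_modulate_cap_bit(void)
+{
+	return SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE); /* (1 << 3) == 8 */
+}
+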
+typedef enum {
+   SVGA3D_TEX_ADDRESS_INVALID    = 0,
+   SVGA3D_TEX_ADDRESS_MIN        = 1,
+   SVGA3D_TEX_ADDRESS_WRAP       = 1,
+   SVGA3D_TEX_ADDRESS_MIRROR     = 2,
+   SVGA3D_TEX_ADDRESS_CLAMP      = 3,
+   SVGA3D_TEX_ADDRESS_BORDER     = 4,
+   SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
+   SVGA3D_TEX_ADDRESS_EDGE       = 6,
+   SVGA3D_TEX_ADDRESS_MAX
+} SVGA3dTextureAddress;
+
+/*
+ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
+ * disabled, and the rasterizer should use the magnification filter instead.
+ */
+typedef enum {
+   SVGA3D_TEX_FILTER_NONE           = 0,
+   SVGA3D_TEX_FILTER_MIN            = 0,
+   SVGA3D_TEX_FILTER_NEAREST        = 1,
+   SVGA3D_TEX_FILTER_LINEAR         = 2,
+   SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
+   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_MAX
+} SVGA3dTextureFilter;
+
+typedef enum {
+   SVGA3D_TEX_TRANSFORM_OFF    = 0,
+   SVGA3D_TEX_TRANSFORM_S      = (1 << 0),
+   SVGA3D_TEX_TRANSFORM_T      = (1 << 1),
+   SVGA3D_TEX_TRANSFORM_R      = (1 << 2),
+   SVGA3D_TEX_TRANSFORM_Q      = (1 << 3),
+   SVGA3D_TEX_PROJECTED        = (1 << 15),
+} SVGA3dTexTransformFlags;
+
+typedef enum {
+   SVGA3D_TEXCOORD_GEN_OFF              = 0,
+   SVGA3D_TEXCOORD_GEN_EYE_POSITION     = 1,
+   SVGA3D_TEXCOORD_GEN_EYE_NORMAL       = 2,
+   SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
+   SVGA3D_TEXCOORD_GEN_SPHERE           = 4,
+   SVGA3D_TEXCOORD_GEN_MAX
+} SVGA3dTextureCoordGen;
+
+/*
+ * Texture argument constants for texture combiner
+ */
+typedef enum {
+   SVGA3D_TA_INVALID    = 0,
+   SVGA3D_TA_TFACTOR    = 1,
+   SVGA3D_TA_PREVIOUS   = 2,
+   SVGA3D_TA_DIFFUSE    = 3,
+   SVGA3D_TA_TEXTURE    = 4,
+   SVGA3D_TA_SPECULAR   = 5,
+   SVGA3D_TA_CONSTANT   = 6,
+   SVGA3D_TA_MAX
+} SVGA3dTextureArgData;
+
+#define SVGA3D_TM_MASK_LEN 4
+
+/* Modifiers for texture argument constants defined above. */
+typedef enum {
+   SVGA3D_TM_NONE       = 0,
+   SVGA3D_TM_ALPHA      = (1 << SVGA3D_TM_MASK_LEN),
+   SVGA3D_TM_ONE_MINUS  = (2 << SVGA3D_TM_MASK_LEN),
+} SVGA3dTextureArgModifier;
+
+/*
+ * Vertex declarations
+ *
+ * Notes:
+ *
+ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
+ * draw with any POSITIONT vertex arrays, the programmable vertex
+ * pipeline will be implicitly disabled. Drawing will take place as if
+ * no vertex shader was bound.
+ */
+
+typedef enum {
+   SVGA3D_DECLUSAGE_POSITION     = 0,
+   SVGA3D_DECLUSAGE_BLENDWEIGHT,
+   SVGA3D_DECLUSAGE_BLENDINDICES,
+   SVGA3D_DECLUSAGE_NORMAL,
+   SVGA3D_DECLUSAGE_PSIZE,
+   SVGA3D_DECLUSAGE_TEXCOORD,
+   SVGA3D_DECLUSAGE_TANGENT,
+   SVGA3D_DECLUSAGE_BINORMAL,
+   SVGA3D_DECLUSAGE_TESSFACTOR,
+   SVGA3D_DECLUSAGE_POSITIONT,
+   SVGA3D_DECLUSAGE_COLOR,
+   SVGA3D_DECLUSAGE_FOG,
+   SVGA3D_DECLUSAGE_DEPTH,
+   SVGA3D_DECLUSAGE_SAMPLE,
+   SVGA3D_DECLUSAGE_MAX
+} SVGA3dDeclUsage;
+
+typedef enum {
+   SVGA3D_DECLMETHOD_DEFAULT     = 0,
+   SVGA3D_DECLMETHOD_PARTIALU,
+   SVGA3D_DECLMETHOD_PARTIALV,
+   SVGA3D_DECLMETHOD_CROSSUV,          /* Normal */
+   SVGA3D_DECLMETHOD_UV,
+   SVGA3D_DECLMETHOD_LOOKUP,           /* Lookup a displacement map */
+   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */
+                                       /* map */
+} SVGA3dDeclMethod;
+
+typedef enum {
+   SVGA3D_DECLTYPE_FLOAT1        =  0,
+   SVGA3D_DECLTYPE_FLOAT2        =  1,
+   SVGA3D_DECLTYPE_FLOAT3        =  2,
+   SVGA3D_DECLTYPE_FLOAT4        =  3,
+   SVGA3D_DECLTYPE_D3DCOLOR      =  4,
+   SVGA3D_DECLTYPE_UBYTE4        =  5,
+   SVGA3D_DECLTYPE_SHORT2        =  6,
+   SVGA3D_DECLTYPE_SHORT4        =  7,
+   SVGA3D_DECLTYPE_UBYTE4N       =  8,
+   SVGA3D_DECLTYPE_SHORT2N       =  9,
+   SVGA3D_DECLTYPE_SHORT4N       = 10,
+   SVGA3D_DECLTYPE_USHORT2N      = 11,
+   SVGA3D_DECLTYPE_USHORT4N      = 12,
+   SVGA3D_DECLTYPE_UDEC3         = 13,
+   SVGA3D_DECLTYPE_DEC3N         = 14,
+   SVGA3D_DECLTYPE_FLOAT16_2     = 15,
+   SVGA3D_DECLTYPE_FLOAT16_4     = 16,
+   SVGA3D_DECLTYPE_MAX,
+} SVGA3dDeclType;
+
+/*
+ * This structure is used for the divisor for geometry instancing;
+ * it's a direct translation of the Direct3D equivalent.
+ */
+typedef union {
+   struct {
+      /*
+       * For index data, this number represents the number of instances to draw.
+       * For instance data, this number represents the number of
+       * instances/vertex in this stream.
+       */
+      uint32 count : 30;
+
+      /*
+       * This is 1 if this is supposed to be the data that is repeated for
+       * every instance.
+       */
+      uint32 indexedData : 1;
+
+      /*
+       * This is 1 if this is supposed to be the per-instance data.
+       */
+      uint32 instanceData : 1;
+   };
+
+   uint32 value;
+} SVGA3dVertexDivisor;
+
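+/*
+ * Illustrative sketch (editor's example, hypothetical helper): a stream
+ * carrying per-instance data that advances once per drawn instance would
+ * set instanceData with a count of 1.
+ */
+static inline SVGA3dVertexDivisor example_per_instance_divisor(void)
+{
+	SVGA3dVertexDivisor div = { .value = 0 };
+
+	div.instanceData = 1;
+	div.count = 1;
+	return div;
+}
+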
+typedef enum {
+   /*
+    * SVGA3D_PRIMITIVE_INVALID is a valid primitive type.
+    *
+    * List MIN second so debuggers will think INVALID is
+    * the correct name.
+    */
+   SVGA3D_PRIMITIVE_INVALID                     = 0,
+   SVGA3D_PRIMITIVE_MIN                         = 0,
+   SVGA3D_PRIMITIVE_TRIANGLELIST                = 1,
+   SVGA3D_PRIMITIVE_POINTLIST                   = 2,
+   SVGA3D_PRIMITIVE_LINELIST                    = 3,
+   SVGA3D_PRIMITIVE_LINESTRIP                   = 4,
+   SVGA3D_PRIMITIVE_TRIANGLESTRIP               = 5,
+   SVGA3D_PRIMITIVE_TRIANGLEFAN                 = 6,
+   SVGA3D_PRIMITIVE_LINELIST_ADJ                = 7,
+   SVGA3D_PRIMITIVE_PREDX_MAX                   = 7,
+   SVGA3D_PRIMITIVE_LINESTRIP_ADJ               = 8,
+   SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ            = 9,
+   SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ           = 10,
+   SVGA3D_PRIMITIVE_MAX
+} SVGA3dPrimitiveType;
+
+typedef enum {
+   SVGA3D_COORDINATE_INVALID                   = 0,
+   SVGA3D_COORDINATE_LEFTHANDED                = 1,
+   SVGA3D_COORDINATE_RIGHTHANDED               = 2,
+   SVGA3D_COORDINATE_MAX
+} SVGA3dCoordinateType;
+
+typedef enum {
+   SVGA3D_TRANSFORM_INVALID                     = 0,
+   SVGA3D_TRANSFORM_WORLD                       = 1,
+   SVGA3D_TRANSFORM_MIN                         = 1,
+   SVGA3D_TRANSFORM_VIEW                        = 2,
+   SVGA3D_TRANSFORM_PROJECTION                  = 3,
+   SVGA3D_TRANSFORM_TEXTURE0                    = 4,
+   SVGA3D_TRANSFORM_TEXTURE1                    = 5,
+   SVGA3D_TRANSFORM_TEXTURE2                    = 6,
+   SVGA3D_TRANSFORM_TEXTURE3                    = 7,
+   SVGA3D_TRANSFORM_TEXTURE4                    = 8,
+   SVGA3D_TRANSFORM_TEXTURE5                    = 9,
+   SVGA3D_TRANSFORM_TEXTURE6                    = 10,
+   SVGA3D_TRANSFORM_TEXTURE7                    = 11,
+   SVGA3D_TRANSFORM_WORLD1                      = 12,
+   SVGA3D_TRANSFORM_WORLD2                      = 13,
+   SVGA3D_TRANSFORM_WORLD3                      = 14,
+   SVGA3D_TRANSFORM_MAX
+} SVGA3dTransformType;
+
+typedef enum {
+   SVGA3D_LIGHTTYPE_INVALID                     = 0,
+   SVGA3D_LIGHTTYPE_MIN                         = 1,
+   SVGA3D_LIGHTTYPE_POINT                       = 1,
+   SVGA3D_LIGHTTYPE_SPOT1                       = 2, /* 1-cone, in degrees */
+   SVGA3D_LIGHTTYPE_SPOT2                       = 3, /* 2-cone, in radians */
+   SVGA3D_LIGHTTYPE_DIRECTIONAL                 = 4,
+   SVGA3D_LIGHTTYPE_MAX
+} SVGA3dLightType;
+
+typedef enum {
+   SVGA3D_CUBEFACE_POSX                         = 0,
+   SVGA3D_CUBEFACE_NEGX                         = 1,
+   SVGA3D_CUBEFACE_POSY                         = 2,
+   SVGA3D_CUBEFACE_NEGY                         = 3,
+   SVGA3D_CUBEFACE_POSZ                         = 4,
+   SVGA3D_CUBEFACE_NEGZ                         = 5,
+} SVGA3dCubeFace;
+
+typedef enum {
+   SVGA3D_SHADERTYPE_INVALID                    = 0,
+   SVGA3D_SHADERTYPE_MIN                        = 1,
+   SVGA3D_SHADERTYPE_VS                         = 1,
+   SVGA3D_SHADERTYPE_PS                         = 2,
+   SVGA3D_SHADERTYPE_PREDX_MAX                  = 3,
+   SVGA3D_SHADERTYPE_GS                         = 3,
+   SVGA3D_SHADERTYPE_DX10_MAX                   = 4,
+   SVGA3D_SHADERTYPE_HS                         = 4,
+   SVGA3D_SHADERTYPE_DS                         = 5,
+   SVGA3D_SHADERTYPE_CS                         = 6,
+   SVGA3D_SHADERTYPE_MAX                        = 7
+} SVGA3dShaderType;
+
+#define SVGA3D_NUM_SHADERTYPE_PREDX \
+   (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE_DX10 \
+   (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE \
+   (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
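+
+/*
+ * For reference, with the values above: SVGA3D_NUM_SHADERTYPE_PREDX
+ * is 2 (VS, PS), SVGA3D_NUM_SHADERTYPE_DX10 is 3 (adds GS), and
+ * SVGA3D_NUM_SHADERTYPE is 6 (adds HS, DS, CS).
+ */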
+
+typedef enum {
+   SVGA3D_CONST_TYPE_MIN                        = 0,
+   SVGA3D_CONST_TYPE_FLOAT                      = 0,
+   SVGA3D_CONST_TYPE_INT                        = 1,
+   SVGA3D_CONST_TYPE_BOOL                       = 2,
+   SVGA3D_CONST_TYPE_MAX                        = 3,
+} SVGA3dShaderConstType;
+
+/*
+ * Register limits for shader consts.
+ */
+#define SVGA3D_CONSTREG_MAX            256
+#define SVGA3D_CONSTINTREG_MAX         16
+#define SVGA3D_CONSTBOOLREG_MAX        16
+
+typedef enum {
+   SVGA3D_STRETCH_BLT_POINT                     = 0,
+   SVGA3D_STRETCH_BLT_LINEAR                    = 1,
+   SVGA3D_STRETCH_BLT_MAX
+} SVGA3dStretchBltMode;
+
+typedef enum {
+   SVGA3D_QUERYTYPE_INVALID                     = ((uint8)-1),
+   SVGA3D_QUERYTYPE_MIN                         = 0,
+   SVGA3D_QUERYTYPE_OCCLUSION                   = 0,
+   SVGA3D_QUERYTYPE_TIMESTAMP                   = 1,
+   SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT           = 2,
+   SVGA3D_QUERYTYPE_PIPELINESTATS               = 3,
+   SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE          = 4,
+   SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS           = 5,
+   SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE     = 6,
+   SVGA3D_QUERYTYPE_OCCLUSION64                 = 7,
+   SVGA3D_QUERYTYPE_EVENT                       = 8,
+   SVGA3D_QUERYTYPE_DX10_MAX                    = 9,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM0             = 9,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM1             = 10,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM2             = 11,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM3             = 12,
+   SVGA3D_QUERYTYPE_SOP_STREAM0                 = 13,
+   SVGA3D_QUERYTYPE_SOP_STREAM1                 = 14,
+   SVGA3D_QUERYTYPE_SOP_STREAM2                 = 15,
+   SVGA3D_QUERYTYPE_SOP_STREAM3                 = 16,
+   SVGA3D_QUERYTYPE_MAX
+} SVGA3dQueryType;
+
+typedef uint8 SVGA3dQueryTypeUint8;
+
+#define SVGA3D_NUM_QUERYTYPE  (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN)
+
+/*
+ * This is the maximum number of queries per context that can be active
+ * simultaneously between a beginQuery and endQuery.
+ */
+#define SVGA3D_MAX_QUERY 64
+
+/*
+ * Query result buffer formats
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 passed;
+}
+#include "vmware_pack_end.h"
+SVGADXEventQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 timestamp;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 realFrequency;
+   uint32 disjoint;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampDisjointQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 inputAssemblyVertices;
+   uint64 inputAssemblyPrimitives;
+   uint64 vertexShaderInvocations;
+   uint64 geometryShaderInvocations;
+   uint64 geometryShaderPrimitives;
+   uint64 clipperInvocations;
+   uint64 clipperPrimitives;
+   uint64 pixelShaderInvocations;
+   uint64 hullShaderInvocations;
+   uint64 domainShaderInvocations;
+   uint64 computeShaderInvocations;
+}
+#include "vmware_pack_end.h"
+SVGADXPipelineStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 anySamplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 numPrimitivesWritten;
+   uint64 numPrimitivesRequired;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 overflowed;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusion64QueryResult;
+
+/*
+ * SVGADXQueryResultUnion is not intended for use in the protocol, but is
+ * very helpful when working with queries generically.
+ */
+typedef
+#include "vmware_pack_begin.h"
+union SVGADXQueryResultUnion {
+   SVGADXOcclusionQueryResult occ;
+   SVGADXEventQueryResult event;
+   SVGADXTimestampQueryResult ts;
+   SVGADXTimestampDisjointQueryResult tsDisjoint;
+   SVGADXPipelineStatisticsQueryResult pipelineStats;
+   SVGADXOcclusionPredicateQueryResult occPred;
+   SVGADXStreamOutStatisticsQueryResult soStats;
+   SVGADXStreamOutPredicateQueryResult soPred;
+   SVGADXOcclusion64QueryResult occ64;
+}
+#include "vmware_pack_end.h"
+SVGADXQueryResultUnion;
+
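+/*
+ * A minimal usage sketch (illustrative only, not part of these
+ * headers): reading a completed result generically through the union.
+ * 'buf' and 'type' are assumed to come from the caller.
+ *
+ *   SVGADXQueryResultUnion *res = (SVGADXQueryResultUnion *)buf;
+ *   uint64 samples = 0;
+ *
+ *   switch (type) {
+ *   case SVGA3D_QUERYTYPE_OCCLUSION:
+ *      samples = res->occ.samplesRendered;
+ *      break;
+ *   case SVGA3D_QUERYTYPE_OCCLUSION64:
+ *      samples = res->occ64.samplesRendered;
+ *      break;
+ *   default:
+ *      break;
+ *   }
+ */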
+
+typedef enum {
+   SVGA3D_QUERYSTATE_PENDING     = 0,      /* Query is not finished yet */
+   SVGA3D_QUERYSTATE_SUCCEEDED   = 1,      /* Completed successfully */
+   SVGA3D_QUERYSTATE_FAILED      = 2,      /* Completed unsuccessfully */
+   SVGA3D_QUERYSTATE_NEW         = 3,      /* Never submitted (guest only) */
+} SVGA3dQueryState;
+
+typedef enum {
+   SVGA3D_WRITE_HOST_VRAM        = 1,
+   SVGA3D_READ_HOST_VRAM         = 2,
+} SVGA3dTransferType;
+
+typedef enum {
+   SVGA3D_LOGICOP_INVALID   = 0,
+   SVGA3D_LOGICOP_MIN       = 1,
+   SVGA3D_LOGICOP_COPY      = 1,
+   SVGA3D_LOGICOP_NOT       = 2,
+   SVGA3D_LOGICOP_AND       = 3,
+   SVGA3D_LOGICOP_OR        = 4,
+   SVGA3D_LOGICOP_XOR       = 5,
+   SVGA3D_LOGICOP_NXOR      = 6,
+   SVGA3D_LOGICOP_ROP3MIN   = 30,   /* 7-29 are reserved for future logic ops. */
+   SVGA3D_LOGICOP_ROP3MAX   = (SVGA3D_LOGICOP_ROP3MIN + 255),
+   SVGA3D_LOGICOP_MAX       = (SVGA3D_LOGICOP_ROP3MAX + 1),
+} SVGA3dLogicOp;
+
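+/*
+ * Note: the 256 values from SVGA3D_LOGICOP_ROP3MIN through
+ * SVGA3D_LOGICOP_ROP3MAX leave room for one entry per 8-bit ROP3
+ * code, i.e. presumably a raster operation code 'rop3' maps to
+ * (SVGA3D_LOGICOP_ROP3MIN + rop3); the header itself does not state
+ * this mapping, so treat it as an assumption.
+ */
+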
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   union {
+      struct {
+	 uint16  function;       /* SVGA3dFogFunction */
+	 uint8   type;           /* SVGA3dFogType */
+	 uint8   base;           /* SVGA3dFogBase */
+      };
+      uint32     uintValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dFogMode;
+
+/*
+ * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
+ * is a surface ID as well as face/mipmap indices.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSurfaceImageId {
+   uint32               sid;
+   uint32               face;
+   uint32               mipmap;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceImageId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               width;
+   uint32               height;
+   uint32               depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dSize;
+
+/*
+ * Guest-backed objects definitions.
+ */
+typedef enum {
+   SVGA_OTABLE_MOB             = 0,
+   SVGA_OTABLE_MIN             = 0,
+   SVGA_OTABLE_SURFACE         = 1,
+   SVGA_OTABLE_CONTEXT         = 2,
+   SVGA_OTABLE_SHADER          = 3,
+   SVGA_OTABLE_SCREENTARGET    = 4,
+
+   SVGA_OTABLE_DX9_MAX         = 5,
+
+   SVGA_OTABLE_DXCONTEXT       = 5,
+   SVGA_OTABLE_MAX             = 6
+} SVGAOTableType;
+
+/*
+ * Deprecated.
+ */
+#define SVGA_OTABLE_COUNT 4
+
+typedef enum {
+   SVGA_COTABLE_MIN             = 0,
+   SVGA_COTABLE_RTVIEW          = 0,
+   SVGA_COTABLE_DSVIEW          = 1,
+   SVGA_COTABLE_SRVIEW          = 2,
+   SVGA_COTABLE_ELEMENTLAYOUT   = 3,
+   SVGA_COTABLE_BLENDSTATE      = 4,
+   SVGA_COTABLE_DEPTHSTENCIL    = 5,
+   SVGA_COTABLE_RASTERIZERSTATE = 6,
+   SVGA_COTABLE_SAMPLER         = 7,
+   SVGA_COTABLE_STREAMOUTPUT    = 8,
+   SVGA_COTABLE_DXQUERY         = 9,
+   SVGA_COTABLE_DXSHADER        = 10,
+   SVGA_COTABLE_DX10_MAX        = 11,
+   SVGA_COTABLE_UAVIEW          = 11,
+   SVGA_COTABLE_MAX
+} SVGACOTableType;
+
+/*
+ * The largest size (number of entries) allowed in a COTable.
+ */
+#define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2)
+
+typedef enum SVGAMobFormat {
+   SVGA3D_MOBFMT_INVALID     = SVGA3D_INVALID_ID,
+   SVGA3D_MOBFMT_PTDEPTH_0   = 0,
+   SVGA3D_MOBFMT_MIN         = 0,
+   SVGA3D_MOBFMT_PTDEPTH_1   = 1,
+   SVGA3D_MOBFMT_PTDEPTH_2   = 2,
+   SVGA3D_MOBFMT_RANGE       = 3,
+   SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+   SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+   SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+   SVGA3D_MOBFMT_PREDX_MAX   = 7,
+   SVGA3D_MOBFMT_EMPTY       = 7,
+   SVGA3D_MOBFMT_MAX,
+} SVGAMobFormat;
+
+#define SVGA3D_MOB_EMPTY_BASE 1
+
+#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
index 8e8d9682e018..884b1d1fb85f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -1,5 +1,5 @@
 /**********************************************************
- * Copyright 2007-2009 VMware, Inc.  All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
index f38416fcb046..faf6d9b2b891 100644
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -1,5 +1,5 @@
 /**********************************************************
- * Copyright 2007-2009 VMware, Inc.  All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -152,19 +152,17 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format,    /* IN */
     switch (format) {
     case VMWARE_FOURCC_YV12:
        *height = (*height + 1) & ~1;
-       *size = (*width + 3) & ~3;
+       *size = (*width) * (*height);
 
        if (pitches) {
-          pitches[0] = *size;
+          pitches[0] = *width;
        }
 
-       *size *= *height;
-
        if (offsets) {
           offsets[1] = *size;
        }
 
-       tmp = ((*width >> 1) + 3) & ~3;
+       tmp = *width >> 1;
 
        if (pitches) {
           pitches[1] = pitches[2] = tmp;
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index e4259c2c1acc..6e0ccb70a700 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,5 +1,5 @@
 /**********************************************************
- * Copyright 1998-2009 VMware, Inc.  All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -31,20 +31,38 @@
 
 #ifndef _SVGA_REG_H_
 #define _SVGA_REG_H_
+#include <linux/pci_ids.h>
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga_types.h"
 
 /*
- * PCI device IDs.
+ * SVGA_REG_ENABLE bit definitions.
  */
-#define PCI_DEVICE_ID_VMWARE_SVGA2      0x0405
+typedef enum {
+   SVGA_REG_ENABLE_DISABLE = 0,
+   SVGA_REG_ENABLE_ENABLE = (1 << 0),
+   SVGA_REG_ENABLE_HIDE = (1 << 1),
+} SvgaRegEnable;
+
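+/*
+ * These enable values are bit flags and may be OR'd together; the
+ * old SVGA_REG_ENABLE_ENABLE_HIDE define replaced by this enum was
+ * (SVGA_REG_ENABLE_ENABLE | SVGA_REG_ENABLE_HIDE).
+ */
+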
+typedef uint32 SVGAMobId;
 
 /*
- * SVGA_REG_ENABLE bit definitions.
+ * Arbitrary and meaningless limits. Please ignore these when writing
+ * new drivers.
  */
-#define SVGA_REG_ENABLE_DISABLE     0
-#define SVGA_REG_ENABLE_ENABLE      1
-#define SVGA_REG_ENABLE_HIDE        2
-#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
-				     SVGA_REG_ENABLE_HIDE)
+#define SVGA_MAX_WIDTH                  2560
+#define SVGA_MAX_HEIGHT                 1600
+
+
+#define SVGA_MAX_BITS_PER_PIXEL         32
+#define SVGA_MAX_DEPTH                  24
+#define SVGA_MAX_DISPLAYS               10
 
 /*
  * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
@@ -57,14 +75,9 @@
 #define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3   /* Put the cursor back in the framebuffer so the user can see it */
 
 /*
- * The maximum framebuffer size that can traced for e.g. guests in VESA mode.
- * The changeMap in the monitor is proportional to this number. Therefore, we'd
- * like to keep it as small as possible to reduce monitor overhead (using
- * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
- * 4k!).
- *
- * NB: For compatibility reasons, this value must be greater than 0xff0000.
- *     See bug 335072.
+ * The maximum framebuffer size that can be traced for guests unless
+ * SVGA_CAP_GBOBJECTS is set in SVGA_REG_CAPABILITIES.  In that case
+ * the full framebuffer can be traced independent of this limit.
  */
 #define SVGA_FB_MAX_TRACEABLE_SIZE      0x1000000
 
@@ -106,6 +119,8 @@
 #define SVGA_IRQFLAG_ANY_FENCE            0x1    /* Any fence was passed */
 #define SVGA_IRQFLAG_FIFO_PROGRESS        0x2    /* Made forward progress in the FIFO */
 #define SVGA_IRQFLAG_FENCE_GOAL           0x4    /* SVGA_FIFO_FENCE_GOAL reached */
+#define SVGA_IRQFLAG_COMMAND_BUFFER       0x8    /* Command buffer completed */
+#define SVGA_IRQFLAG_ERROR                0x10   /* Error while processing commands */
 
 /*
  * Registers
@@ -131,6 +146,7 @@ enum {
    SVGA_REG_FB_SIZE = 16,
 
    /* ID 0 implementation only had the above registers, then the palette */
+   SVGA_REG_ID_0_TOP = 17,
 
    SVGA_REG_CAPABILITIES = 17,
    SVGA_REG_MEM_START = 18,           /* (Deprecated) */
@@ -171,7 +187,7 @@ enum {
    SVGA_REG_COMMAND_LOW = 48,       /* Lower 32 bits and submits commands */
    SVGA_REG_COMMAND_HIGH = 49,      /* Upper 32 bits of command buffer PA */
    SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
-   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
    SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
    SVGA_REG_CMD_PREPEND_LOW = 53,
    SVGA_REG_CMD_PREPEND_HIGH = 54,
@@ -182,7 +198,6 @@ enum {
 
    SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
-
    SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
                                     /* Base of scratch registers */
    /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
@@ -190,7 +205,6 @@ enum {
       the use of the current SVGA driver. */
 };
 
-
 /*
  * Guest memory regions (GMRs):
  *
@@ -288,17 +302,205 @@ enum {
 #define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  /* Guest Framebuffer (GFB) */
 
 typedef
+#include "vmware_pack_begin.h"
 struct SVGAGuestMemDescriptor {
    uint32 ppn;
    uint32 numPages;
-} SVGAGuestMemDescriptor;
+}
+#include "vmware_pack_end.h"
+SVGAGuestMemDescriptor;
 
 typedef
+#include "vmware_pack_begin.h"
 struct SVGAGuestPtr {
    uint32 gmrId;
    uint32 offset;
-} SVGAGuestPtr;
+}
+#include "vmware_pack_end.h"
+SVGAGuestPtr;
+
+/*
+ * Register based command buffers --
+ *
+ * Provide an SVGA device interface that allows the guest to submit
+ * command buffers to the SVGA device through an SVGA device register.
+ * The metadata for each command buffer is contained in the
+ * SVGACBHeader structure along with the return status codes.
+ *
+ * The SVGA device supports command buffers if
+ * SVGA_CAP_COMMAND_BUFFERS is set in the device caps register.  The
+ * fifo must be enabled for command buffers to be submitted.
+ *
+ * Command buffers are submitted by the guest writing the 64 byte
+ * aligned physical address into the SVGA_REG_COMMAND_LOW and
+ * SVGA_REG_COMMAND_HIGH registers.  SVGA_REG_COMMAND_HIGH contains
+ * the upper 32 bits of the physical address.  SVGA_REG_COMMAND_LOW
+ * contains the lower 32 bits of the physical address; since command
+ * buffer headers are required to be 64 byte aligned, the lower 6 bits
+ * are used for the SVGACBContext value.  Writing to SVGA_REG_COMMAND_LOW
+ * submits the command buffer to the device and queues it for
+ * execution.  The SVGA device supports at least
+ * SVGA_CB_MAX_QUEUED_PER_CONTEXT command buffers that can be queued
+ * per context, and if that limit is reached, the device will write the
+ * status SVGA_CB_STATUS_QUEUE_FULL to the status value of the command
+ * buffer header synchronously and not raise any IRQs.
+ *
+ * It is invalid to submit a command buffer without a valid physical
+ * address and results are undefined.
+ *
+ * The device guarantees that command buffers of size SVGA_CB_MAX_SIZE
+ * will be supported.  If a larger command buffer is submitted results
+ * are unspecified and the device will either complete the command
+ * buffer or return an error.
+ *
+ * The device guarantees that any individual command in a command
+ * buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size, which is
+ * enough to fit a 64x64 color-cursor definition.  If the command is
+ * too large the device is allowed to process the command or return an
+ * error.
+ *
+ * The device context is a special SVGACBContext that allows for
+ * synchronous register-like accesses with the flexibility of
+ * commands.  There is a different command set defined by
+ * SVGADeviceContextCmdId.  The commands in each command buffer are not
+ * allowed to straddle physical pages.
+ *
+ * The offset field, which is available starting with the
+ * SVGA_CAP_CMD_BUFFERS_2 cap bit, can be set by the guest to bias the
+ * start of command processing into the buffer.  If an error is
+ * encountered the errorOffset will still be relative to the specific
+ * PA, not biased by the offset.  When the command buffer is finished
+ * the guest should not read the offset field as there is no guarantee
+ * what it will be set to.
+ */
+
+#define SVGA_CB_MAX_SIZE (512 * 1024)  /* 512 KB */
+#define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32
+#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */
+
+#define SVGA_CB_CONTEXT_MASK 0x3f
+typedef enum {
+   SVGA_CB_CONTEXT_DEVICE = 0x3f,
+   SVGA_CB_CONTEXT_0      = 0x0,
+   SVGA_CB_CONTEXT_MAX    = 0x1,
+} SVGACBContext;
+
+
+typedef enum {
+   /*
+    * The guest is supposed to write SVGA_CB_STATUS_NONE to the status
+    * field before submitting the command buffer header; the host will
+    * change the value when it is done with the command buffer.
+    */
+   SVGA_CB_STATUS_NONE             = 0,
+
+   /*
+    * Written by the host when a command buffer completes successfully.
+    * The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless
+    * the SVGA_CB_FLAG_NO_IRQ flag is set.
+    */
+   SVGA_CB_STATUS_COMPLETED        = 1,
+
+   /*
+    * Written by the host synchronously with the command buffer
+    * submission to indicate the command buffer was not submitted.  No
+    * IRQ is raised.
+    */
+   SVGA_CB_STATUS_QUEUE_FULL       = 2,
+
+   /*
+    * Written by the host when an error was detected parsing a command
+    * in the command buffer; errorOffset is written to contain the
+    * offset to the first byte of the failing command.  The device
+    * raises the IRQ with both SVGA_IRQFLAG_ERROR and
+    * SVGA_IRQFLAG_COMMAND_BUFFER.  Some of the commands may have been
+    * processed.
+    */
+   SVGA_CB_STATUS_COMMAND_ERROR    = 3,
+
+   /*
+    * Written by the host if there is an error parsing the command
+    * buffer header.  The device raises the IRQ with both
+    * SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER.  The device
+    * did not process any of the command buffer.
+    */
+   SVGA_CB_STATUS_CB_HEADER_ERROR  = 4,
 
+   /*
+    * Written by the host if the guest requested the host to preempt
+    * the command buffer.  The device will not raise any IRQs and the
+    * command buffer was not processed.
+    */
+   SVGA_CB_STATUS_PREEMPTED        = 5,
+
+   /*
+    * Written by the host synchronously with the command buffer
+    * submission to indicate that the command buffer was not submitted
+    * due to an error.  No IRQ is raised.
+    */
+   SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+} SVGACBStatus;
+
+typedef enum {
+   SVGA_CB_FLAG_NONE       = 0,
+   SVGA_CB_FLAG_NO_IRQ     = 1 << 0,
+   SVGA_CB_FLAG_DX_CONTEXT = 1 << 1,
+   SVGA_CB_FLAG_MOB        = 1 << 2,
+} SVGACBFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   volatile SVGACBStatus status;
+   volatile uint32 errorOffset;
+   uint64 id;
+   SVGACBFlags flags;
+   uint32 length;
+   union {
+      PA pa;
+      struct {
+         SVGAMobId mobid;
+         uint32 mobOffset;
+      } mob;
+   } ptr;
+   uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
+   uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
+   uint32 mustBeZero[6];
+}
+#include "vmware_pack_end.h"
+SVGACBHeader;
+
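+/*
+ * A minimal submission sketch (illustrative only, per the description
+ * above): 'pa' is the 64 byte aligned physical address of an
+ * SVGACBHeader and 'svga_write_reg' is a hypothetical register
+ * accessor, not part of this interface.
+ *
+ *   header->status = SVGA_CB_STATUS_NONE;  // host overwrites this
+ *   svga_write_reg(SVGA_REG_COMMAND_HIGH, (uint32)(pa >> 32));
+ *   // the low 6 bits of the low word carry the SVGACBContext;
+ *   // writing the low word submits the buffer
+ *   svga_write_reg(SVGA_REG_COMMAND_LOW,
+ *                  (uint32)pa | (uint32)SVGA_CB_CONTEXT_0);
+ */
+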
+typedef enum {
+   SVGA_DC_CMD_NOP                   = 0,
+   SVGA_DC_CMD_START_STOP_CONTEXT    = 1,
+   SVGA_DC_CMD_PREEMPT               = 2,
+   SVGA_DC_CMD_MAX                   = 3,
+   SVGA_DC_CMD_FORCE_UINT            = MAX_UINT32,
+} SVGADeviceContextCmdId;
+
+typedef struct {
+   uint32 enable;
+   SVGACBContext context;
+} SVGADCCmdStartStop;
+
+/*
+ * SVGADCCmdPreempt --
+ *
+ * This command allows the guest to request that all command buffers
+ * on the specified context that can be preempted are preempted.
+ * After execution of this command all command buffers that were
+ * preempted will already have SVGA_CB_STATUS_PREEMPTED written into
+ * the status field.  The device might still be processing a command
+ * buffer, assuming execution of it started before the preemption
+ * request was received.  Setting the ignoreIDZero flag to TRUE will
+ * cause the device to not preempt command buffers with the id field
+ * in the command buffer header set to zero.
+ */
+
+typedef struct {
+   SVGACBContext context;
+   uint32 ignoreIDZero;
+} SVGADCCmdPreempt;
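+
+/*
+ * A usage sketch (illustrative only): requesting preemption of
+ * context 0 while leaving id-zero command buffers alone.  How the
+ * SVGA_DC_CMD_PREEMPT id and this body are packaged into a device
+ * context command buffer is assumed, not specified here.
+ *
+ *   SVGADCCmdPreempt cmd;
+ *
+ *   cmd.context = SVGA_CB_CONTEXT_0;
+ *   cmd.ignoreIDZero = TRUE;
+ */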
 
 /*
  * SVGAGMRImageFormat --
@@ -320,13 +522,12 @@ struct SVGAGuestPtr {
  *
  */
 
-typedef
-struct SVGAGMRImageFormat {
+typedef struct SVGAGMRImageFormat {
    union {
       struct {
          uint32 bitsPerPixel : 8;
          uint32 colorDepth   : 8;
-         uint32 reserved     : 16;  /* Must be zero */
+         uint32 reserved     : 16;  /* Must be zero */
       };
 
       uint32 value;
@@ -334,6 +535,7 @@ struct SVGAGMRImageFormat {
 } SVGAGMRImageFormat;
 
 typedef
+#include "vmware_pack_begin.h"
 struct SVGAGuestImage {
    SVGAGuestPtr         ptr;
 
@@ -353,7 +555,9 @@ struct SVGAGuestImage {
     * assuming each row of blocks is tightly packed.
     */
    uint32 pitch;
-} SVGAGuestImage;
+}
+#include "vmware_pack_end.h"
+SVGAGuestImage;
 
 /*
  * SVGAColorBGRX --
@@ -363,14 +567,13 @@ struct SVGAGuestImage {
  *    GMRFB state.
  */
 
-typedef
-struct SVGAColorBGRX {
+typedef struct SVGAColorBGRX {
    union {
       struct {
          uint32 b : 8;
          uint32 g : 8;
          uint32 r : 8;
-         uint32 x : 8;  /* Unused */
+         uint32 x : 8;  /* Unused */
       };
 
       uint32 value;
@@ -392,26 +595,49 @@ struct SVGAColorBGRX {
  */
 
 typedef
-struct SVGASignedRect {
+#include "vmware_pack_begin.h"
+struct {
    int32  left;
    int32  top;
    int32  right;
    int32  bottom;
-} SVGASignedRect;
+}
+#include "vmware_pack_end.h"
+SVGASignedRect;
 
 typedef
-struct SVGASignedPoint {
+#include "vmware_pack_begin.h"
+struct {
    int32  x;
    int32  y;
-} SVGASignedPoint;
+}
+#include "vmware_pack_end.h"
+SVGASignedPoint;
 
 
 /*
- *  Capabilities
+ * SVGA Device Capabilities
+ *
+ * Note the holes in the bitfield. Missing bits have been deprecated,
+ * and must not be reused. Those capabilities will never be reported
+ * by new versions of the SVGA device.
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ *      of the new features that each capability provides.
  *
- *  Note the holes in the bitfield. Missing bits have been deprecated,
- *  and must not be reused. Those capabilities will never be reported
- *  by new versions of the SVGA device.
+ * SVGA_CAP_IRQMASK --
+ *    Provides device interrupts.  Adds device register SVGA_REG_IRQMASK
+ *    to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
+ *    set/clear pending interrupts.
+ *
+ * SVGA_CAP_GMR --
+ *    Provides synchronous mapping of guest memory regions (GMR).
+ *    Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR,
+ *    SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH.
+ *
+ * SVGA_CAP_TRACES --
+ *    Allows framebuffer trace-based updates even when FIFO is enabled.
+ *    Adds device register SVGA_REG_TRACES.
  *
  * SVGA_CAP_GMR2 --
  *    Provides asynchronous commands to define and remap guest memory
@@ -421,21 +647,39 @@ struct SVGASignedPoint {
  * SVGA_CAP_SCREEN_OBJECT_2 --
  *    Allow screen object support, and require backing stores from the
  *    guest for each screen object.
+ *
+ * SVGA_CAP_COMMAND_BUFFERS --
+ *    Enable register based command buffer submission.
+ *
+ * SVGA_CAP_DEAD1 --
+ *    This cap was incorrectly used by old drivers and should not be
+ *    reused.
+ *
+ * SVGA_CAP_CMD_BUFFERS_2 --
+ *    Enable support for the prepend command buffer submission
+ *    registers, SVGA_REG_CMD_PREPEND_LOW and
+ *    SVGA_REG_CMD_PREPEND_HIGH.
+ *
+ * SVGA_CAP_GBOBJECTS --
+ *    Enable guest-backed objects and surfaces.
+ *
+ * SVGA_CAP_CMD_BUFFERS_3 --
+ *    Enable support for command buffers in a mob.
  */
 
 #define SVGA_CAP_NONE               0x00000000
 #define SVGA_CAP_RECT_COPY          0x00000002
 #define SVGA_CAP_CURSOR             0x00000020
-#define SVGA_CAP_CURSOR_BYPASS      0x00000040   /* Legacy (Use Cursor Bypass 3 instead) */
-#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080   /* Legacy (Use Cursor Bypass 3 instead) */
+#define SVGA_CAP_CURSOR_BYPASS      0x00000040
+#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080
 #define SVGA_CAP_8BIT_EMULATION     0x00000100
 #define SVGA_CAP_ALPHA_CURSOR       0x00000200
 #define SVGA_CAP_3D                 0x00004000
 #define SVGA_CAP_EXTENDED_FIFO      0x00008000
-#define SVGA_CAP_MULTIMON           0x00010000   /* Legacy multi-monitor support */
+#define SVGA_CAP_MULTIMON           0x00010000
 #define SVGA_CAP_PITCHLOCK          0x00020000
 #define SVGA_CAP_IRQMASK            0x00040000
-#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000   /* Legacy multi-monitor support */
+#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000
 #define SVGA_CAP_GMR                0x00100000
 #define SVGA_CAP_TRACES             0x00200000
 #define SVGA_CAP_GMR2               0x00400000
@@ -444,6 +688,33 @@ struct SVGASignedPoint {
 #define SVGA_CAP_DEAD1              0x02000000
 #define SVGA_CAP_CMD_BUFFERS_2      0x04000000
 #define SVGA_CAP_GBOBJECTS          0x08000000
+#define SVGA_CAP_DX                 0x10000000
+
+#define SVGA_CAP_CMD_RESERVED       0x80000000
+
+
+/*
+ * The Guest can optionally read some SVGA device capabilities through
+ * the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before
+ * the SVGA device is initialized.  The type of capability the guest
+ * is requesting from the SVGABackdoorCapType enum should be placed in
+ * the upper 16 bits of the backdoor command id (ECX).  On success
+ * the value of EBX will be set to BDOOR_MAGIC and EAX will be set to
+ * the requested capability.  If the command is not supported then EBX
+ * will be left unchanged and EAX will be set to -1.  Because it is
+ * possible that -1 is the value of the requested cap, the correct way
+ * to check whether the command was successful is to check whether EBX
+ * was changed to BDOOR_MAGIC, making sure to initialize the register
+ * to something else first.
+ */
+
+typedef enum {
+   SVGABackdoorCapDeviceCaps = 0,
+   SVGABackdoorCapFifoCaps = 1,
+   SVGABackdoorCap3dHWVersion = 2,
+   SVGABackdoorCapMax = 3,
+} SVGABackdoorCapType;
+
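+/*
+ * A checking sketch (illustrative only): 'regs' and 'backdoor_call'
+ * are hypothetical helpers; only the ECX/EBX/EAX convention is taken
+ * from the description above.
+ *
+ *   regs.ebx = 0;  // anything other than BDOOR_MAGIC
+ *   regs.ecx = BDOOR_CMD_GET_SVGA_CAPABILITIES |
+ *              (SVGABackdoorCapDeviceCaps << 16);
+ *   backdoor_call(&regs);
+ *   if (regs.ebx == BDOOR_MAGIC)
+ *      caps = regs.eax;  // note: -1 can be a legitimate value
+ */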
 
 /*
  * FIFO register indices.
@@ -883,7 +1154,8 @@ enum {
    SVGA_VIDEO_PITCH_2,
    SVGA_VIDEO_PITCH_3,
    SVGA_VIDEO_DATA_GMRID,    /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
-   SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
+   SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */
+                             /* (SVGA_ID_INVALID) */
    SVGA_VIDEO_NUM_REGS
 };
 
@@ -896,7 +1168,9 @@ enum {
  *      video frame to be displayed.
  */
 
-typedef struct SVGAOverlayUnit {
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAOverlayUnit {
    uint32 enabled;
    uint32 flags;
    uint32 dataOffset;
@@ -916,7 +1190,27 @@ typedef struct SVGAOverlayUnit {
    uint32 pitches[3];
    uint32 dataGMRId;
    uint32 dstScreenId;
-} SVGAOverlayUnit;
+}
+#include "vmware_pack_end.h"
+SVGAOverlayUnit;
+
+
+/*
+ * Guest display topology
+ *
+ * XXX: This structure is not part of the SVGA device's interface, and
+ * doesn't really belong here.
+ */
+#define SVGA_INVALID_DISPLAY_ID ((uint32)-1)
+
+typedef struct SVGADisplayTopology {
+   uint16 displayId;
+   uint16 isPrimary;
+   uint32 width;
+   uint32 height;
+   uint32 positionX;
+   uint32 positionY;
+} SVGADisplayTopology;
 
 
 /*
@@ -951,10 +1245,10 @@ typedef struct SVGAOverlayUnit {
  *    value of zero means no cloning should happen.
  */
 
-#define SVGA_SCREEN_MUST_BE_SET     (1 << 0) /* Must be set or results undefined */
+#define SVGA_SCREEN_MUST_BE_SET     (1 << 0)
 #define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
-#define SVGA_SCREEN_IS_PRIMARY      (1 << 1) /* Guest considers this screen to be 'primary' */
-#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
+#define SVGA_SCREEN_IS_PRIMARY      (1 << 1)
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)
 
 /*
  * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When the screen is
@@ -977,7 +1271,8 @@ typedef struct SVGAOverlayUnit {
 #define SVGA_SCREEN_BLANKING (1 << 4)
 
 typedef
-struct SVGAScreenObject {
+#include "vmware_pack_begin.h"
+struct {
    uint32 structSize;   /* sizeof(SVGAScreenObject) */
    uint32 id;
    uint32 flags;
@@ -995,8 +1290,17 @@ struct SVGAScreenObject {
     * with SVGA_FIFO_CAP_SCREEN_OBJECT.
     */
    SVGAGuestImage backingStore;
+
+   /*
+    * The cloneCount field is treated as a hint from the guest that
+    * the user wants this display to be cloned, cloneCount times.
+    *
+    * A value of zero means no cloning should happen.
+    */
    uint32 cloneCount;
-} SVGAScreenObject;
+}
+#include "vmware_pack_end.h"
+SVGAScreenObject;
 
 
 /*
@@ -1009,7 +1313,7 @@ struct SVGAScreenObject {
  *  Note the holes in the command ID numbers: These commands have been
  *  deprecated, and the old IDs must not be reused.
  *
- *  Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
+ *  Command IDs from 1000 to 2999 are reserved for use by the SVGA3D
  *  protocol.
  *
  *  Each command's parameters are described by the comments and
@@ -1020,6 +1324,7 @@ typedef enum {
    SVGA_CMD_INVALID_CMD           = 0,
    SVGA_CMD_UPDATE                = 1,
    SVGA_CMD_RECT_COPY             = 3,
+   SVGA_CMD_RECT_ROP_COPY         = 14,
    SVGA_CMD_DEFINE_CURSOR         = 19,
    SVGA_CMD_DEFINE_ALPHA_CURSOR   = 22,
    SVGA_CMD_UPDATE_VERBOSE        = 25,
@@ -1035,9 +1340,14 @@ typedef enum {
    SVGA_CMD_ANNOTATION_COPY       = 40,
    SVGA_CMD_DEFINE_GMR2           = 41,
    SVGA_CMD_REMAP_GMR2            = 42,
+   SVGA_CMD_DEAD                  = 43,
+   SVGA_CMD_DEAD_2                = 44,
+   SVGA_CMD_NOP                   = 45,
+   SVGA_CMD_NOP_ERROR             = 46,
    SVGA_CMD_MAX
 } SVGAFifoCmdId;
 
+#define SVGA_CMD_MAX_DATASIZE       (256 * 1024)
 #define SVGA_CMD_MAX_ARGS           64
 
 
@@ -1070,12 +1380,15 @@ typedef enum {
  */
 
 typedef
-struct SVGAFifoCmdUpdate {
+#include "vmware_pack_begin.h"
+struct {
    uint32 x;
    uint32 y;
    uint32 width;
    uint32 height;
-} SVGAFifoCmdUpdate;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdate;
 
 
 /*
@@ -1089,14 +1402,44 @@ struct SVGAFifoCmdUpdate {
  */
 
 typedef
-struct SVGAFifoCmdRectCopy {
+#include "vmware_pack_begin.h"
+struct {
+   uint32 srcX;
+   uint32 srcY;
+   uint32 destX;
+   uint32 destY;
+   uint32 width;
+   uint32 height;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectCopy;
+
+
+/*
+ * SVGA_CMD_RECT_ROP_COPY --
+ *
+ *    Perform a rectangular DMA transfer from one area of the GFB to
+ *    another, and copy the result to any screens which intersect it.
+ *    The value of ROP may only be SVGA_ROP_COPY, and this command is
+ *    only supported for backwards compatibility reasons.
+ *
+ * Availability:
+ *    SVGA_CAP_RECT_COPY
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
    uint32 srcX;
    uint32 srcY;
    uint32 destX;
    uint32 destY;
    uint32 width;
    uint32 height;
-} SVGAFifoCmdRectCopy;
+   uint32 rop;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectRopCopy;
 
 
 /*
@@ -1113,7 +1456,8 @@ struct SVGAFifoCmdRectCopy {
  */
 
 typedef
-struct SVGAFifoCmdDefineCursor {
+#include "vmware_pack_begin.h"
+struct {
    uint32 id;             /* Reserved, must be zero. */
    uint32 hotspotX;
    uint32 hotspotY;
@@ -1125,7 +1469,9 @@ struct SVGAFifoCmdDefineCursor {
     * Followed by scanline data for AND mask, then XOR mask.
     * Each scanline is padded to a 32-bit boundary.
    */
-} SVGAFifoCmdDefineCursor;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineCursor;
 
 
 /*
@@ -1142,14 +1488,17 @@ struct SVGAFifoCmdDefineCursor {
  */
 
 typedef
-struct SVGAFifoCmdDefineAlphaCursor {
+#include "vmware_pack_begin.h"
+struct {
    uint32 id;             /* Reserved, must be zero. */
    uint32 hotspotX;
    uint32 hotspotY;
    uint32 width;
    uint32 height;
    /* Followed by scanline data */
-} SVGAFifoCmdDefineAlphaCursor;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineAlphaCursor;
 
 
 /*
@@ -1165,13 +1514,16 @@ struct SVGAFifoCmdDefineAlphaCursor {
  */
 
 typedef
-struct SVGAFifoCmdUpdateVerbose {
+#include "vmware_pack_begin.h"
+struct {
    uint32 x;
    uint32 y;
    uint32 width;
    uint32 height;
    uint32 reason;
-} SVGAFifoCmdUpdateVerbose;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdateVerbose;
 
 
 /*
@@ -1190,14 +1542,17 @@ struct SVGAFifoCmdUpdateVerbose {
 #define  SVGA_ROP_COPY                    0x03
 
 typedef
-struct SVGAFifoCmdFrontRopFill {
+#include "vmware_pack_begin.h"
+struct {
    uint32 color;     /* In the same format as the GFB */
    uint32 x;
    uint32 y;
    uint32 width;
    uint32 height;
    uint32 rop;       /* Must be SVGA_ROP_COPY */
-} SVGAFifoCmdFrontRopFill;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFrontRopFill;
 
 
 /*
@@ -1216,9 +1571,12 @@ struct SVGAFifoCmdFrontRopFill {
  */
 
 typedef
+#include "vmware_pack_begin.h"
 struct {
    uint32 fence;
-} SVGAFifoCmdFence;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFence;
 
 
 /*
@@ -1233,11 +1591,14 @@ struct {
  */
 
 typedef
-struct SVGAFifoCmdEscape {
+#include "vmware_pack_begin.h"
+struct {
    uint32 nsid;
    uint32 size;
    /* followed by 'size' bytes of data */
-} SVGAFifoCmdEscape;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdEscape;
 
 
 /*
@@ -1267,9 +1628,12 @@ struct SVGAFifoCmdEscape {
  */
 
 typedef
+#include "vmware_pack_begin.h"
 struct {
    SVGAScreenObject screen;   /* Variable-length according to version */
-} SVGAFifoCmdDefineScreen;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineScreen;
 
 
 /*
@@ -1283,9 +1647,12 @@ struct {
  */
 
 typedef
+#include "vmware_pack_begin.h"
 struct {
    uint32 screenId;
-} SVGAFifoCmdDestroyScreen;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDestroyScreen;
 
 
 /*
@@ -1336,11 +1703,14 @@ struct {
  */
 
 typedef
+#include "vmware_pack_begin.h"
 struct {
    SVGAGuestPtr        ptr;
    uint32              bytesPerLine;
    SVGAGMRImageFormat  format;
-} SVGAFifoCmdDefineGMRFB;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMRFB;
 
 
 /*
@@ -1348,19 +1718,10 @@ struct {
  *
  *    This is a guest-to-host blit. It performs a DMA operation to
  *    copy a rectangular region of pixels from the current GMRFB to
- *    one or more Screen Objects.
+ *    a ScreenObject.
  *
  *    The destination coordinate may be specified relative to a
- *    screen's origin (if a screen ID is specified) or relative to the
- *    virtual coordinate system's origin (if the screen ID is
- *    SVGA_ID_INVALID). The actual destination may span zero or more
- *    screens, in the case of a virtual destination rect or a rect
- *    which extends off the edge of the specified screen.
- *
- *    This command writes to the screen's "base layer": the underlying
- *    framebuffer which exists below any cursor or video overlays. No
- *    action is necessary to explicitly hide or update any overlays
- *    which exist on top of the updated region.
+ *    screen's origin.  The provided screen ID must be valid.
  *
  *    The SVGA device is guaranteed to finish reading from the GMRFB
  *    by the time any subsequent FENCE commands are reached.
@@ -1373,46 +1734,27 @@ struct {
  */
 
 typedef
+#include "vmware_pack_begin.h"
 struct {
    SVGASignedPoint  srcOrigin;
    SVGASignedRect   destRect;
    uint32           destScreenId;
-} SVGAFifoCmdBlitGMRFBToScreen;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitGMRFBToScreen;
 
 
 /*
  * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
  *
  *    This is a host-to-guest blit. It performs a DMA operation to
- *    copy a rectangular region of pixels from a single Screen Object
+ *    copy a rectangular region of pixels from a single ScreenObject
  *    back to the current GMRFB.
  *
- *    Usage note: This command should be used rarely. It will
- *    typically be inefficient, but it is necessary for some types of
- *    synchronization between 3D (GPU) and 2D (CPU) rendering into
- *    overlapping areas of a screen.
- *
  *    The source coordinate is specified relative to a screen's
- *    origin. The provided screen ID must be valid. If any parameters
+ *    origin.  The provided screen ID must be valid. If any parameters
  *    are invalid, the resulting pixel values are undefined.
  *
- *    This command reads the screen's "base layer". Overlays like
- *    video and cursor are not included, but any data which was sent
- *    using a blit-to-screen primitive will be available, no matter
- *    whether the data's original source was the GMRFB or the 3D
- *    acceleration hardware.
- *
- *    Note that our guest-to-host blits and host-to-guest blits aren't
- *    symmetric in their current implementation. While the parameters
- *    are identical, host-to-guest blits are a lot less featureful.
- *    They do not support clipping: If the source parameters don't
- *    fully fit within a screen, the blit fails. They must originate
- *    from exactly one screen. Virtual coordinates are not directly
- *    supported.
- *
- *    Host-to-guest blits do support the same set of GMRFB formats
- *    offered by guest-to-host blits.
- *
  *    The SVGA device is guaranteed to finish writing to the GMRFB by
  *    the time any subsequent FENCE commands are reached.
  *
@@ -1421,77 +1763,57 @@ struct {
  */
 
 typedef
+#include "vmware_pack_begin.h"
 struct {
    SVGASignedPoint  destOrigin;
    SVGASignedRect   srcRect;
    uint32           srcScreenId;
-} SVGAFifoCmdBlitScreenToGMRFB;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitScreenToGMRFB;
 
 
 /*
  * SVGA_CMD_ANNOTATION_FILL --
  *
- *    This is a blit annotation. This command stores a small piece of
- *    device state which is consumed by the next blit-to-screen
- *    command. The state is only cleared by commands which are
- *    specifically documented as consuming an annotation. Other
- *    commands (such as ESCAPEs for debugging) may intervene between
- *    the annotation and its associated blit.
- *
- *    This annotation is a promise about the contents of the next
- *    blit: The video driver is guaranteeing that all pixels in that
- *    blit will have the same value, specified here as a color in
- *    SVGAColorBGRX format.
- *
- *    The SVGA device can still render the blit correctly even if it
- *    ignores this annotation, but the annotation may allow it to
- *    perform the blit more efficiently, for example by ignoring the
- *    source data and performing a fill in hardware.
- *
- *    This annotation is most important for performance when the
- *    user's display is being remoted over a network connection.
+ *    The annotation commands have been deprecated and should not be used
+ *    by new drivers.  They used to provide performance hints to the SVGA
+ *    device about the content of screen updates, but newer SVGA devices
+ *    ignore these.
  *
  * Availability:
  *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
+#include "vmware_pack_begin.h"
 struct {
    SVGAColorBGRX  color;
-} SVGAFifoCmdAnnotationFill;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationFill;
 
 
 /*
  * SVGA_CMD_ANNOTATION_COPY --
  *
- *    This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
- *    information about annotations.
- *
- *    This annotation is a promise about the contents of the next
- *    blit: The video driver is guaranteeing that all pixels in that
- *    blit will have the same value as those which already exist at an
- *    identically-sized region on the same or a different screen.
- *
- *    Note that the source pixels for the COPY in this annotation are
- *    sampled before applying the anqnotation's associated blit. They
- *    are allowed to overlap with the blit's destination pixels.
- *
- *    The copy source rectangle is specified the same way as the blit
- *    destination: it can be a rectangle which spans zero or more
- *    screens, specified relative to either a screen or to the virtual
- *    coordinate system's origin. If the source rectangle includes
- *    pixels which are not from exactly one screen, the results are
- *    undefined.
+ *    The annotation commands have been deprecated and should not be used
+ *    by new drivers.  They used to provide performance hints to the SVGA
+ *    device about the content of screen updates, but newer SVGA devices
+ *    ignore these.
  *
  * Availability:
  *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
+#include "vmware_pack_begin.h"
 struct {
    SVGASignedPoint  srcOrigin;
    uint32           srcScreenId;
-} SVGAFifoCmdAnnotationCopy;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationCopy;
 
 
 /*
@@ -1504,10 +1826,13 @@ struct {
  */
 
 typedef
+#include "vmware_pack_begin.h"
 struct {
    uint32 gmrId;
    uint32 numPages;
-} SVGAFifoCmdDefineGMR2;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMR2;
 
 
 /*
@@ -1546,6 +1871,7 @@ typedef enum {
 } SVGARemapGMR2Flags;
 
 typedef
+#include "vmware_pack_begin.h"
 struct {
    uint32 gmrId;
    SVGARemapGMR2Flags flags;
@@ -1559,6 +1885,52 @@ struct {
     * (according to flag SVGA_REMAP_GMR2_PPN64) follows.  If flag
     * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
     */
-} SVGAFifoCmdRemapGMR2;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRemapGMR2;
+
+
+/*
+ * Size of SVGA device memory such as frame buffer and FIFO.
+ */
+#define SVGA_VRAM_MIN_SIZE             (4 * 640 * 480) /* bytes */
+#define SVGA_VRAM_MIN_SIZE_3D       (16 * 1024 * 1024)
+#define SVGA_VRAM_MAX_SIZE         (128 * 1024 * 1024)
+#define SVGA_MEMORY_SIZE_MAX      (1024 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_MAX           (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MIN       (32 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX       (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT   (256 * 1024)
+
+#define SVGA_VRAM_SIZE_W2K          (64 * 1024 * 1024) /* 64 MB */
+
+/*
+ * To simplify autoDetect display configuration, support a minimum of
+ * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
+ *   numDisplays = 2
+ *   maxWidth = numDisplays * 1920 = 3840
+ *   maxHeight = rotated width of single monitor = 1920
+ *   vramSize = maxWidth * maxHeight * 4 = 29491200
+ */
+#define SVGA_VRAM_SIZE_AUTODETECT   (32 * 1024 * 1024)
+
+#if defined(VMX86_SERVER)
+#define SVGA_VRAM_SIZE               (4 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D           (64 * 1024 * 1024)
+#define SVGA_FIFO_SIZE                    (256 * 1024)
+#define SVGA_FIFO_SIZE_3D                 (516 * 1024)
+#define SVGA_MEMORY_SIZE_DEFAULT   (160 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT                  FALSE
+#else
+#define SVGA_VRAM_SIZE              (16 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D           SVGA_VRAM_MAX_SIZE
+#define SVGA_FIFO_SIZE               (2 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_3D               SVGA_FIFO_SIZE
+#define SVGA_MEMORY_SIZE_DEFAULT   (768 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT                   TRUE
+#endif
+
+#define SVGA_FIFO_SIZE_GBOBJECTS          (256 * 1024)
+#define SVGA_VRAM_SIZE_GBOBJECTS     (4 * 1024 * 1024)
 
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
new file mode 100644
index 000000000000..2e8ba4df8de9
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -0,0 +1,46 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8  uint8;
+typedef s8  int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+#define MAX_UINT16 U16_MAX
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
new file mode 100644
index 000000000000..120eab830eaf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -0,0 +1,21 @@
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8  uint8;
+typedef s8  int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
new file mode 100644
index 000000000000..7e7b0ce34aa2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
new file mode 100644
index 000000000000..e2e440ed3d44
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+__packed
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
deleted file mode 100644
index f58dc7dd15c5..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ /dev/null
@@ -1,2627 +0,0 @@
-/**********************************************************
- * Copyright 1998-2009 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga3d_reg.h --
- *
- *       SVGA 3D hardware definitions
- */
-
-#ifndef _SVGA3D_REG_H_
-#define _SVGA3D_REG_H_
-
-#include "svga_reg.h"
-
-typedef uint32 PPN;
-typedef __le64 PPN64;
-
-/*
- * 3D Hardware Version
- *
- *   The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
- *   register.   Is set by the host and read by the guest.  This lets
- *   us make new guest drivers which are backwards-compatible with old
- *   SVGA hardware revisions.  It does not let us support old guest
- *   drivers.  Good enough for now.
- *
- */
-
-#define SVGA3D_MAKE_HWVERSION(major, minor)      (((major) << 16) | ((minor) & 0xFF))
-#define SVGA3D_MAJOR_HWVERSION(version)          ((version) >> 16)
-#define SVGA3D_MINOR_HWVERSION(version)          ((version) & 0xFF)
-
-typedef enum {
-   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
-   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
-   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
-   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
-   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
-   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
-   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
-   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
-} SVGA3dHardwareVersion;
-
-/*
- * Generic Types
- */
-
-typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
-#define SVGA3D_NUM_CLIPPLANES                   6
-#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  8
-#define SVGA3D_MAX_CONTEXT_IDS                  256
-#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
-
-#define SVGA3D_NUM_TEXTURE_UNITS                32
-#define SVGA3D_NUM_LIGHTS                       8
-
-/*
- * Surface formats.
- *
- * If you modify this list, be sure to keep GLUtil.c in sync. It
- * includes the internal format definition of each surface in
- * GLUtil_ConvertSurfaceFormat, and it contains a table of
- * human-readable names in GLUtil_GetFormatName.
- */
-
-typedef enum SVGA3dSurfaceFormat {
-   SVGA3D_FORMAT_MIN                   = 0,
-   SVGA3D_FORMAT_INVALID               = 0,
-
-   SVGA3D_X8R8G8B8                     = 1,
-   SVGA3D_A8R8G8B8                     = 2,
-
-   SVGA3D_R5G6B5                       = 3,
-   SVGA3D_X1R5G5B5                     = 4,
-   SVGA3D_A1R5G5B5                     = 5,
-   SVGA3D_A4R4G4B4                     = 6,
-
-   SVGA3D_Z_D32                        = 7,
-   SVGA3D_Z_D16                        = 8,
-   SVGA3D_Z_D24S8                      = 9,
-   SVGA3D_Z_D15S1                      = 10,
-
-   SVGA3D_LUMINANCE8                   = 11,
-   SVGA3D_LUMINANCE4_ALPHA4            = 12,
-   SVGA3D_LUMINANCE16                  = 13,
-   SVGA3D_LUMINANCE8_ALPHA8            = 14,
-
-   SVGA3D_DXT1                         = 15,
-   SVGA3D_DXT2                         = 16,
-   SVGA3D_DXT3                         = 17,
-   SVGA3D_DXT4                         = 18,
-   SVGA3D_DXT5                         = 19,
-
-   SVGA3D_BUMPU8V8                     = 20,
-   SVGA3D_BUMPL6V5U5                   = 21,
-   SVGA3D_BUMPX8L8V8U8                 = 22,
-   SVGA3D_BUMPL8V8U8                   = 23,
-
-   SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
-   SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
-
-   SVGA3D_A2R10G10B10                  = 26,
-
-   /* signed formats */
-   SVGA3D_V8U8                         = 27,
-   SVGA3D_Q8W8V8U8                     = 28,
-   SVGA3D_CxV8U8                       = 29,
-
-   /* mixed formats */
-   SVGA3D_X8L8V8U8                     = 30,
-   SVGA3D_A2W10V10U10                  = 31,
-
-   SVGA3D_ALPHA8                       = 32,
-
-   /* Single- and dual-component floating point formats */
-   SVGA3D_R_S10E5                      = 33,
-   SVGA3D_R_S23E8                      = 34,
-   SVGA3D_RG_S10E5                     = 35,
-   SVGA3D_RG_S23E8                     = 36,
-
-   SVGA3D_BUFFER                       = 37,
-
-   SVGA3D_Z_D24X8                      = 38,
-
-   SVGA3D_V16U16                       = 39,
-
-   SVGA3D_G16R16                       = 40,
-   SVGA3D_A16B16G16R16                 = 41,
-
-   /* Packed Video formats */
-   SVGA3D_UYVY                         = 42,
-   SVGA3D_YUY2                         = 43,
-
-   /* Planar video formats */
-   SVGA3D_NV12                         = 44,
-
-   /* Video format with alpha */
-   SVGA3D_AYUV                         = 45,
-
-   SVGA3D_R32G32B32A32_TYPELESS        = 46,
-   SVGA3D_R32G32B32A32_FLOAT           = 25,
-   SVGA3D_R32G32B32A32_UINT            = 47,
-   SVGA3D_R32G32B32A32_SINT            = 48,
-   SVGA3D_R32G32B32_TYPELESS           = 49,
-   SVGA3D_R32G32B32_FLOAT              = 50,
-   SVGA3D_R32G32B32_UINT               = 51,
-   SVGA3D_R32G32B32_SINT               = 52,
-   SVGA3D_R16G16B16A16_TYPELESS        = 53,
-   SVGA3D_R16G16B16A16_FLOAT           = 24,
-   SVGA3D_R16G16B16A16_UNORM           = 41,
-   SVGA3D_R16G16B16A16_UINT            = 54,
-   SVGA3D_R16G16B16A16_SNORM           = 55,
-   SVGA3D_R16G16B16A16_SINT            = 56,
-   SVGA3D_R32G32_TYPELESS              = 57,
-   SVGA3D_R32G32_FLOAT                 = 36,
-   SVGA3D_R32G32_UINT                  = 58,
-   SVGA3D_R32G32_SINT                  = 59,
-   SVGA3D_R32G8X24_TYPELESS            = 60,
-   SVGA3D_D32_FLOAT_S8X24_UINT         = 61,
-   SVGA3D_R32_FLOAT_X8X24_TYPELESS     = 62,
-   SVGA3D_X32_TYPELESS_G8X24_UINT      = 63,
-   SVGA3D_R10G10B10A2_TYPELESS         = 64,
-   SVGA3D_R10G10B10A2_UNORM            = 26,
-   SVGA3D_R10G10B10A2_UINT             = 65,
-   SVGA3D_R11G11B10_FLOAT              = 66,
-   SVGA3D_R8G8B8A8_TYPELESS            = 67,
-   SVGA3D_R8G8B8A8_UNORM               = 68,
-   SVGA3D_R8G8B8A8_UNORM_SRGB          = 69,
-   SVGA3D_R8G8B8A8_UINT                = 70,
-   SVGA3D_R8G8B8A8_SNORM               = 28,
-   SVGA3D_R8G8B8A8_SINT                = 71,
-   SVGA3D_R16G16_TYPELESS              = 72,
-   SVGA3D_R16G16_FLOAT                 = 35,
-   SVGA3D_R16G16_UNORM                 = 40,
-   SVGA3D_R16G16_UINT                  = 73,
-   SVGA3D_R16G16_SNORM                 = 39,
-   SVGA3D_R16G16_SINT                  = 74,
-   SVGA3D_R32_TYPELESS                 = 75,
-   SVGA3D_D32_FLOAT                    = 76,
-   SVGA3D_R32_FLOAT                    = 34,
-   SVGA3D_R32_UINT                     = 77,
-   SVGA3D_R32_SINT                     = 78,
-   SVGA3D_R24G8_TYPELESS               = 79,
-   SVGA3D_D24_UNORM_S8_UINT            = 80,
-   SVGA3D_R24_UNORM_X8_TYPELESS        = 81,
-   SVGA3D_X24_TYPELESS_G8_UINT         = 82,
-   SVGA3D_R8G8_TYPELESS                = 83,
-   SVGA3D_R8G8_UNORM                   = 84,
-   SVGA3D_R8G8_UINT                    = 85,
-   SVGA3D_R8G8_SNORM                   = 27,
-   SVGA3D_R8G8_SINT                    = 86,
-   SVGA3D_R16_TYPELESS                 = 87,
-   SVGA3D_R16_FLOAT                    = 33,
-   SVGA3D_D16_UNORM                    = 8,
-   SVGA3D_R16_UNORM                    = 88,
-   SVGA3D_R16_UINT                     = 89,
-   SVGA3D_R16_SNORM                    = 90,
-   SVGA3D_R16_SINT                     = 91,
-   SVGA3D_R8_TYPELESS                  = 92,
-   SVGA3D_R8_UNORM                     = 93,
-   SVGA3D_R8_UINT                      = 94,
-   SVGA3D_R8_SNORM                     = 95,
-   SVGA3D_R8_SINT                      = 96,
-   SVGA3D_A8_UNORM                     = 32,
-   SVGA3D_R1_UNORM                     = 97,
-   SVGA3D_R9G9B9E5_SHAREDEXP           = 98,
-   SVGA3D_R8G8_B8G8_UNORM              = 99,
-   SVGA3D_G8R8_G8B8_UNORM              = 100,
-   SVGA3D_BC1_TYPELESS                 = 101,
-   SVGA3D_BC1_UNORM                    = 15,
-   SVGA3D_BC1_UNORM_SRGB               = 102,
-   SVGA3D_BC2_TYPELESS                 = 103,
-   SVGA3D_BC2_UNORM                    = 17,
-   SVGA3D_BC2_UNORM_SRGB               = 104,
-   SVGA3D_BC3_TYPELESS                 = 105,
-   SVGA3D_BC3_UNORM                    = 19,
-   SVGA3D_BC3_UNORM_SRGB               = 106,
-   SVGA3D_BC4_TYPELESS                 = 107,
-   SVGA3D_BC4_UNORM                    = 108,
-   SVGA3D_BC4_SNORM                    = 109,
-   SVGA3D_BC5_TYPELESS                 = 110,
-   SVGA3D_BC5_UNORM                    = 111,
-   SVGA3D_BC5_SNORM                    = 112,
-   SVGA3D_B5G6R5_UNORM                 = 3,
-   SVGA3D_B5G5R5A1_UNORM               = 5,
-   SVGA3D_B8G8R8A8_UNORM               = 2,
-   SVGA3D_B8G8R8X8_UNORM               = 1,
-   SVGA3D_R10G10B10_XR_BIAS_A2_UNORM   = 113,
-   SVGA3D_B8G8R8A8_TYPELESS            = 114,
-   SVGA3D_B8G8R8A8_UNORM_SRGB          = 115,
-   SVGA3D_B8G8R8X8_TYPELESS            = 116,
-   SVGA3D_B8G8R8X8_UNORM_SRGB          = 117,
-
-   /* Advanced D3D9 depth formats. */
-   SVGA3D_Z_DF16                       = 118,
-   SVGA3D_Z_DF24                       = 119,
-   SVGA3D_Z_D24S8_INT                  = 120,
-
-   /* Planar video formats. */
-   SVGA3D_YV12                         = 121,
-
-   SVGA3D_FORMAT_MAX                   = 122,
-} SVGA3dSurfaceFormat;
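
Illustrative sketch (not part of the header): several of the DX10-style names above deliberately reuse legacy enum values rather than adding new ones, so the aliased pairs compare equal. A minimal runtime check, assuming this header is included:

#include <assert.h>

static void check_format_aliases(void)
{
   /* The DX-style names alias legacy values rather than new ones. */
   assert(SVGA3D_R32G32B32A32_FLOAT == SVGA3D_ARGB_S23E8);   /* both 25 */
   assert(SVGA3D_R16G16B16A16_UNORM == SVGA3D_A16B16G16R16); /* both 41 */
   assert(SVGA3D_B8G8R8A8_UNORM     == SVGA3D_A8R8G8B8);     /* both 2  */
}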
-
-typedef uint32 SVGA3dColor; /* a, r, g, b */
-
-/*
- * These match the D3DFORMAT_OP definitions used by Direct3D. We need
- * them so that we can query the host for what the supported surface
- * operations are (when we're using the D3D backend, in particular),
- * and so we can send those operations to the guest.
- */
-typedef enum {
-   SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
-   SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
-   SVGA3DFORMAT_OP_CUBETEXTURE                           = 0x00000004,
-   SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET                = 0x00000008,
-   SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET              = 0x00000010,
-   SVGA3DFORMAT_OP_ZSTENCIL                              = 0x00000040,
-   SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH   = 0x00000080,
-
-/*
- * This format can be used as a render target if the current display mode
- * is of the same depth when the alpha channel is ignored.  E.g., if the
- * device can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
- * format op list entry for A8R8G8B8 should have this cap.
- */
-   SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET  = 0x00000100,
-
-/*
- * This format contains DirectDraw support (including Flip).  This flag
- * should not be set on alpha formats.
- */
-   SVGA3DFORMAT_OP_DISPLAYMODE                           = 0x00000400,
-
-/*
- * The rasterizer can provide some level of Direct3D support for this format,
- * which implies that the driver can create a context in this mode (for some
- * render target format).  When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
- * flag must also be set.
- */
-   SVGA3DFORMAT_OP_3DACCELERATION                        = 0x00000800,
-
-/*
- * This is set for a private format when the driver has put the bpp in
- * the structure.
- */
-   SVGA3DFORMAT_OP_PIXELSIZE                             = 0x00001000,
-
-/*
- * Indicates that this format can be converted to any RGB format for which
- * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
- */
-   SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
-
-/*
- * Indicates that this format can be used to create offscreen plain surfaces.
- */
-   SVGA3DFORMAT_OP_OFFSCREENPLAIN                        = 0x00004000,
-
-/*
- * Indicates that this format can be read as an SRGB texture (meaning that the
- * sampler will linearize the looked-up data)
- */
-   SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
-
-/*
- * Indicates that this format can be used in the bumpmap instructions
- */
-   SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
-
-/*
- * Indicates that this format can be sampled by the displacement map sampler
- */
-   SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
-
-/*
- * Indicates that this format cannot be used with texture filtering
- */
-   SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
-
-/*
- * Indicates that format conversions are supported to this RGB format if
- * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
- */
-   SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                    = 0x00080000,
-
-/*
- * Indicates that this format can be written as an SRGB target (meaning that the
- * pixel pipe will de-linearize data on output to the format)
- */
-   SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
-
-/*
- * Indicates that this format cannot be used with alpha blending
- */
-   SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
-
-/*
- * Indicates that the device can auto-generate sublevels for resources
- * of this format
- */
-   SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
-
-/*
- * Indicates that this format can be used by the vertex texture sampler
- */
-   SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
-
-/*
- * Indicates that this format supports neither texture coordinate wrap
- * modes nor mipmapping
- */
-   SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                  = 0x01000000
-} SVGA3dFormatOp;
-
-/*
- * This structure is a conversion of SVGA3DFORMAT_OP_*.
- * Entries must be located at the same bit positions.
- */
-typedef union {
-   uint32 value;
-   struct {
-      uint32 texture : 1;
-      uint32 volumeTexture : 1;
-      uint32 cubeTexture : 1;
-      uint32 offscreenRenderTarget : 1;
-      uint32 sameFormatRenderTarget : 1;
-      uint32 unknown1 : 1;
-      uint32 zStencil : 1;
-      uint32 zStencilArbitraryDepth : 1;
-      uint32 sameFormatUpToAlpha : 1;
-      uint32 unknown2 : 1;
-      uint32 displayMode : 1;
-      uint32 acceleration3d : 1;
-      uint32 pixelSize : 1;
-      uint32 convertToARGB : 1;
-      uint32 offscreenPlain : 1;
-      uint32 sRGBRead : 1;
-      uint32 bumpMap : 1;
-      uint32 dmap : 1;
-      uint32 noFilter : 1;
-      uint32 memberOfGroupARGB : 1;
-      uint32 sRGBWrite : 1;
-      uint32 noAlphaBlend : 1;
-      uint32 autoGenMipMap : 1;
-      uint32 vertexTexture : 1;
-      uint32 noTexCoordWrapNorMip : 1;
-   };
-} SVGA3dSurfaceFormatCaps;
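
As the comment notes, the union's bitfield layout mirrors the SVGA3DFORMAT_OP_* values bit for bit, so a raw op mask can be viewed through the struct. A minimal sketch (the helper name is hypothetical):

static SVGA3dSurfaceFormatCaps decode_format_ops(uint32 mask)
{
   SVGA3dSurfaceFormatCaps caps;
   caps.value = mask;   /* bit positions match the SVGA3DFORMAT_OP_* flags */
   return caps;
}

/* e.g. decode_format_ops(SVGA3DFORMAT_OP_TEXTURE).texture == 1 */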
-
-/*
- * SVGA_3D_CMD_SETRENDERSTATE Types.  All value types
- * must fit in a uint32.
- */
-
-typedef enum {
-   SVGA3D_RS_INVALID                   = 0,
-   SVGA3D_RS_ZENABLE                   = 1,     /* SVGA3dBool */
-   SVGA3D_RS_ZWRITEENABLE              = 2,     /* SVGA3dBool */
-   SVGA3D_RS_ALPHATESTENABLE           = 3,     /* SVGA3dBool */
-   SVGA3D_RS_DITHERENABLE              = 4,     /* SVGA3dBool */
-   SVGA3D_RS_BLENDENABLE               = 5,     /* SVGA3dBool */
-   SVGA3D_RS_FOGENABLE                 = 6,     /* SVGA3dBool */
-   SVGA3D_RS_SPECULARENABLE            = 7,     /* SVGA3dBool */
-   SVGA3D_RS_STENCILENABLE             = 8,     /* SVGA3dBool */
-   SVGA3D_RS_LIGHTINGENABLE            = 9,     /* SVGA3dBool */
-   SVGA3D_RS_NORMALIZENORMALS          = 10,    /* SVGA3dBool */
-   SVGA3D_RS_POINTSPRITEENABLE         = 11,    /* SVGA3dBool */
-   SVGA3D_RS_POINTSCALEENABLE          = 12,    /* SVGA3dBool */
-   SVGA3D_RS_STENCILREF                = 13,    /* uint32 */
-   SVGA3D_RS_STENCILMASK               = 14,    /* uint32 */
-   SVGA3D_RS_STENCILWRITEMASK          = 15,    /* uint32 */
-   SVGA3D_RS_FOGSTART                  = 16,    /* float */
-   SVGA3D_RS_FOGEND                    = 17,    /* float */
-   SVGA3D_RS_FOGDENSITY                = 18,    /* float */
-   SVGA3D_RS_POINTSIZE                 = 19,    /* float */
-   SVGA3D_RS_POINTSIZEMIN              = 20,    /* float */
-   SVGA3D_RS_POINTSIZEMAX              = 21,    /* float */
-   SVGA3D_RS_POINTSCALE_A              = 22,    /* float */
-   SVGA3D_RS_POINTSCALE_B              = 23,    /* float */
-   SVGA3D_RS_POINTSCALE_C              = 24,    /* float */
-   SVGA3D_RS_FOGCOLOR                  = 25,    /* SVGA3dColor */
-   SVGA3D_RS_AMBIENT                   = 26,    /* SVGA3dColor */
-   SVGA3D_RS_CLIPPLANEENABLE           = 27,    /* SVGA3dClipPlanes */
-   SVGA3D_RS_FOGMODE                   = 28,    /* SVGA3dFogMode */
-   SVGA3D_RS_FILLMODE                  = 29,    /* SVGA3dFillMode */
-   SVGA3D_RS_SHADEMODE                 = 30,    /* SVGA3dShadeMode */
-   SVGA3D_RS_LINEPATTERN               = 31,    /* SVGA3dLinePattern */
-   SVGA3D_RS_SRCBLEND                  = 32,    /* SVGA3dBlendOp */
-   SVGA3D_RS_DSTBLEND                  = 33,    /* SVGA3dBlendOp */
-   SVGA3D_RS_BLENDEQUATION             = 34,    /* SVGA3dBlendEquation */
-   SVGA3D_RS_CULLMODE                  = 35,    /* SVGA3dFace */
-   SVGA3D_RS_ZFUNC                     = 36,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_ALPHAFUNC                 = 37,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_STENCILFUNC               = 38,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_STENCILFAIL               = 39,    /* SVGA3dStencilOp */
-   SVGA3D_RS_STENCILZFAIL              = 40,    /* SVGA3dStencilOp */
-   SVGA3D_RS_STENCILPASS               = 41,    /* SVGA3dStencilOp */
-   SVGA3D_RS_ALPHAREF                  = 42,    /* float (0.0 .. 1.0) */
-   SVGA3D_RS_FRONTWINDING              = 43,    /* SVGA3dFrontWinding */
-   SVGA3D_RS_COORDINATETYPE            = 44,    /* SVGA3dCoordinateType */
-   SVGA3D_RS_ZBIAS                     = 45,    /* float */
-   SVGA3D_RS_RANGEFOGENABLE            = 46,    /* SVGA3dBool */
-   SVGA3D_RS_COLORWRITEENABLE          = 47,    /* SVGA3dColorMask */
-   SVGA3D_RS_VERTEXMATERIALENABLE      = 48,    /* SVGA3dBool */
-   SVGA3D_RS_DIFFUSEMATERIALSOURCE     = 49,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_SPECULARMATERIALSOURCE    = 50,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_AMBIENTMATERIALSOURCE     = 51,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_EMISSIVEMATERIALSOURCE    = 52,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_TEXTUREFACTOR             = 53,    /* SVGA3dColor */
-   SVGA3D_RS_LOCALVIEWER               = 54,    /* SVGA3dBool */
-   SVGA3D_RS_SCISSORTESTENABLE         = 55,    /* SVGA3dBool */
-   SVGA3D_RS_BLENDCOLOR                = 56,    /* SVGA3dColor */
-   SVGA3D_RS_STENCILENABLE2SIDED       = 57,    /* SVGA3dBool */
-   SVGA3D_RS_CCWSTENCILFUNC            = 58,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_CCWSTENCILFAIL            = 59,    /* SVGA3dStencilOp */
-   SVGA3D_RS_CCWSTENCILZFAIL           = 60,    /* SVGA3dStencilOp */
-   SVGA3D_RS_CCWSTENCILPASS            = 61,    /* SVGA3dStencilOp */
-   SVGA3D_RS_VERTEXBLEND               = 62,    /* SVGA3dVertexBlendFlags */
-   SVGA3D_RS_SLOPESCALEDEPTHBIAS       = 63,    /* float */
-   SVGA3D_RS_DEPTHBIAS                 = 64,    /* float */
-
-
-   /*
-    * Output Gamma Level
-    *
-    * Output gamma affects the gamma curve of colors that are output from the
-    * rendering pipeline.  A value of 1.0 specifies a linear color space. If the
-    * value is <= 0.0, gamma correction is ignored and linear color space is
-    * used.
-    */
-
-   SVGA3D_RS_OUTPUTGAMMA               = 65,    /* float */
-   SVGA3D_RS_ZVISIBLE                  = 66,    /* SVGA3dBool */
-   SVGA3D_RS_LASTPIXEL                 = 67,    /* SVGA3dBool */
-   SVGA3D_RS_CLIPPING                  = 68,    /* SVGA3dBool */
-   SVGA3D_RS_WRAP0                     = 69,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP1                     = 70,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP2                     = 71,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP3                     = 72,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP4                     = 73,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP5                     = 74,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP6                     = 75,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP7                     = 76,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP8                     = 77,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP9                     = 78,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP10                    = 79,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP11                    = 80,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP12                    = 81,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP13                    = 82,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP14                    = 83,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP15                    = 84,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_MULTISAMPLEANTIALIAS      = 85,    /* SVGA3dBool */
-   SVGA3D_RS_MULTISAMPLEMASK           = 86,    /* uint32 */
-   SVGA3D_RS_INDEXEDVERTEXBLENDENABLE  = 87,    /* SVGA3dBool */
-   SVGA3D_RS_TWEENFACTOR               = 88,    /* float */
-   SVGA3D_RS_ANTIALIASEDLINEENABLE     = 89,    /* SVGA3dBool */
-   SVGA3D_RS_COLORWRITEENABLE1         = 90,    /* SVGA3dColorMask */
-   SVGA3D_RS_COLORWRITEENABLE2         = 91,    /* SVGA3dColorMask */
-   SVGA3D_RS_COLORWRITEENABLE3         = 92,    /* SVGA3dColorMask */
-   SVGA3D_RS_SEPARATEALPHABLENDENABLE  = 93,    /* SVGA3dBool */
-   SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
-   SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
-   SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
-   SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
-   SVGA3D_RS_LINEAA                    = 98,    /* SVGA3dBool */
-   SVGA3D_RS_LINEWIDTH                 = 99,    /* float */
-   SVGA3D_RS_MAX
-} SVGA3dRenderStateName;
-
-typedef enum {
-   SVGA3D_TRANSPARENCYANTIALIAS_NORMAL            = 0,
-   SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE   = 1,
-   SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE       = 2,
-   SVGA3D_TRANSPARENCYANTIALIAS_MAX
-} SVGA3dTransparencyAntialiasType;
-
-typedef enum {
-   SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
-   SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
-   SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
-} SVGA3dVertexMaterial;
-
-typedef enum {
-   SVGA3D_FILLMODE_INVALID = 0,
-   SVGA3D_FILLMODE_POINT   = 1,
-   SVGA3D_FILLMODE_LINE    = 2,
-   SVGA3D_FILLMODE_FILL    = 3,
-   SVGA3D_FILLMODE_MAX
-} SVGA3dFillModeType;
-
-
-typedef
-union {
-   struct {
-      uint16   mode;       /* SVGA3dFillModeType */
-      uint16   face;       /* SVGA3dFace */
-   };
-   uint32 uintValue;
-} SVGA3dFillMode;
-
-typedef enum {
-   SVGA3D_SHADEMODE_INVALID = 0,
-   SVGA3D_SHADEMODE_FLAT    = 1,
-   SVGA3D_SHADEMODE_SMOOTH  = 2,
-   SVGA3D_SHADEMODE_PHONG   = 3,     /* Not supported */
-   SVGA3D_SHADEMODE_MAX
-} SVGA3dShadeMode;
-
-typedef
-union {
-   struct {
-      uint16 repeat;
-      uint16 pattern;
-   };
-   uint32 uintValue;
-} SVGA3dLinePattern;
-
-typedef enum {
-   SVGA3D_BLENDOP_INVALID            = 0,
-   SVGA3D_BLENDOP_ZERO               = 1,
-   SVGA3D_BLENDOP_ONE                = 2,
-   SVGA3D_BLENDOP_SRCCOLOR           = 3,
-   SVGA3D_BLENDOP_INVSRCCOLOR        = 4,
-   SVGA3D_BLENDOP_SRCALPHA           = 5,
-   SVGA3D_BLENDOP_INVSRCALPHA        = 6,
-   SVGA3D_BLENDOP_DESTALPHA          = 7,
-   SVGA3D_BLENDOP_INVDESTALPHA       = 8,
-   SVGA3D_BLENDOP_DESTCOLOR          = 9,
-   SVGA3D_BLENDOP_INVDESTCOLOR       = 10,
-   SVGA3D_BLENDOP_SRCALPHASAT        = 11,
-   SVGA3D_BLENDOP_BLENDFACTOR        = 12,
-   SVGA3D_BLENDOP_INVBLENDFACTOR     = 13,
-   SVGA3D_BLENDOP_MAX
-} SVGA3dBlendOp;
-
-typedef enum {
-   SVGA3D_BLENDEQ_INVALID            = 0,
-   SVGA3D_BLENDEQ_ADD                = 1,
-   SVGA3D_BLENDEQ_SUBTRACT           = 2,
-   SVGA3D_BLENDEQ_REVSUBTRACT        = 3,
-   SVGA3D_BLENDEQ_MINIMUM            = 4,
-   SVGA3D_BLENDEQ_MAXIMUM            = 5,
-   SVGA3D_BLENDEQ_MAX
-} SVGA3dBlendEquation;
-
-typedef enum {
-   SVGA3D_FRONTWINDING_INVALID = 0,
-   SVGA3D_FRONTWINDING_CW      = 1,
-   SVGA3D_FRONTWINDING_CCW     = 2,
-   SVGA3D_FRONTWINDING_MAX
-} SVGA3dFrontWinding;
-
-typedef enum {
-   SVGA3D_FACE_INVALID  = 0,
-   SVGA3D_FACE_NONE     = 1,
-   SVGA3D_FACE_FRONT    = 2,
-   SVGA3D_FACE_BACK     = 3,
-   SVGA3D_FACE_FRONT_BACK = 4,
-   SVGA3D_FACE_MAX
-} SVGA3dFace;
-
-/*
- * The order and the values should not be changed
- */
-
-typedef enum {
-   SVGA3D_CMP_INVALID              = 0,
-   SVGA3D_CMP_NEVER                = 1,
-   SVGA3D_CMP_LESS                 = 2,
-   SVGA3D_CMP_EQUAL                = 3,
-   SVGA3D_CMP_LESSEQUAL            = 4,
-   SVGA3D_CMP_GREATER              = 5,
-   SVGA3D_CMP_NOTEQUAL             = 6,
-   SVGA3D_CMP_GREATEREQUAL         = 7,
-   SVGA3D_CMP_ALWAYS               = 8,
-   SVGA3D_CMP_MAX
-} SVGA3dCmpFunc;
-
-/*
- * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
- * the fog factor to be specified in the alpha component of the specular
- * (a.k.a. secondary) vertex color.
- */
-typedef enum {
-   SVGA3D_FOGFUNC_INVALID          = 0,
-   SVGA3D_FOGFUNC_EXP              = 1,
-   SVGA3D_FOGFUNC_EXP2             = 2,
-   SVGA3D_FOGFUNC_LINEAR           = 3,
-   SVGA3D_FOGFUNC_PER_VERTEX       = 4
-} SVGA3dFogFunction;
-
-/*
- * SVGA3D_FOGTYPE_* specifies whether fog factors are computed on a per-vertex
- * or per-pixel basis.
- */
-typedef enum {
-   SVGA3D_FOGTYPE_INVALID          = 0,
-   SVGA3D_FOGTYPE_VERTEX           = 1,
-   SVGA3D_FOGTYPE_PIXEL            = 2,
-   SVGA3D_FOGTYPE_MAX              = 3
-} SVGA3dFogType;
-
-/*
- * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
- * computed using the eye Z value of each pixel (or vertex), whereas range-
- * based fog is computed using the actual distance (range) to the eye.
- */
-typedef enum {
-   SVGA3D_FOGBASE_INVALID          = 0,
-   SVGA3D_FOGBASE_DEPTHBASED       = 1,
-   SVGA3D_FOGBASE_RANGEBASED       = 2,
-   SVGA3D_FOGBASE_MAX              = 3
-} SVGA3dFogBase;
-
-typedef enum {
-   SVGA3D_STENCILOP_INVALID        = 0,
-   SVGA3D_STENCILOP_KEEP           = 1,
-   SVGA3D_STENCILOP_ZERO           = 2,
-   SVGA3D_STENCILOP_REPLACE        = 3,
-   SVGA3D_STENCILOP_INCRSAT        = 4,
-   SVGA3D_STENCILOP_DECRSAT        = 5,
-   SVGA3D_STENCILOP_INVERT         = 6,
-   SVGA3D_STENCILOP_INCR           = 7,
-   SVGA3D_STENCILOP_DECR           = 8,
-   SVGA3D_STENCILOP_MAX
-} SVGA3dStencilOp;
-
-typedef enum {
-   SVGA3D_CLIPPLANE_0              = (1 << 0),
-   SVGA3D_CLIPPLANE_1              = (1 << 1),
-   SVGA3D_CLIPPLANE_2              = (1 << 2),
-   SVGA3D_CLIPPLANE_3              = (1 << 3),
-   SVGA3D_CLIPPLANE_4              = (1 << 4),
-   SVGA3D_CLIPPLANE_5              = (1 << 5),
-} SVGA3dClipPlanes;
-
-typedef enum {
-   SVGA3D_CLEAR_COLOR              = 0x1,
-   SVGA3D_CLEAR_DEPTH              = 0x2,
-   SVGA3D_CLEAR_STENCIL            = 0x4
-} SVGA3dClearFlag;
-
-typedef enum {
-   SVGA3D_RT_DEPTH                 = 0,
-   SVGA3D_RT_STENCIL               = 1,
-   SVGA3D_RT_COLOR0                = 2,
-   SVGA3D_RT_COLOR1                = 3,
-   SVGA3D_RT_COLOR2                = 4,
-   SVGA3D_RT_COLOR3                = 5,
-   SVGA3D_RT_COLOR4                = 6,
-   SVGA3D_RT_COLOR5                = 7,
-   SVGA3D_RT_COLOR6                = 8,
-   SVGA3D_RT_COLOR7                = 9,
-   SVGA3D_RT_MAX,
-   SVGA3D_RT_INVALID               = ((uint32)-1),
-} SVGA3dRenderTargetType;
-
-#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
-
-typedef
-union {
-   struct {
-      uint32  red   : 1;
-      uint32  green : 1;
-      uint32  blue  : 1;
-      uint32  alpha : 1;
-   };
-   uint32 uintValue;
-} SVGA3dColorMask;
-
-typedef enum {
-   SVGA3D_VBLEND_DISABLE            = 0,
-   SVGA3D_VBLEND_1WEIGHT            = 1,
-   SVGA3D_VBLEND_2WEIGHT            = 2,
-   SVGA3D_VBLEND_3WEIGHT            = 3,
-} SVGA3dVertexBlendFlags;
-
-typedef enum {
-   SVGA3D_WRAPCOORD_0   = 1 << 0,
-   SVGA3D_WRAPCOORD_1   = 1 << 1,
-   SVGA3D_WRAPCOORD_2   = 1 << 2,
-   SVGA3D_WRAPCOORD_3   = 1 << 3,
-   SVGA3D_WRAPCOORD_ALL = 0xF,
-} SVGA3dWrapFlags;
-
-/*
- * SVGA_3D_CMD_TEXTURESTATE Types.  All value types
- * must fit in a uint32.
- */
-
-typedef enum {
-   SVGA3D_TS_INVALID                    = 0,
-   SVGA3D_TS_BIND_TEXTURE               = 1,    /* SVGA3dSurfaceId */
-   SVGA3D_TS_COLOROP                    = 2,    /* SVGA3dTextureCombiner */
-   SVGA3D_TS_COLORARG1                  = 3,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_COLORARG2                  = 4,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_ALPHAOP                    = 5,    /* SVGA3dTextureCombiner */
-   SVGA3D_TS_ALPHAARG1                  = 6,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_ALPHAARG2                  = 7,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_ADDRESSU                   = 8,    /* SVGA3dTextureAddress */
-   SVGA3D_TS_ADDRESSV                   = 9,    /* SVGA3dTextureAddress */
-   SVGA3D_TS_MIPFILTER                  = 10,   /* SVGA3dTextureFilter */
-   SVGA3D_TS_MAGFILTER                  = 11,   /* SVGA3dTextureFilter */
-   SVGA3D_TS_MINFILTER                  = 12,   /* SVGA3dTextureFilter */
-   SVGA3D_TS_BORDERCOLOR                = 13,   /* SVGA3dColor */
-   SVGA3D_TS_TEXCOORDINDEX              = 14,   /* uint32 */
-   SVGA3D_TS_TEXTURETRANSFORMFLAGS      = 15,   /* SVGA3dTexTransformFlags */
-   SVGA3D_TS_TEXCOORDGEN                = 16,   /* SVGA3dTextureCoordGen */
-   SVGA3D_TS_BUMPENVMAT00               = 17,   /* float */
-   SVGA3D_TS_BUMPENVMAT01               = 18,   /* float */
-   SVGA3D_TS_BUMPENVMAT10               = 19,   /* float */
-   SVGA3D_TS_BUMPENVMAT11               = 20,   /* float */
-   SVGA3D_TS_TEXTURE_MIPMAP_LEVEL       = 21,   /* uint32 */
-   SVGA3D_TS_TEXTURE_LOD_BIAS           = 22,   /* float */
-   SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL  = 23,   /* uint32 */
-   SVGA3D_TS_ADDRESSW                   = 24,   /* SVGA3dTextureAddress */
-
-
-   /*
-    * Sampler Gamma Level
-    *
-    * Sampler gamma affects the color of samples taken from the sampler.  A
-    * value of 1.0 will produce linear samples.  If the value is <= 0.0, the
-    * gamma value is ignored and a linear space is used.
-    */
-
-   SVGA3D_TS_GAMMA                      = 25,   /* float */
-   SVGA3D_TS_BUMPENVLSCALE              = 26,   /* float */
-   SVGA3D_TS_BUMPENVLOFFSET             = 27,   /* float */
-   SVGA3D_TS_COLORARG0                  = 28,   /* SVGA3dTextureArgData */
-   SVGA3D_TS_ALPHAARG0                  = 29,   /* SVGA3dTextureArgData */
-   SVGA3D_TS_MAX
-} SVGA3dTextureStateName;
-
-typedef enum {
-   SVGA3D_TC_INVALID                   = 0,
-   SVGA3D_TC_DISABLE                   = 1,
-   SVGA3D_TC_SELECTARG1                = 2,
-   SVGA3D_TC_SELECTARG2                = 3,
-   SVGA3D_TC_MODULATE                  = 4,
-   SVGA3D_TC_ADD                       = 5,
-   SVGA3D_TC_ADDSIGNED                 = 6,
-   SVGA3D_TC_SUBTRACT                  = 7,
-   SVGA3D_TC_BLENDTEXTUREALPHA         = 8,
-   SVGA3D_TC_BLENDDIFFUSEALPHA         = 9,
-   SVGA3D_TC_BLENDCURRENTALPHA         = 10,
-   SVGA3D_TC_BLENDFACTORALPHA          = 11,
-   SVGA3D_TC_MODULATE2X                = 12,
-   SVGA3D_TC_MODULATE4X                = 13,
-   SVGA3D_TC_DSDT                      = 14,
-   SVGA3D_TC_DOTPRODUCT3               = 15,
-   SVGA3D_TC_BLENDTEXTUREALPHAPM       = 16,
-   SVGA3D_TC_ADDSIGNED2X               = 17,
-   SVGA3D_TC_ADDSMOOTH                 = 18,
-   SVGA3D_TC_PREMODULATE               = 19,
-   SVGA3D_TC_MODULATEALPHA_ADDCOLOR    = 20,
-   SVGA3D_TC_MODULATECOLOR_ADDALPHA    = 21,
-   SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
-   SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
-   SVGA3D_TC_BUMPENVMAPLUMINANCE       = 24,
-   SVGA3D_TC_MULTIPLYADD               = 25,
-   SVGA3D_TC_LERP                      = 26,
-   SVGA3D_TC_MAX
-} SVGA3dTextureCombiner;
-
-#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
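
The macro maps combiner value N (N > 0) to capability bit N-1, and SVGA3D_TC_INVALID to 0; for example, SVGA3D_TC_MODULATE (4) yields (1 << 3). A usage sketch (helper name hypothetical):

static int combiner_supported(uint32 capsMask, SVGA3dTextureCombiner op)
{
   /* capsMask is a bitmask of supported combiners built from SVGA3D_TC_CAP_BIT */
   return (capsMask & SVGA3D_TC_CAP_BIT(op)) != 0;
}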
-
-typedef enum {
-   SVGA3D_TEX_ADDRESS_INVALID    = 0,
-   SVGA3D_TEX_ADDRESS_WRAP       = 1,
-   SVGA3D_TEX_ADDRESS_MIRROR     = 2,
-   SVGA3D_TEX_ADDRESS_CLAMP      = 3,
-   SVGA3D_TEX_ADDRESS_BORDER     = 4,
-   SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
-   SVGA3D_TEX_ADDRESS_EDGE       = 6,
-   SVGA3D_TEX_ADDRESS_MAX
-} SVGA3dTextureAddress;
-
-/*
- * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
- * disabled, and the rasterizer should use the magnification filter instead.
- */
-typedef enum {
-   SVGA3D_TEX_FILTER_NONE           = 0,
-   SVGA3D_TEX_FILTER_NEAREST        = 1,
-   SVGA3D_TEX_FILTER_LINEAR         = 2,
-   SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
-   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, /* Deprecated, not implemented */
-   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, /* Deprecated, not implemented */
-   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, /* Not currently implemented */
-   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, /* Not currently implemented */
-   SVGA3D_TEX_FILTER_MAX
-} SVGA3dTextureFilter;
-
-typedef enum {
-   SVGA3D_TEX_TRANSFORM_OFF    = 0,
-   SVGA3D_TEX_TRANSFORM_S      = (1 << 0),
-   SVGA3D_TEX_TRANSFORM_T      = (1 << 1),
-   SVGA3D_TEX_TRANSFORM_R      = (1 << 2),
-   SVGA3D_TEX_TRANSFORM_Q      = (1 << 3),
-   SVGA3D_TEX_PROJECTED        = (1 << 15),
-} SVGA3dTexTransformFlags;
-
-typedef enum {
-   SVGA3D_TEXCOORD_GEN_OFF              = 0,
-   SVGA3D_TEXCOORD_GEN_EYE_POSITION     = 1,
-   SVGA3D_TEXCOORD_GEN_EYE_NORMAL       = 2,
-   SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
-   SVGA3D_TEXCOORD_GEN_SPHERE           = 4,
-   SVGA3D_TEXCOORD_GEN_MAX
-} SVGA3dTextureCoordGen;
-
-/*
- * Texture argument constants for texture combiner
- */
-typedef enum {
-   SVGA3D_TA_INVALID    = 0,
-   SVGA3D_TA_CONSTANT   = 1,
-   SVGA3D_TA_PREVIOUS   = 2,
-   SVGA3D_TA_DIFFUSE    = 3,
-   SVGA3D_TA_TEXTURE    = 4,
-   SVGA3D_TA_SPECULAR   = 5,
-   SVGA3D_TA_MAX
-} SVGA3dTextureArgData;
-
-#define SVGA3D_TM_MASK_LEN 4
-
-/* Modifiers for texture argument constants defined above. */
-typedef enum {
-   SVGA3D_TM_NONE       = 0,
-   SVGA3D_TM_ALPHA      = (1 << SVGA3D_TM_MASK_LEN),
-   SVGA3D_TM_ONE_MINUS  = (2 << SVGA3D_TM_MASK_LEN),
-} SVGA3dTextureArgModifier;
-
-#define SVGA3D_INVALID_ID         ((uint32)-1)
-#define SVGA3D_MAX_CLIP_PLANES    6
-
-/*
- * This is the limit on the number of fixed-function texture
- * transforms and texture coordinates we can support. It does *not*
- * correspond to the number of texture image units (samplers) we
- * support!
- */
-#define SVGA3D_MAX_TEXTURE_COORDS 8
-
-/*
- * Vertex declarations
- *
- * Notes:
- *
- * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
- * draw with any POSITIONT vertex arrays, the programmable vertex
- * pipeline will be implicitly disabled. Drawing will take place as if
- * no vertex shader was bound.
- */
-
-typedef enum {
-   SVGA3D_DECLUSAGE_POSITION     = 0,
-   SVGA3D_DECLUSAGE_BLENDWEIGHT,       /*  1 */
-   SVGA3D_DECLUSAGE_BLENDINDICES,      /*  2 */
-   SVGA3D_DECLUSAGE_NORMAL,            /*  3 */
-   SVGA3D_DECLUSAGE_PSIZE,             /*  4 */
-   SVGA3D_DECLUSAGE_TEXCOORD,          /*  5 */
-   SVGA3D_DECLUSAGE_TANGENT,           /*  6 */
-   SVGA3D_DECLUSAGE_BINORMAL,          /*  7 */
-   SVGA3D_DECLUSAGE_TESSFACTOR,        /*  8 */
-   SVGA3D_DECLUSAGE_POSITIONT,         /*  9 */
-   SVGA3D_DECLUSAGE_COLOR,             /* 10 */
-   SVGA3D_DECLUSAGE_FOG,               /* 11 */
-   SVGA3D_DECLUSAGE_DEPTH,             /* 12 */
-   SVGA3D_DECLUSAGE_SAMPLE,            /* 13 */
-   SVGA3D_DECLUSAGE_MAX
-} SVGA3dDeclUsage;
-
-typedef enum {
-   SVGA3D_DECLMETHOD_DEFAULT     = 0,
-   SVGA3D_DECLMETHOD_PARTIALU,
-   SVGA3D_DECLMETHOD_PARTIALV,
-   SVGA3D_DECLMETHOD_CROSSUV,          /* Normal */
-   SVGA3D_DECLMETHOD_UV,
-   SVGA3D_DECLMETHOD_LOOKUP,           /* Lookup a displacement map */
-   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
-} SVGA3dDeclMethod;
-
-typedef enum {
-   SVGA3D_DECLTYPE_FLOAT1        =  0,
-   SVGA3D_DECLTYPE_FLOAT2        =  1,
-   SVGA3D_DECLTYPE_FLOAT3        =  2,
-   SVGA3D_DECLTYPE_FLOAT4        =  3,
-   SVGA3D_DECLTYPE_D3DCOLOR      =  4,
-   SVGA3D_DECLTYPE_UBYTE4        =  5,
-   SVGA3D_DECLTYPE_SHORT2        =  6,
-   SVGA3D_DECLTYPE_SHORT4        =  7,
-   SVGA3D_DECLTYPE_UBYTE4N       =  8,
-   SVGA3D_DECLTYPE_SHORT2N       =  9,
-   SVGA3D_DECLTYPE_SHORT4N       = 10,
-   SVGA3D_DECLTYPE_USHORT2N      = 11,
-   SVGA3D_DECLTYPE_USHORT4N      = 12,
-   SVGA3D_DECLTYPE_UDEC3         = 13,
-   SVGA3D_DECLTYPE_DEC3N         = 14,
-   SVGA3D_DECLTYPE_FLOAT16_2     = 15,
-   SVGA3D_DECLTYPE_FLOAT16_4     = 16,
-   SVGA3D_DECLTYPE_MAX,
-} SVGA3dDeclType;
-
-/*
- * This structure is used for the divisor for geometry instancing;
- * it's a direct translation of the Direct3D equivalent.
- */
-typedef union {
-   struct {
-      /*
-       * For index data, this number represents the number of instances to draw.
-       * For instance data, this number represents the number of
-       * instances/vertex in this stream
-       */
-      uint32 count : 30;
-
-      /*
-       * This is 1 if this is supposed to be the data that is repeated for
-       * every instance.
-       */
-      uint32 indexedData : 1;
-
-      /*
-       * This is 1 if this is supposed to be the per-instance data.
-       */
-      uint32 instanceData : 1;
-   };
-
-   uint32 value;
-} SVGA3dVertexDivisor;
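
A minimal sketch of filling the divisor for a per-instance stream, following the field comments above (helper name hypothetical; the exact host semantics of 'count' should be checked against the device):

static SVGA3dVertexDivisor per_instance_divisor(uint32 count)
{
   SVGA3dVertexDivisor div;
   div.value        = 0;
   div.count        = count; /* instances (or instances/vertex), per the comments */
   div.instanceData = 1;     /* this stream advances once per instance */
   return div;
}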
-
-typedef enum {
-   SVGA3D_PRIMITIVE_INVALID                     = 0,
-   SVGA3D_PRIMITIVE_TRIANGLELIST                = 1,
-   SVGA3D_PRIMITIVE_POINTLIST                   = 2,
-   SVGA3D_PRIMITIVE_LINELIST                    = 3,
-   SVGA3D_PRIMITIVE_LINESTRIP                   = 4,
-   SVGA3D_PRIMITIVE_TRIANGLESTRIP               = 5,
-   SVGA3D_PRIMITIVE_TRIANGLEFAN                 = 6,
-   SVGA3D_PRIMITIVE_MAX
-} SVGA3dPrimitiveType;
-
-typedef enum {
-   SVGA3D_COORDINATE_INVALID                   = 0,
-   SVGA3D_COORDINATE_LEFTHANDED                = 1,
-   SVGA3D_COORDINATE_RIGHTHANDED               = 2,
-   SVGA3D_COORDINATE_MAX
-} SVGA3dCoordinateType;
-
-typedef enum {
-   SVGA3D_TRANSFORM_INVALID                     = 0,
-   SVGA3D_TRANSFORM_WORLD                       = 1,
-   SVGA3D_TRANSFORM_VIEW                        = 2,
-   SVGA3D_TRANSFORM_PROJECTION                  = 3,
-   SVGA3D_TRANSFORM_TEXTURE0                    = 4,
-   SVGA3D_TRANSFORM_TEXTURE1                    = 5,
-   SVGA3D_TRANSFORM_TEXTURE2                    = 6,
-   SVGA3D_TRANSFORM_TEXTURE3                    = 7,
-   SVGA3D_TRANSFORM_TEXTURE4                    = 8,
-   SVGA3D_TRANSFORM_TEXTURE5                    = 9,
-   SVGA3D_TRANSFORM_TEXTURE6                    = 10,
-   SVGA3D_TRANSFORM_TEXTURE7                    = 11,
-   SVGA3D_TRANSFORM_WORLD1                      = 12,
-   SVGA3D_TRANSFORM_WORLD2                      = 13,
-   SVGA3D_TRANSFORM_WORLD3                      = 14,
-   SVGA3D_TRANSFORM_MAX
-} SVGA3dTransformType;
-
-typedef enum {
-   SVGA3D_LIGHTTYPE_INVALID                     = 0,
-   SVGA3D_LIGHTTYPE_POINT                       = 1,
-   SVGA3D_LIGHTTYPE_SPOT1                       = 2, /* 1-cone, in degrees */
-   SVGA3D_LIGHTTYPE_SPOT2                       = 3, /* 2-cone, in radians */
-   SVGA3D_LIGHTTYPE_DIRECTIONAL                 = 4,
-   SVGA3D_LIGHTTYPE_MAX
-} SVGA3dLightType;
-
-typedef enum {
-   SVGA3D_CUBEFACE_POSX                         = 0,
-   SVGA3D_CUBEFACE_NEGX                         = 1,
-   SVGA3D_CUBEFACE_POSY                         = 2,
-   SVGA3D_CUBEFACE_NEGY                         = 3,
-   SVGA3D_CUBEFACE_POSZ                         = 4,
-   SVGA3D_CUBEFACE_NEGZ                         = 5,
-} SVGA3dCubeFace;
-
-typedef enum {
-   SVGA3D_SHADERTYPE_INVALID                    = 0,
-   SVGA3D_SHADERTYPE_MIN                        = 1,
-   SVGA3D_SHADERTYPE_VS                         = 1,
-   SVGA3D_SHADERTYPE_PS                         = 2,
-   SVGA3D_SHADERTYPE_MAX                        = 3,
-   SVGA3D_SHADERTYPE_GS                         = 3,
-} SVGA3dShaderType;
-
-#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
-
-typedef enum {
-   SVGA3D_CONST_TYPE_FLOAT                      = 0,
-   SVGA3D_CONST_TYPE_INT                        = 1,
-   SVGA3D_CONST_TYPE_BOOL                       = 2,
-   SVGA3D_CONST_TYPE_MAX
-} SVGA3dShaderConstType;
-
-#define SVGA3D_MAX_SURFACE_FACES                6
-
-typedef enum {
-   SVGA3D_STRETCH_BLT_POINT                     = 0,
-   SVGA3D_STRETCH_BLT_LINEAR                    = 1,
-   SVGA3D_STRETCH_BLT_MAX
-} SVGA3dStretchBltMode;
-
-typedef enum {
-   SVGA3D_QUERYTYPE_OCCLUSION                   = 0,
-   SVGA3D_QUERYTYPE_MAX
-} SVGA3dQueryType;
-
-typedef enum {
-   SVGA3D_QUERYSTATE_PENDING     = 0,      /* Waiting on the host (set by guest) */
-   SVGA3D_QUERYSTATE_SUCCEEDED   = 1,      /* Completed successfully (set by host) */
-   SVGA3D_QUERYSTATE_FAILED      = 2,      /* Completed unsuccessfully (set by host) */
-   SVGA3D_QUERYSTATE_NEW         = 3,      /* Never submitted (For guest use only) */
-} SVGA3dQueryState;
-
-typedef enum {
-   SVGA3D_WRITE_HOST_VRAM        = 1,
-   SVGA3D_READ_HOST_VRAM         = 2,
-} SVGA3dTransferType;
-
-/*
- * The maximum number of vertex arrays we're guaranteed to support in
- * SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_VERTEX_ARRAYS   32
-
-/*
- * The maximum number of primitive ranges we're guaranteed to support
- * in SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
-
-/*
- * Identifiers for commands in the command FIFO.
- *
- * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
- * the SVGA3D protocol and remain reserved; they should not be used in the
- * future.
- *
- * IDs between 1040 and 1999 (inclusive) are available for use by the
- * current SVGA3D protocol.
- *
- * FIFO clients other than SVGA3D should stay below 1000, or at 2000
- * and up.
- */
-
-#define SVGA_3D_CMD_LEGACY_BASE            1000
-#define SVGA_3D_CMD_BASE                   1040
-
-#define SVGA_3D_CMD_SURFACE_DEFINE         SVGA_3D_CMD_BASE + 0     /* Deprecated */
-#define SVGA_3D_CMD_SURFACE_DESTROY        SVGA_3D_CMD_BASE + 1
-#define SVGA_3D_CMD_SURFACE_COPY           SVGA_3D_CMD_BASE + 2
-#define SVGA_3D_CMD_SURFACE_STRETCHBLT     SVGA_3D_CMD_BASE + 3
-#define SVGA_3D_CMD_SURFACE_DMA            SVGA_3D_CMD_BASE + 4
-#define SVGA_3D_CMD_CONTEXT_DEFINE         SVGA_3D_CMD_BASE + 5
-#define SVGA_3D_CMD_CONTEXT_DESTROY        SVGA_3D_CMD_BASE + 6
-#define SVGA_3D_CMD_SETTRANSFORM           SVGA_3D_CMD_BASE + 7
-#define SVGA_3D_CMD_SETZRANGE              SVGA_3D_CMD_BASE + 8
-#define SVGA_3D_CMD_SETRENDERSTATE         SVGA_3D_CMD_BASE + 9
-#define SVGA_3D_CMD_SETRENDERTARGET        SVGA_3D_CMD_BASE + 10
-#define SVGA_3D_CMD_SETTEXTURESTATE        SVGA_3D_CMD_BASE + 11
-#define SVGA_3D_CMD_SETMATERIAL            SVGA_3D_CMD_BASE + 12
-#define SVGA_3D_CMD_SETLIGHTDATA           SVGA_3D_CMD_BASE + 13
-#define SVGA_3D_CMD_SETLIGHTENABLED        SVGA_3D_CMD_BASE + 14
-#define SVGA_3D_CMD_SETVIEWPORT            SVGA_3D_CMD_BASE + 15
-#define SVGA_3D_CMD_SETCLIPPLANE           SVGA_3D_CMD_BASE + 16
-#define SVGA_3D_CMD_CLEAR                  SVGA_3D_CMD_BASE + 17
-#define SVGA_3D_CMD_PRESENT                SVGA_3D_CMD_BASE + 18    /* Deprecated */
-#define SVGA_3D_CMD_SHADER_DEFINE          SVGA_3D_CMD_BASE + 19
-#define SVGA_3D_CMD_SHADER_DESTROY         SVGA_3D_CMD_BASE + 20
-#define SVGA_3D_CMD_SET_SHADER             SVGA_3D_CMD_BASE + 21
-#define SVGA_3D_CMD_SET_SHADER_CONST       SVGA_3D_CMD_BASE + 22
-#define SVGA_3D_CMD_DRAW_PRIMITIVES        SVGA_3D_CMD_BASE + 23
-#define SVGA_3D_CMD_SETSCISSORRECT         SVGA_3D_CMD_BASE + 24
-#define SVGA_3D_CMD_BEGIN_QUERY            SVGA_3D_CMD_BASE + 25
-#define SVGA_3D_CMD_END_QUERY              SVGA_3D_CMD_BASE + 26
-#define SVGA_3D_CMD_WAIT_FOR_QUERY         SVGA_3D_CMD_BASE + 27
-#define SVGA_3D_CMD_PRESENT_READBACK       SVGA_3D_CMD_BASE + 28    /* Deprecated */
-#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
-#define SVGA_3D_CMD_SURFACE_DEFINE_V2      SVGA_3D_CMD_BASE + 30
-#define SVGA_3D_CMD_GENERATE_MIPMAPS       SVGA_3D_CMD_BASE + 31
-#define SVGA_3D_CMD_ACTIVATE_SURFACE       SVGA_3D_CMD_BASE + 40
-#define SVGA_3D_CMD_DEACTIVATE_SURFACE     SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_SCREEN_DMA               1082
-#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
-#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE     1084
-
-#define SVGA_3D_CMD_LOGICOPS_BITBLT          1085
-#define SVGA_3D_CMD_LOGICOPS_TRANSBLT        1086
-#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT      1087
-#define SVGA_3D_CMD_LOGICOPS_COLORFILL       1088
-#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND      1089
-#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND  1090
-
-#define SVGA_3D_CMD_SET_OTABLE_BASE          1091
-#define SVGA_3D_CMD_READBACK_OTABLE          1092
-
-#define SVGA_3D_CMD_DEFINE_GB_MOB            1093
-#define SVGA_3D_CMD_DESTROY_GB_MOB           1094
-#define SVGA_3D_CMD_REDEFINE_GB_MOB          1095
-#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING    1096
-
-#define SVGA_3D_CMD_DEFINE_GB_SURFACE        1097
-#define SVGA_3D_CMD_DESTROY_GB_SURFACE       1098
-#define SVGA_3D_CMD_BIND_GB_SURFACE          1099
-#define SVGA_3D_CMD_COND_BIND_GB_SURFACE     1100
-#define SVGA_3D_CMD_UPDATE_GB_IMAGE          1101
-#define SVGA_3D_CMD_UPDATE_GB_SURFACE        1102
-#define SVGA_3D_CMD_READBACK_GB_IMAGE        1103
-#define SVGA_3D_CMD_READBACK_GB_SURFACE      1104
-#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE      1105
-#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE    1106
-
-#define SVGA_3D_CMD_DEFINE_GB_CONTEXT        1107
-#define SVGA_3D_CMD_DESTROY_GB_CONTEXT       1108
-#define SVGA_3D_CMD_BIND_GB_CONTEXT          1109
-#define SVGA_3D_CMD_READBACK_GB_CONTEXT      1110
-#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT    1111
-
-#define SVGA_3D_CMD_DEFINE_GB_SHADER         1112
-#define SVGA_3D_CMD_DESTROY_GB_SHADER        1113
-#define SVGA_3D_CMD_BIND_GB_SHADER           1114
-
-#define SVGA_3D_CMD_SET_OTABLE_BASE64        1115
-
-#define SVGA_3D_CMD_BEGIN_GB_QUERY           1116
-#define SVGA_3D_CMD_END_GB_QUERY             1117
-#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY        1118
-
-#define SVGA_3D_CMD_NOP                      1119
-
-#define SVGA_3D_CMD_ENABLE_GART              1120
-#define SVGA_3D_CMD_DISABLE_GART             1121
-#define SVGA_3D_CMD_MAP_MOB_INTO_GART        1122
-#define SVGA_3D_CMD_UNMAP_GART_RANGE         1123
-
-#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET   1124
-#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET  1125
-#define SVGA_3D_CMD_BIND_GB_SCREENTARGET     1126
-#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET   1127
-
-#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL   1128
-#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
-
-#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE  1130
-#define SVGA_3D_CMD_GB_SCREEN_DMA               1131
-#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH  1132
-#define SVGA_3D_CMD_GB_MOB_FENCE                1133
-#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2        1134
-#define SVGA_3D_CMD_DEFINE_GB_MOB64          1135
-#define SVGA_3D_CMD_REDEFINE_GB_MOB64        1136
-#define SVGA_3D_CMD_NOP_ERROR                1137
-
-#define SVGA_3D_CMD_RESERVED1                1138
-#define SVGA_3D_CMD_RESERVED2                1139
-#define SVGA_3D_CMD_RESERVED3                1140
-#define SVGA_3D_CMD_RESERVED4                1141
-#define SVGA_3D_CMD_RESERVED5                1142
-
-#define SVGA_3D_CMD_MAX                      1142
-#define SVGA_3D_CMD_FUTURE_MAX               3000
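
Given the ranges spelled out above, a sketch of a range check for current SVGA3D command IDs (illustration only; helper name hypothetical):

static int svga3d_cmd_id_valid(uint32 id)
{
   /* Legacy IDs 1000..1039 are reserved; current commands run from
    * SVGA_3D_CMD_BASE (1040) through SVGA_3D_CMD_MAX (1142). */
   return id >= SVGA_3D_CMD_BASE && id <= SVGA_3D_CMD_MAX;
}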
-
-/*
- * Common substructures used in multiple FIFO commands:
- */
-
-typedef struct {
-   union {
-      struct {
-         uint16  function;       /* SVGA3dFogFunction */
-         uint8   type;           /* SVGA3dFogType */
-         uint8   base;           /* SVGA3dFogBase */
-      };
-      uint32     uintValue;
-   };
-} SVGA3dFogMode;
-
-/*
- * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
- * is a surface ID as well as face/mipmap indices.
- */
-
-typedef
-struct SVGA3dSurfaceImageId {
-   uint32               sid;
-   uint32               face;
-   uint32               mipmap;
-} SVGA3dSurfaceImageId;
-
-typedef
-struct SVGA3dGuestImage {
-   SVGAGuestPtr         ptr;
-
-   /*
-    * A note on interpretation of pitch: This value of pitch is the
-    * number of bytes between vertically adjacent image
-    * blocks. Normally this is the number of bytes between the first
-    * pixel of two adjacent scanlines. With compressed textures,
-    * however, this may represent the number of bytes between
-    * compression blocks rather than between rows of pixels.
-    *
-    * XXX: Compressed textures currently must be tightly packed in guest memory.
-    *
-    * If the image is 1-dimensional, pitch is ignored.
-    *
-    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
-    * assuming each row of blocks is tightly packed.
-    */
-   uint32 pitch;
-} SVGA3dGuestImage;
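
Per the pitch note above, a zero pitch asks the device to assume tightly packed rows of blocks. A minimal sketch (the bytesPerBlockRow parameter is hypothetical; computing it depends on the surface format):

static uint32 effective_pitch(const SVGA3dGuestImage *img,
                              uint32 bytesPerBlockRow)
{
   /* pitch == 0: assume each row of blocks is tightly packed */
   return img->pitch ? img->pitch : bytesPerBlockRow;
}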
-
-
-/*
- * FIFO command format definitions:
- */
-
-/*
- * The data size header following cmdNum for every 3d command
- */
-typedef
-struct {
-   uint32               id;
-   uint32               size;
-} SVGA3dCmdHeader;
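
Every 3D command in the FIFO is this header followed by the command body; 'size' here is read as the byte count of the body that follows (an assumption stated by the sketch below, which frames one command into a flat byte buffer; helper name hypothetical):

#include <string.h>

static uint8 *emit_3d_cmd(uint8 *out, uint32 id, const void *body, uint32 size)
{
   SVGA3dCmdHeader hdr;
   hdr.id   = id;
   hdr.size = size;                      /* body size, not counting the header */
   memcpy(out, &hdr, sizeof hdr);
   memcpy(out + sizeof hdr, body, size);
   return out + sizeof hdr + size;       /* next free byte in the buffer */
}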
-
-/*
- * A surface is a hierarchy of host VRAM images: 1D, 2D, or 3D, with
- * optional mipmaps and cube faces.
- */
-
-typedef
-struct {
-   uint32               width;
-   uint32               height;
-   uint32               depth;
-} SVGA3dSize;
-
-typedef enum {
-   SVGA3D_SURFACE_CUBEMAP              = (1 << 0),
-   SVGA3D_SURFACE_HINT_STATIC          = (1 << 1),
-   SVGA3D_SURFACE_HINT_DYNAMIC         = (1 << 2),
-   SVGA3D_SURFACE_HINT_INDEXBUFFER     = (1 << 3),
-   SVGA3D_SURFACE_HINT_VERTEXBUFFER    = (1 << 4),
-   SVGA3D_SURFACE_HINT_TEXTURE         = (1 << 5),
-   SVGA3D_SURFACE_HINT_RENDERTARGET    = (1 << 6),
-   SVGA3D_SURFACE_HINT_DEPTHSTENCIL    = (1 << 7),
-   SVGA3D_SURFACE_HINT_WRITEONLY       = (1 << 8),
-   SVGA3D_SURFACE_MASKABLE_ANTIALIAS   = (1 << 9),
-   SVGA3D_SURFACE_AUTOGENMIPMAPS       = (1 << 10),
-} SVGA3dSurfaceFlags;
-
-typedef
-struct {
-   uint32               numMipLevels;
-} SVGA3dSurfaceFace;
-
-typedef
-struct {
-   uint32                      sid;
-   SVGA3dSurfaceFlags          surfaceFlags;
-   SVGA3dSurfaceFormat         format;
-   /*
-    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
-    * SVGA3dSurfaceFace structures must have the same value in the
-    * numMipLevels field. Otherwise, all but the first SVGA3dSurfaceFace
-    * structure must have numMipLevels set to 0.
-    */
-   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
-   /*
-    * Followed by an SVGA3dSize structure for each mip level in each face.
-    *
-    * A note on surface sizes: Sizes are always specified in pixels,
-    * even if the true surface size is not a multiple of the minimum
-    * block size of the surface's format. For example, a 3x3x1 DXT1
-    * compressed texture would actually be stored as a 4x4x1 image in
-    * memory.
-    */
-} SVGA3dCmdDefineSurface;       /* SVGA_3D_CMD_SURFACE_DEFINE */
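
The variable-length tail can be sized from the face table: each face contributes numMipLevels trailing SVGA3dSize entries. A sketch (helper name hypothetical):

static uint32 define_surface_num_sizes(const SVGA3dCmdDefineSurface *cmd)
{
   uint32 i, n = 0;

   for (i = 0; i < SVGA3D_MAX_SURFACE_FACES; i++)
      n += cmd->face[i].numMipLevels;  /* non-cubemap faces 1..5 stay 0 */
   return n;                           /* count of trailing SVGA3dSize entries */
}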
-
-typedef
-struct {
-   uint32                      sid;
-   SVGA3dSurfaceFlags          surfaceFlags;
-   SVGA3dSurfaceFormat         format;
-   /*
-    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
-    * SVGA3dSurfaceFace structures must have the same value in the
-    * numMipLevels field. Otherwise, all but the first SVGA3dSurfaceFace
-    * structure must have numMipLevels set to 0.
-    */
-   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
-   uint32                      multisampleCount;
-   SVGA3dTextureFilter         autogenFilter;
-   /*
-    * Followed by an SVGA3dSize structure for each mip level in each face.
-    *
-    * A note on surface sizes: Sizes are always specified in pixels,
-    * even if the true surface size is not a multiple of the minimum
-    * block size of the surface's format. For example, a 3x3x1 DXT1
-    * compressed texture would actually be stored as a 4x4x1 image in
-    * memory.
-    */
-} SVGA3dCmdDefineSurface_v2;     /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
-
-typedef
-struct {
-   uint32               sid;
-} SVGA3dCmdDestroySurface;      /* SVGA_3D_CMD_SURFACE_DESTROY */
-
-typedef
-struct {
-   uint32               cid;
-} SVGA3dCmdDefineContext;       /* SVGA_3D_CMD_CONTEXT_DEFINE */
-
-typedef
-struct {
-   uint32               cid;
-} SVGA3dCmdDestroyContext;      /* SVGA_3D_CMD_CONTEXT_DESTROY */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dClearFlag      clearFlag;
-   uint32               color;
-   float                depth;
-   uint32               stencil;
-   /* Followed by variable number of SVGA3dRect structures */
-} SVGA3dCmdClear;               /* SVGA_3D_CMD_CLEAR */
-
-typedef
-struct SVGA3dCopyRect {
-   uint32               x;
-   uint32               y;
-   uint32               w;
-   uint32               h;
-   uint32               srcx;
-   uint32               srcy;
-} SVGA3dCopyRect;
-
-typedef
-struct SVGA3dCopyBox {
-   uint32               x;
-   uint32               y;
-   uint32               z;
-   uint32               w;
-   uint32               h;
-   uint32               d;
-   uint32               srcx;
-   uint32               srcy;
-   uint32               srcz;
-} SVGA3dCopyBox;
-
-typedef
-struct {
-   uint32               x;
-   uint32               y;
-   uint32               w;
-   uint32               h;
-} SVGA3dRect;
-
-typedef
-struct {
-   uint32               x;
-   uint32               y;
-   uint32               z;
-   uint32               w;
-   uint32               h;
-   uint32               d;
-} SVGA3dBox;
-
-typedef
-struct {
-   uint32               x;
-   uint32               y;
-   uint32               z;
-} SVGA3dPoint;
-
-typedef
-struct {
-   SVGA3dLightType      type;
-   SVGA3dBool           inWorldSpace;
-   float                diffuse[4];
-   float                specular[4];
-   float                ambient[4];
-   float                position[4];
-   float                direction[4];
-   float                range;
-   float                falloff;
-   float                attenuation0;
-   float                attenuation1;
-   float                attenuation2;
-   float                theta;
-   float                phi;
-} SVGA3dLightData;
-
-typedef
-struct {
-   uint32               sid;
-   /* Followed by variable number of SVGA3dCopyRect structures */
-} SVGA3dCmdPresent;             /* SVGA_3D_CMD_PRESENT */
-
-typedef
-struct {
-   SVGA3dRenderStateName   state;
-   union {
-      uint32               uintValue;
-      float                floatValue;
-   };
-} SVGA3dRenderState;
-
-typedef
-struct {
-   uint32               cid;
-   /* Followed by variable number of SVGA3dRenderState structures */
-} SVGA3dCmdSetRenderState;      /* SVGA_3D_CMD_SETRENDERSTATE */
-
-typedef
-struct {
-   uint32                 cid;
-   SVGA3dRenderTargetType type;
-   SVGA3dSurfaceImageId   target;
-} SVGA3dCmdSetRenderTarget;     /* SVGA_3D_CMD_SETRENDERTARGET */
-
-typedef
-struct {
-   SVGA3dSurfaceImageId  src;
-   SVGA3dSurfaceImageId  dest;
-   /* Followed by variable number of SVGA3dCopyBox structures */
-} SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */
-
-typedef
-struct {
-   SVGA3dSurfaceImageId  src;
-   SVGA3dSurfaceImageId  dest;
-   SVGA3dBox             boxSrc;
-   SVGA3dBox             boxDest;
-   SVGA3dStretchBltMode  mode;
-} SVGA3dCmdSurfaceStretchBlt;         /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
-
-typedef
-struct {
-   /*
-    * If the discard flag is present in a surface DMA operation, the host may
-    * discard the contents of the current mipmap level and face of the target
-    * surface before applying the surface DMA contents.
-    */
-   uint32 discard : 1;
-
-   /*
-    * If the unsynchronized flag is present, the host may perform this upload
-    * without syncing to pending reads on this surface.
-    */
-   uint32 unsynchronized : 1;
-
-   /*
-    * Guests *MUST* set the reserved bits to 0 before submitting the command
-    * suffix, as future flags may occupy these bits.
-    */
-   uint32 reserved : 30;
-} SVGA3dSurfaceDMAFlags;
-
-typedef
-struct {
-   SVGA3dGuestImage      guest;
-   SVGA3dSurfaceImageId  host;
-   SVGA3dTransferType    transfer;
-   /*
-    * Followed by variable number of SVGA3dCopyBox structures. For consistency
-    * in all clipping logic and coordinate translation, we define the
-    * "source" in each copyBox as the guest image and the
-    * "destination" as the host image, regardless of transfer
-    * direction.
-    *
-    * For efficiency, the SVGA3D device is free to copy more data than
-    * specified. For example, it may round copy boxes outwards such
-    * that they lie on particular alignment boundaries.
-    */
-} SVGA3dCmdSurfaceDMA;                /* SVGA_3D_CMD_SURFACE_DMA */
-
-/*
- * SVGA3dCmdSurfaceDMASuffix --
- *
- *    This is a command suffix that will appear after a SurfaceDMA command in
- *    the FIFO.  It contains some extra information that hosts may use to
- *    optimize performance or protect the guest.  This suffix exists to preserve
- *    backwards compatibility while also allowing for new functionality to be
- *    implemented.
- */
-
-typedef
-struct {
-   uint32 suffixSize;
-
-   /*
-    * The maximum offset is the largest offset from the guestPtr base
-    * address that will be accessed or written to during this surface
-    * DMA.  If the suffix is supported, the host will respect this
-    * boundary while performing surface DMAs.
-    *
-    * Defaults to MAX_UINT32
-    */
-   uint32 maximumOffset;
-
-   /*
-    * A set of flags that describes optimizations that the host may perform
-    * while performing this surface DMA operation.  For correctness, the guest
-    * should never rely on behaviour that differs when these flags are set.
-    *
-    * Defaults to 0
-    */
-   SVGA3dSurfaceDMAFlags flags;
-} SVGA3dCmdSurfaceDMASuffix;
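
A sketch of initializing the suffix to the documented defaults (maximumOffset = MAX_UINT32, flags = 0, reserved bits zeroed; helper name hypothetical):

static void init_dma_suffix(SVGA3dCmdSurfaceDMASuffix *sfx)
{
   sfx->suffixSize           = sizeof *sfx;
   sfx->maximumOffset        = 0xffffffffu; /* MAX_UINT32: no extra bound */
   sfx->flags.discard        = 0;
   sfx->flags.unsynchronized = 0;
   sfx->flags.reserved       = 0;           /* *MUST* be zero */
}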
-
-/*
- * SVGA_3D_CMD_DRAW_PRIMITIVES --
- *
- *   This command is the SVGA3D device's generic drawing entry point.
- *   It can draw multiple ranges of primitives, optionally using an
- *   index buffer, using an arbitrary collection of vertex buffers.
- *
- *   Each SVGA3dVertexDecl defines a distinct vertex array to bind
- *   during this draw call. The declarations specify which surface
- *   the vertex data lives in, what that vertex data is used for,
- *   and how to interpret it.
- *
- *   Each SVGA3dPrimitiveRange defines a collection of primitives
- *   to render using the same vertex arrays. An index buffer is
- *   optional.
- */
-
-typedef
-struct {
-   /*
-    * A range hint is an optional specification for the range of indices
-    * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
-    * that the entire array will be used.
-    *
-    * These are only hints. The SVGA3D device may use them for
-    * performance optimization if possible, but it's also allowed to
-    * ignore these values.
-    */
-   uint32               first;
-   uint32               last;
-} SVGA3dArrayRangeHint;
-
-typedef
-struct {
-   /*
-    * Define the origin and shape of a vertex or index array. Both
-    * 'offset' and 'stride' are in bytes. The provided surface will be
-    * reinterpreted as a flat array of bytes in the same format used
-    * by surface DMA operations. To avoid unnecessary conversions, the
-    * surface should be created with the SVGA3D_BUFFER format.
-    *
-    * Index 0 in the array starts 'offset' bytes into the surface.
-    * Index 1 begins at byte 'offset + stride', etc. Array indices may
-    * not be negative.
-    */
-   uint32               surfaceId;
-   uint32               offset;
-   uint32               stride;
-} SVGA3dArray;
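
The addressing rule in the comment reduces to a one-liner: element i begins at offset + i * stride bytes into the surface (illustration only):

static uint32 array_elem_byte_offset(const SVGA3dArray *a, uint32 i)
{
   /* index 0 starts at 'offset'; index 1 at 'offset + stride', etc. */
   return a->offset + i * a->stride;
}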
-
-typedef
-struct {
-   /*
-    * Describe a vertex array's data type, and define how it is to be
-    * used by the fixed function pipeline or the vertex shader. It
-    * isn't useful to have two VertexDecls with the same
-    * VertexArrayIdentity in one draw call.
-    */
-   SVGA3dDeclType       type;
-   SVGA3dDeclMethod     method;
-   SVGA3dDeclUsage      usage;
-   uint32               usageIndex;
-} SVGA3dVertexArrayIdentity;
-
-typedef
-struct {
-   SVGA3dVertexArrayIdentity  identity;
-   SVGA3dArray                array;
-   SVGA3dArrayRangeHint       rangeHint;
-} SVGA3dVertexDecl;
-
-typedef
-struct {
-   /*
-    * Define a group of primitives to render, from sequential indices.
-    *
-    * The values of 'primType' and 'primitiveCount' imply the
-    * total number of vertices that will be rendered.
-    */
-   SVGA3dPrimitiveType  primType;
-   uint32               primitiveCount;
-
-   /*
-    * Optional index buffer. If indexArray.surfaceId is
-    * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
-    * without an index buffer is identical to rendering with an index
-    * buffer containing the sequence [0, 1, 2, 3, ...].
-    *
-    * If an index buffer is in use, indexWidth specifies the width in
-    * bytes of each index value. It must be less than or equal to
-    * indexArray.stride.
-    *
-    * (Currently, the SVGA3D device requires index buffers to be tightly
-    * packed. In other words, indexWidth == indexArray.stride)
-    */
-   SVGA3dArray          indexArray;
-   uint32               indexWidth;
-
-   /*
-    * Optional index bias. This number is added to all indices from
-    * indexArray before they are used as vertex array indices. This
-    * can be used in multiple ways:
-    *
-    *  - When not using an indexArray, this bias can be used to
-    *    specify where in the vertex arrays to begin rendering.
-    *
-    *  - A positive number here is equivalent to increasing the
-    *    offset in each vertex array.
-    *
-    *  - A negative number can be used to render using a small
-    *    vertex array and an index buffer that contains large
-    *    values. This may be used by some applications that
-    *    crop a vertex buffer without modifying their index
-    *    buffer.
-    *
-    * Note that rendering with a negative bias value may be slower and
-    * use more memory than rendering with a positive or zero bias.
-    */
-   int32                indexBias;
-} SVGA3dPrimitiveRange;
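-
-/*
- * A minimal sketch of how the device resolves a vertex index under
- * the indexBias rules above. fetch_index() is a hypothetical helper
- * standing in for reading the i-th value from indexArray.
- */
-static inline int32
-svga3d_effective_vertex_index(const SVGA3dPrimitiveRange *range, uint32 i)
-{
-   extern uint32 fetch_index(const SVGA3dArray *array, uint32 i); /* hypothetical */
-
-   if (range->indexArray.surfaceId == SVGA3D_INVALID_ID)
-      return range->indexBias + (int32) i;   /* implicit [0, 1, 2, ...] */
-
-   return range->indexBias + (int32) fetch_index(&range->indexArray, i);
-}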
-
-typedef
-struct {
-   uint32               cid;
-   uint32               numVertexDecls;
-   uint32               numRanges;
-
-   /*
-    * The SVGA3dCmdDrawPrimitives structure is followed by up to
-    * three variable size arrays. In order, they are:
-    *
-    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
-    *    SVGA3D_MAX_VERTEX_ARRAYS;
-    * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
-    *    SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
-    * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
-    *    the frequency divisor for the corresponding vertex decl).
-    */
-} SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAWPRIMITIVES */
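-
-/*
- * A minimal sketch of laying out a DRAWPRIMITIVES command: the fixed
- * header is followed immediately by the vertex declaration and
- * primitive range arrays described above. fifo_reserve()/fifo_commit()
- * are hypothetical helpers assumed to handle the command id/size
- * framing.
- */
-static inline void
-svga3d_emit_draw(uint32 cid,
-                 const SVGA3dVertexDecl *decls, uint32 numDecls,
-                 const SVGA3dPrimitiveRange *ranges, uint32 numRanges)
-{
-   extern void *fifo_reserve(uint32 bytes);   /* hypothetical */
-   extern void fifo_commit(uint32 bytes);     /* hypothetical */
-
-   uint32 size = sizeof(SVGA3dCmdDrawPrimitives) +
-                 numDecls * sizeof(SVGA3dVertexDecl) +
-                 numRanges * sizeof(SVGA3dPrimitiveRange);
-   SVGA3dCmdDrawPrimitives *cmd = fifo_reserve(size);
-   SVGA3dVertexDecl *declDst = (SVGA3dVertexDecl *) (cmd + 1);
-   SVGA3dPrimitiveRange *rangeDst = (SVGA3dPrimitiveRange *) (declDst + numDecls);
-   uint32 i;
-
-   cmd->cid = cid;
-   cmd->numVertexDecls = numDecls;
-   cmd->numRanges = numRanges;
-   for (i = 0; i < numDecls; i++)
-      declDst[i] = decls[i];
-   for (i = 0; i < numRanges; i++)
-      rangeDst[i] = ranges[i];
-   fifo_commit(size);
-}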
-
-typedef
-struct {
-   uint32                   stage;
-   SVGA3dTextureStateName   name;
-   union {
-      uint32                value;
-      float                 floatValue;
-   };
-} SVGA3dTextureState;
-
-typedef
-struct {
-   uint32               cid;
-   /* Followed by variable number of SVGA3dTextureState structures */
-} SVGA3dCmdSetTextureState;      /* SVGA_3D_CMD_SETTEXTURESTATE */
-
-typedef
-struct {
-   uint32                   cid;
-   SVGA3dTransformType      type;
-   float                    matrix[16];
-} SVGA3dCmdSetTransform;          /* SVGA_3D_CMD_SETTRANSFORM */
-
-typedef
-struct {
-   float                min;
-   float                max;
-} SVGA3dZRange;
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dZRange         zRange;
-} SVGA3dCmdSetZRange;             /* SVGA_3D_CMD_SETZRANGE */
-
-typedef
-struct {
-   float                diffuse[4];
-   float                ambient[4];
-   float                specular[4];
-   float                emissive[4];
-   float                shininess;
-} SVGA3dMaterial;
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dFace           face;
-   SVGA3dMaterial       material;
-} SVGA3dCmdSetMaterial;           /* SVGA_3D_CMD_SETMATERIAL */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               index;
-   SVGA3dLightData      data;
-} SVGA3dCmdSetLightData;           /* SVGA_3D_CMD_SETLIGHTDATA */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               index;
-   uint32               enabled;
-} SVGA3dCmdSetLightEnabled;      /* SVGA_3D_CMD_SETLIGHTENABLED */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dRect           rect;
-} SVGA3dCmdSetViewport;           /* SVGA_3D_CMD_SETVIEWPORT */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dRect           rect;
-} SVGA3dCmdSetScissorRect;         /* SVGA_3D_CMD_SETSCISSORRECT */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               index;
-   float                plane[4];
-} SVGA3dCmdSetClipPlane;           /* SVGA_3D_CMD_SETCLIPPLANE */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               shid;
-   SVGA3dShaderType     type;
-   /* Followed by a variable number of DWORDs of shader bytecode */
-} SVGA3dCmdDefineShader;           /* SVGA_3D_CMD_SHADER_DEFINE */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               shid;
-   SVGA3dShaderType     type;
-} SVGA3dCmdDestroyShader;         /* SVGA_3D_CMD_SHADER_DESTROY */
-
-typedef
-struct {
-   uint32                  cid;
-   uint32                  reg;     /* register number */
-   SVGA3dShaderType        type;
-   SVGA3dShaderConstType   ctype;
-   uint32                  values[4];
-} SVGA3dCmdSetShaderConst;        /* SVGA_3D_CMD_SET_SHADER_CONST */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dShaderType     type;
-   uint32               shid;
-} SVGA3dCmdSetShader;             /* SVGA_3D_CMD_SET_SHADER */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-} SVGA3dCmdBeginQuery;           /* SVGA_3D_CMD_BEGIN_QUERY */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-   SVGAGuestPtr         guestResult;  /* Points to an SVGA3dQueryResult structure */
-} SVGA3dCmdEndQuery;                  /* SVGA_3D_CMD_END_QUERY */
-
-typedef
-struct {
-   uint32               cid;          /* Same parameters passed to END_QUERY */
-   SVGA3dQueryType      type;
-   SVGAGuestPtr         guestResult;
-} SVGA3dCmdWaitForQuery;              /* SVGA_3D_CMD_WAIT_FOR_QUERY */
-
-typedef
-struct {
-   uint32               totalSize;    /* Set by guest before query is ended. */
-   SVGA3dQueryState     state;        /* Set by host or guest. See SVGA3dQueryState. */
-   union {                            /* Set by host on exit from PENDING state */
-      uint32            result32;
-   };
-} SVGA3dQueryResult;
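-
-/*
- * A minimal sketch of the query flow implied by the three commands
- * above, assuming hypothetical submit_*() helpers: the guest
- * initializes the result structure, brackets the work with BEGIN/END,
- * and waits for the host to leave the PENDING state.
- */
-static inline uint32
-svga3d_run_query(uint32 cid, SVGA3dQueryType type,
-                 volatile SVGA3dQueryResult *result, SVGAGuestPtr guestResult)
-{
-   extern void submit_begin_query(uint32 cid, SVGA3dQueryType type);
-   extern void submit_end_query(uint32 cid, SVGA3dQueryType type,
-                                SVGAGuestPtr guestResult);
-
-   result->totalSize = sizeof(*result);       /* set by guest before END */
-   result->state = SVGA3D_QUERYSTATE_PENDING;
-
-   submit_begin_query(cid, type);
-   /* ... the rendering to be measured goes here ... */
-   submit_end_query(cid, type, guestResult);
-
-   while (result->state == SVGA3D_QUERYSTATE_PENDING)
-      ;   /* or issue SVGA_3D_CMD_WAIT_FOR_QUERY and sleep on a fence */
-
-   return result->result32;
-}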
-
-/*
- * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
- *
- *    This is a blit from an SVGA3D surface to a Screen Object. Just
- *    like GMR-to-screen blits, this blit may be directed at a
- *    specific screen or to the virtual coordinate space.
- *
- *    The blit copies from a rectangular region of an SVGA3D surface
- *    image to a rectangular region of a screen or screens.
- *
- *    This command takes an optional variable-length list of clipping
- *    rectangles after the body of the command. If no rectangles are
- *    specified, there is no clipping region. The entire destRect is
- *    drawn to. If one or more rectangles are included, they describe
- *    a clipping region. The clip rectangle coordinates are measured
- *    relative to the top-left corner of destRect.
- *
- *    This clipping region serves multiple purposes:
- *
- *      - It can be used to perform an irregularly shaped blit more
- *        efficiently than by issuing many separate blit commands.
- *
- *      - It is equivalent to allowing blits with non-integer
- *        source coordinates. You could blit just one half-pixel
- *        of a source, for example, by specifying a larger
- *        destination rectangle than you need, then removing
- *        part of it using a clip rectangle.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
- *
- * Limitations:
- *
- *    - Currently, no backend supports blits from a mipmap or face
- *      other than the first one.
- */
-
-typedef
-struct {
-   SVGA3dSurfaceImageId srcImage;
-   SVGASignedRect       srcRect;
-   uint32               destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
-   SVGASignedRect       destRect;     /* Supports scaling if src/dest are different sizes */
-   /* Clipping: zero or more SVGASignedRects follow */
-} SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
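-
-/*
- * A minimal sketch: the optional clip rectangles sit directly after
- * the fixed-size body, so the total command size is the body plus one
- * SVGASignedRect per clip rectangle.
- */
-static inline uint32
-svga3d_blit_to_screen_cmd_size(uint32 numClipRects)
-{
-   return sizeof(SVGA3dCmdBlitSurfaceToScreen) +
-          numClipRects * sizeof(SVGASignedRect);
-}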
-
-typedef
-struct {
-   uint32               sid;
-   SVGA3dTextureFilter  filter;
-} SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
-
-
-/*
- * Guest-backed surface definitions.
- */
-
-typedef uint32 SVGAMobId;
-
-typedef enum SVGAMobFormat {
-   SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
-   SVGA3D_MOBFMT_PTDEPTH_0 = 0,
-   SVGA3D_MOBFMT_PTDEPTH_1 = 1,
-   SVGA3D_MOBFMT_PTDEPTH_2 = 2,
-   SVGA3D_MOBFMT_RANGE     = 3,
-   SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
-   SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
-   SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
-   SVGA3D_MOBFMT_MAX,
-} SVGAMobFormat;
-
-/*
- * Sizes of opaque types.
- */
-
-#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
-#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
-#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
-#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
-#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
-#define SVGA3D_CONTEXT_DATA_SIZE 16384
-
-/*
- * SVGA3dCmdSetOTableBase --
- *
- * This command allows the guest to specify the base PPN of the
- * specified object table.
- */
-
-typedef enum {
-   SVGA_OTABLE_MOB           = 0,
-   SVGA_OTABLE_MIN           = 0,
-   SVGA_OTABLE_SURFACE       = 1,
-   SVGA_OTABLE_CONTEXT       = 2,
-   SVGA_OTABLE_SHADER        = 3,
-   SVGA_OTABLE_SCREEN_TARGET = 4,
-   SVGA_OTABLE_DX9_MAX       = 5,
-   SVGA_OTABLE_MAX           = 8
-} SVGAOTableType;
-
-typedef
-struct {
-   SVGAOTableType type;
-   PPN baseAddress;
-   uint32 sizeInBytes;
-   uint32 validSizeInBytes;
-   SVGAMobFormat ptDepth;
-} __packed
-SVGA3dCmdSetOTableBase;  /* SVGA_3D_CMD_SET_OTABLE_BASE */
-
-typedef
-struct {
-   SVGAOTableType type;
-   PPN64 baseAddress;
-   uint32 sizeInBytes;
-   uint32 validSizeInBytes;
-   SVGAMobFormat ptDepth;
-} __packed
-SVGA3dCmdSetOTableBase64;  /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
-
-typedef
-struct {
-   SVGAOTableType type;
-} __packed
-SVGA3dCmdReadbackOTable;  /* SVGA_3D_CMD_READBACK_OTABLE */
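-
-/*
- * A minimal sketch of programming an object table base with the
- * commands above, assuming a hypothetical submit() helper. The PPN64
- * variant is the natural choice when the table may live above the
- * 32-bit page range.
- */
-static inline void
-svga3d_set_otable_base64(SVGAOTableType type, PPN64 base,
-                         uint32 sizeInBytes, SVGAMobFormat ptDepth)
-{
-   extern void submit(const void *cmd, uint32 bytes);   /* hypothetical */
-
-   SVGA3dCmdSetOTableBase64 cmd = {
-      .type = type,
-      .baseAddress = base,
-      .sizeInBytes = sizeInBytes,
-      .validSizeInBytes = 0,
-      .ptDepth = ptDepth,
-   };
-
-   submit(&cmd, sizeof(cmd));
-}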
-
-/*
- * Define a memory object (Mob) in the OTable.
- */
-
-typedef
-struct SVGA3dCmdDefineGBMob {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBMob;   /* SVGA_3D_CMD_DEFINE_GB_MOB */
-
-
-/*
- * Destroys an object in the OTable.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBMob {
-   SVGAMobId mobid;
-} __packed
-SVGA3dCmdDestroyGBMob;   /* SVGA_3D_CMD_DESTROY_GB_MOB */
-
-/*
- * Redefine an object in the OTable.
- */
-
-typedef
-struct SVGA3dCmdRedefineGBMob {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdRedefineGBMob;   /* SVGA_3D_CMD_REDEFINE_GB_MOB */
-
-/*
- * Define a memory object (Mob) in the OTable with a PPN64 base.
- */
-
-typedef
-struct SVGA3dCmdDefineGBMob64 {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN64 base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBMob64;   /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
-
-/*
- * Redefine an object in the OTable with PPN64 base.
- */
-
-typedef
-struct SVGA3dCmdRedefineGBMob64 {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN64 base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdRedefineGBMob64;   /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
-
-/*
- * Notification that the page tables have been modified.
- */
-
-typedef
-struct SVGA3dCmdUpdateGBMobMapping {
-   SVGAMobId mobid;
-} __packed
-SVGA3dCmdUpdateGBMobMapping;   /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
-
-/*
- * Define a guest-backed surface.
- */
-
-typedef
-struct SVGA3dCmdDefineGBSurface {
-   uint32 sid;
-   SVGA3dSurfaceFlags surfaceFlags;
-   SVGA3dSurfaceFormat format;
-   uint32 numMipLevels;
-   uint32 multisampleCount;
-   SVGA3dTextureFilter autogenFilter;
-   SVGA3dSize size;
-} __packed
-SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
-
-/*
- * Destroy a guest-backed surface.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdDestroyGBSurface;   /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
-
-/*
- * Bind a guest-backed surface to an object.
- */
-
-typedef
-struct SVGA3dCmdBindGBSurface {
-   uint32 sid;
-   SVGAMobId mobid;
-} __packed
-SVGA3dCmdBindGBSurface;   /* SVGA_3D_CMD_BIND_GB_SURFACE */
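-
-/*
- * A minimal sketch of the guest-backed surface lifecycle implied by
- * the commands above: define the surface metadata, then bind it to a
- * previously defined mob that supplies the backing pages. submit() is
- * a hypothetical helper.
- */
-static inline void
-svga3d_create_gb_surface(uint32 sid, SVGAMobId mobid,
-                         SVGA3dSurfaceFormat format, SVGA3dSize size)
-{
-   extern void submit(const void *cmd, uint32 bytes);   /* hypothetical */
-
-   SVGA3dCmdDefineGBSurface def = {
-      .sid = sid,
-      .surfaceFlags = 0,
-      .format = format,
-      .numMipLevels = 1,
-      .multisampleCount = 1,
-      .autogenFilter = SVGA3D_TEX_FILTER_NONE,
-      .size = size,
-   };
-   SVGA3dCmdBindGBSurface bind = { .sid = sid, .mobid = mobid };
-
-   submit(&def, sizeof(def));
-   submit(&bind, sizeof(bind));
-}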
-
-/*
- * Conditionally bind a mob to a guest-backed surface if testMobid
- * matches the currently bound mob.  Optionally issue a readback on
- * the surface while it is still bound to the old mobid if the mobid
- * is changed by this command.
- */
-
-#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
-
-typedef
-struct {
-   uint32 sid;
-   SVGAMobId testMobid;
-   SVGAMobId mobid;
-   uint32 flags;
-} __packed
-SVGA3dCmdCondBindGBSurface;          /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
-
-/*
- * Update an image in a guest-backed surface.
- * (Inform the device that the guest contents have been updated.)
- */
-
-typedef
-struct SVGA3dCmdUpdateGBImage {
-   SVGA3dSurfaceImageId image;
-   SVGA3dBox box;
-} __packed
-SVGA3dCmdUpdateGBImage;   /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
-
-/*
- * Update an entire guest-backed surface.
- * (Inform the device that the guest contents have been updated.)
- */
-
-typedef
-struct SVGA3dCmdUpdateGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdUpdateGBSurface;   /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
-
-/*
- * Readback an image in a guest-backed surface.
- * (Request the device to flush the dirty contents into the guest.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBImage {
-   SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdReadbackGBImage;   /* SVGA_3D_CMD_READBACK_GB_IMAGE */
-
-/*
- * Readback an entire guest-backed surface.
- * (Request the device to flush the dirty contents into the guest.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdReadbackGBSurface;   /* SVGA_3D_CMD_READBACK_GB_SURFACE */
-
-/*
- * Readback a sub rect of an image in a guest-backed surface.  After
- * issuing this command, the driver is required to issue an update call
- * on the same region before issuing any other commands that reference
- * this surface; otherwise rendering is not guaranteed.
- */
-
-typedef
-struct SVGA3dCmdReadbackGBImagePartial {
-   SVGA3dSurfaceImageId image;
-   SVGA3dBox box;
-   uint32 invertBox;
-} __packed
-SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
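-
-/*
- * A minimal sketch of the readback-then-update pairing required by
- * the comment above, assuming a hypothetical submit() helper: read
- * the sub rect back, let the guest consume it, then update the same
- * region before the surface is referenced again.
- */
-static inline void
-svga3d_readback_box(SVGA3dSurfaceImageId image, SVGA3dBox box)
-{
-   extern void submit(const void *cmd, uint32 bytes);   /* hypothetical */
-
-   SVGA3dCmdReadbackGBImagePartial rb = {
-      .image = image, .box = box, .invertBox = 0,
-   };
-   SVGA3dCmdUpdateGBImage upd = { .image = image, .box = box };
-
-   submit(&rb, sizeof(rb));
-   /* ... guest inspects the flushed contents here ... */
-   submit(&upd, sizeof(upd));
-}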
-
-/*
- * Invalidate an image in a guest-backed surface.
- * (Notify the device that the contents can be lost.)
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBImage {
-   SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdInvalidateGBImage;   /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
-
-/*
- * Invalidate an entire guest-backed surface.
- * (Notify the device that the contents of all images can be lost.)
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
-
-/*
- * Invalidate a sub rect of an image in a guest-backed surface.  After
- * issuing this command, the driver is required to issue an update call
- * on the same region before issuing any other commands that reference
- * this surface; otherwise rendering is not guaranteed.
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBImagePartial {
-   SVGA3dSurfaceImageId image;
-   SVGA3dBox box;
-   uint32 invertBox;
-} __packed
-SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
-
-/*
- * Define a guest-backed context.
- */
-
-typedef
-struct SVGA3dCmdDefineGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdDefineGBContext;   /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
-
-/*
- * Destroy a guest-backed context.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdDestroyGBContext;   /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
-
-/*
- * Bind a guest-backed context.
- *
- * validContents should be set to 0 for new contexts,
- * and 1 if this is an old context which is getting paged
- * back onto the device.
- *
- * For new contexts, it is recommended that the driver
- * issue commands to initialize all interesting state
- * prior to rendering.
- */
-
-typedef
-struct SVGA3dCmdBindGBContext {
-   uint32 cid;
-   SVGAMobId mobid;
-   uint32 validContents;
-} __packed
-SVGA3dCmdBindGBContext;   /* SVGA_3D_CMD_BIND_GB_CONTEXT */
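-
-/*
- * A minimal sketch of paging an old context back onto the device,
- * per the validContents rules above. submit() is a hypothetical
- * helper.
- */
-static inline void
-svga3d_page_context_back_in(uint32 cid, SVGAMobId mobid)
-{
-   extern void submit(const void *cmd, uint32 bytes);   /* hypothetical */
-
-   SVGA3dCmdBindGBContext bind = {
-      .cid = cid,
-      .mobid = mobid,
-      .validContents = 1,   /* mob holds a previously read-back image */
-   };
-
-   submit(&bind, sizeof(bind));
-}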
-
-/*
- * Readback a guest-backed context.
- * (Request that the device flush the contents back into guest memory.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdReadbackGBContext;   /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
-
-/*
- * Invalidate a guest-backed context.
- */
-typedef
-struct SVGA3dCmdInvalidateGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdInvalidateGBContext;   /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
-
-/*
- * Define a guest-backed shader.
- */
-
-typedef
-struct SVGA3dCmdDefineGBShader {
-   uint32 shid;
-   SVGA3dShaderType type;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBShader;   /* SVGA_3D_CMD_DEFINE_GB_SHADER */
-
-/*
- * Bind a guest-backed shader.
- */
-
-typedef struct SVGA3dCmdBindGBShader {
-   uint32 shid;
-   SVGAMobId mobid;
-   uint32 offsetInBytes;
-} __packed
-SVGA3dCmdBindGBShader;   /* SVGA_3D_CMD_BIND_GB_SHADER */
-
-/*
- * Destroy a guest-backed shader.
- */
-
-typedef struct SVGA3dCmdDestroyGBShader {
-   uint32 shid;
-} __packed
-SVGA3dCmdDestroyGBShader;   /* SVGA_3D_CMD_DESTROY_GB_SHADER */
-
-typedef
-struct {
-   uint32                  cid;
-   uint32                  regStart;
-   SVGA3dShaderType        shaderType;
-   SVGA3dShaderConstType   constType;
-
-   /*
-    * Followed by a variable number of shader constants.
-    *
-    * Note that FLOAT and INT constants are 4-dwords in length, while
-    * BOOL constants are 1-dword in length.
-    */
-} __packed
-SVGA3dCmdSetGBShaderConstInline;
-/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
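-
-/*
- * A minimal sketch of sizing the trailing constant data per the note
- * above: FLOAT and INT constants occupy 4 dwords each, BOOL constants
- * 1 dword.
- */
-static inline uint32
-svga3d_gb_shader_const_bytes(SVGA3dShaderConstType ctype, uint32 numConsts)
-{
-   uint32 dwordsPerConst = (ctype == SVGA3D_CONST_TYPE_BOOL) ? 1 : 4;
-
-   return numConsts * dwordsPerConst * sizeof(uint32);
-}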
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-} __packed
-SVGA3dCmdBeginGBQuery;           /* SVGA_3D_CMD_BEGIN_GB_QUERY */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-   SVGAMobId mobid;
-   uint32 offset;
-} __packed
-SVGA3dCmdEndGBQuery;                  /* SVGA_3D_CMD_END_GB_QUERY */
-
-
-/*
- * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
- *
- *    The semantics of this command are identical to the
- *    SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
- *    to a Mob instead of a GMR.
- */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-   SVGAMobId mobid;
-   uint32 offset;
-} __packed
-SVGA3dCmdWaitForGBQuery;          /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
-
-typedef
-struct {
-   SVGAMobId mobid;
-   uint32 fbOffset;
-   uint32 initalized;
-} __packed
-SVGA3dCmdEnableGart;              /* SVGA_3D_CMD_ENABLE_GART */
-
-typedef
-struct {
-   SVGAMobId mobid;
-   uint32 gartOffset;
-} __packed
-SVGA3dCmdMapMobIntoGart;          /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
-
-
-typedef
-struct {
-   uint32 gartOffset;
-   uint32 numPages;
-} __packed
-SVGA3dCmdUnmapGartRange;          /* SVGA_3D_CMD_UNMAP_GART_RANGE */
-
-
-/*
- * Screen Targets
- */
-#define SVGA_STFLAG_PRIMARY (1 << 0)
-
-typedef
-struct {
-   uint32 stid;
-   uint32 width;
-   uint32 height;
-   int32 xRoot;
-   int32 yRoot;
-   uint32 flags;
-} __packed
-SVGA3dCmdDefineGBScreenTarget;    /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
-
-typedef
-struct {
-   uint32 stid;
-} __packed
-SVGA3dCmdDestroyGBScreenTarget;  /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
-
-typedef
-struct {
-   uint32 stid;
-   SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdBindGBScreenTarget;  /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
-
-typedef
-struct {
-   uint32 stid;
-   SVGA3dBox box;
-} __packed
-SVGA3dCmdUpdateGBScreenTarget;  /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
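-
-/*
- * A minimal sketch of bringing up a screen target with the commands
- * above: define the target, bind a surface image to it, then mark the
- * whole area dirty. submit() is a hypothetical helper.
- */
-static inline void
-svga3d_setup_screen_target(uint32 stid, uint32 width, uint32 height,
-                           SVGA3dSurfaceImageId image)
-{
-   extern void submit(const void *cmd, uint32 bytes);   /* hypothetical */
-
-   SVGA3dCmdDefineGBScreenTarget def = {
-      .stid = stid, .width = width, .height = height,
-      .xRoot = 0, .yRoot = 0, .flags = SVGA_STFLAG_PRIMARY,
-   };
-   SVGA3dCmdBindGBScreenTarget bind = { .stid = stid, .image = image };
-   SVGA3dCmdUpdateGBScreenTarget upd = {
-      .stid = stid,
-      .box = { .x = 0, .y = 0, .z = 0, .w = width, .h = height, .d = 1 },
-   };
-
-   submit(&def, sizeof(def));
-   submit(&bind, sizeof(bind));
-   submit(&upd, sizeof(upd));
-}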
-
-/*
- * Capability query index.
- *
- * Notes:
- *
- *   1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
- *      fixed-function texture units available. Each of these units
-      works in both FFP and Shader modes, and supports texture
- *      transforms and texture coordinates. The host may have additional
- *      texture image units that are only usable with shaders.
- *
- *   2. The BUFFER_FORMAT capabilities are deprecated, and they always
- *      return TRUE. Even on physical hardware that does not support
- *      these formats natively, the SVGA3D device will provide an emulation
- *      which should be invisible to the guest OS.
- *
- *      In general, the SVGA3D device should support any operation on
-    *      any surface format; it just may perform some of these
- *      operations in software depending on the capabilities of the
- *      available physical hardware.
- *
- *      XXX: In the future, we will add capabilities that describe in
- *      detail what formats are supported in hardware for what kinds
- *      of operations.
- */
-
-typedef enum {
-   SVGA3D_DEVCAP_3D                                = 0,
-   SVGA3D_DEVCAP_MAX_LIGHTS                        = 1,
-   SVGA3D_DEVCAP_MAX_TEXTURES                      = 2,  /* See note (1) */
-   SVGA3D_DEVCAP_MAX_CLIP_PLANES                   = 3,
-   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION             = 4,
-   SVGA3D_DEVCAP_VERTEX_SHADER                     = 5,
-   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION           = 6,
-   SVGA3D_DEVCAP_FRAGMENT_SHADER                   = 7,
-   SVGA3D_DEVCAP_MAX_RENDER_TARGETS                = 8,
-   SVGA3D_DEVCAP_S23E8_TEXTURES                    = 9,
-   SVGA3D_DEVCAP_S10E5_TEXTURES                    = 10,
-   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND             = 11,
-   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                 = 12, /* See note (2) */
-   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT               = 13, /* See note (2) */
-   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT               = 14, /* See note (2) */
-   SVGA3D_DEVCAP_QUERY_TYPES                       = 15,
-   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING         = 16,
-   SVGA3D_DEVCAP_MAX_POINT_SIZE                    = 17,
-   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES               = 18,
-   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                 = 19,
-   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                = 20,
-   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                 = 21,
-   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                = 22,
-   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO          = 23,
-   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY            = 24,
-   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT               = 25,
-   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                  = 26,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS    = 27,
-   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS  = 28,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS           = 29,
-   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS         = 30,
-   SVGA3D_DEVCAP_TEXTURE_OPS                       = 31,
-   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8               = 32,
-   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8               = 33,
-   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10            = 34,
-   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5               = 35,
-   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5               = 36,
-   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4               = 37,
-   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                 = 38,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16            = 39,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8      = 40,
-   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                 = 41,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8             = 42,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                  = 43,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                = 44,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                = 45,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT1                   = 46,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT2                   = 47,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT3                   = 48,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT4                   = 49,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT5                   = 50,
-   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8           = 51,
-   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10            = 52,
-   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8               = 53,
-   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8               = 54,
-   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                 = 55,
-   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                = 56,
-   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                = 57,
-   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5               = 58,
-   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8               = 59,
-   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5             = 60,
-   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8             = 61,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES        = 63,
-
-   /*
-    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
-    * render targets.  This does not include the depth or stencil targets.
-    */
-   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS   = 64,
-
-   SVGA3D_DEVCAP_SURFACEFMT_V16U16                 = 65,
-   SVGA3D_DEVCAP_SURFACEFMT_G16R16                 = 66,
-   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
-   SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
-   SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
-   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
-   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
-   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
-   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
-   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
-   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
-   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
-
-   /*
-    * This is the maximum number of SVGA context IDs that the guest
-    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
-    */
-   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
-
-   /*
-    * This is the maximum number of SVGA surface IDs that the guest
-    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
-    */
-   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
-
-   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
-
-   SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM              = 82,
-   SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM              = 83,
-
-   /*
-    * Deprecated.
-    */
-   SVGA3D_DEVCAP_VGPU10                            = 84,
-
-   /*
-    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
-    * ORed together, one for every type of video decoding supported.
-    */
-   SVGA3D_DEVCAP_VIDEO_DECODE                      = 85,
-
-   /*
-    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
-    * ORed together, one for every type of video processing supported.
-    */
-   SVGA3D_DEVCAP_VIDEO_PROCESS                     = 86,
-
-   SVGA3D_DEVCAP_LINE_AA                           = 87,  /* boolean */
-   SVGA3D_DEVCAP_LINE_STIPPLE                      = 88,  /* boolean */
-   SVGA3D_DEVCAP_MAX_LINE_WIDTH                    = 89,  /* float */
-   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                 = 90,  /* float */
-
-   SVGA3D_DEVCAP_SURFACEFMT_YV12                   = 91,
-
-   /*
-    * Does the host support the SVGA logic ops commands?
-    */
-   SVGA3D_DEVCAP_LOGICOPS                          = 92,
-
-   /*
-    * What support does the host have for screen targets?
-    *
-    * See the SVGA3D_SCREENTARGET_CAP bits below.
-    */
-   SVGA3D_DEVCAP_SCREENTARGETS                     = 93,
-
-   SVGA3D_DEVCAP_MAX                                  /* This must be the last index. */
-} SVGA3dDevCapIndex;
-
-typedef union {
-   Bool   b;
-   uint32 u;
-   int32  i;
-   float  f;
-} SVGA3dDevCapResult;
-
-typedef enum {
-   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
-   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
-   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
-   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
-} SVGA3dCapsRecordType;
-
-typedef
-struct SVGA3dCapsRecordHeader {
-   uint32 length;
-   SVGA3dCapsRecordType type;
-}
-SVGA3dCapsRecordHeader;
-
-typedef
-struct SVGA3dCapsRecord {
-   SVGA3dCapsRecordHeader header;
-   uint32 data[1];
-}
-SVGA3dCapsRecord;
-
-
-typedef uint32 SVGA3dCapPair[2];
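-
-/*
- * A minimal sketch of walking the caps records above, assuming that
- * 'length' counts uint32 entries including the header and that a zero
- * length terminates the list.
- */
-static inline const SVGA3dCapsRecord *
-svga3d_find_caps_record(const uint32 *caps, SVGA3dCapsRecordType type)
-{
-   const SVGA3dCapsRecord *rec = (const SVGA3dCapsRecord *) caps;
-
-   while (rec->header.length != 0) {
-      if (rec->header.type == type)
-         return rec;
-      rec = (const SVGA3dCapsRecord *)
-            ((const uint32 *) rec + rec->header.length);
-   }
-   return NULL;
-}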
-
-#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
deleted file mode 100644
index ef3385096145..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ /dev/null
@@ -1,912 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
-
-#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
-#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
-#define min_t(type, x, y)  ((x) < (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
-#define u64 uint64_t
-#define U32_MAX ((u32)~0U)
-
-#endif /* __KERNEL__ */
-
-#include "svga3d_reg.h"
-
-/*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at most four active channels in a block:
- *    1. Blue and bump U are stored in the first channel.
- *    2. Green, bump V and stencil are stored in the second channel.
- *    3. Red, bump W, luminance and depth are stored in the third channel.
- *    4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- *    1. For compressed formats, only the data channel is used and its size
- *       is equal to that of a singular block in the compression scheme.
- *    2. For buffer formats, only the data channel is used and its size is
- *       exactly one byte in length.
- *    3. In each case the bit depth represents the size of a singular block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
- */
-
-enum svga3d_block_desc {
-	SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
-	SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with blue channel
-						    data */
-	SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
-						    data */
-	SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
-						    U and V */
-	SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
-						    data */
-	SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
-						    data */
-	SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
-						    channel */
-	SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with red channel
-						    data */
-	SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
-						    data */
-	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
-						    data */
-	SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
-						    data */
-	SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
-	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
-						    channel */
-	SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
-						    data */
-	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
-						    data */
-	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
-						    data depending on the
-						    compression method used */
-	SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
-						    floating point
-						    representation in
-						    all channels */
-	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store
-						    data. */
-	SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
-	SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
-	SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
-	SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
-	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
-						    e.g., NV12. */
-	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
-						    Y, U, V, e.g., YV12. */
-
-	SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
-	SVGA3DBLOCKDESC_GREEN,
-	SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
-	SVGA3DBLOCKDESC_BLUE,
-	SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
-	SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
-	SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
-	SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
-	SVGA3DBLOCKDESC_V,
-	SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
-	SVGA3DBLOCKDESC_LUMINANCE,
-	SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
-	SVGA3DBLOCKDESC_W,
-	SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
-	SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
-	SVGA3DBLOCKDESC_V |
-	SVGA3DBLOCKDESC_W |
-	SVGA3DBLOCKDESC_Q,
-	SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
-	SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
-	SVGA3DBLOCKDESC_IEEE_FP,
-	SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
-	SVGA3DBLOCKDESC_GREEN,
-	SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
-	SVGA3DBLOCKDESC_BLUE,
-	SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
-	SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
-	SVGA3DBLOCKDESC_STENCIL,
-	SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
-	SVGA3DBLOCKDESC_Y,
-	SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
-	SVGA3DBLOCKDESC_Y |
-	SVGA3DBLOCKDESC_U_VIDEO |
-	SVGA3DBLOCKDESC_V_VIDEO,
-	SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
-	SVGA3DBLOCKDESC_EXP,
-	SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
-	SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
-	SVGA3DBLOCKDESC_2PLANAR_YUV,
-	SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
-	SVGA3DBLOCKDESC_3PLANAR_YUV,
-};
-
-/*
- * struct svga3d_surface_desc describes the actual pixel data.
- *
- * This structure provides the following information:
- *    1. Block description.
- *    2. Dimensions of a block in the surface.
- *    3. Size of block in bytes.
- *    4. Bit depth of the pixel data.
- *    5. Channel bit depths and masks (if applicable).
- */
-#define SVGA3D_CHANNEL_DEF(type)		\
-	struct {				\
-		union {				\
-			type blue;              \
-			type u;                 \
-			type uv_video;          \
-			type u_video;           \
-		};				\
-		union {				\
-			type green;             \
-			type v;                 \
-			type stencil;           \
-			type v_video;           \
-		};				\
-		union {				\
-			type red;               \
-			type w;                 \
-			type luminance;         \
-			type y;                 \
-			type depth;             \
-			type data;              \
-		};				\
-		union {				\
-			type alpha;             \
-			type q;                 \
-			type exp;               \
-		};				\
-	}
-
-struct svga3d_surface_desc {
-	enum svga3d_block_desc block_desc;
-	surf_size_struct block_size;
-	u32 bytes_per_block;
-	u32 pitch_bytes_per_block;
-
-	struct {
-		u32 total;
-		SVGA3D_CHANNEL_DEF(uint8);
-	} bit_depth;
-
-	struct {
-		SVGA3D_CHANNEL_DEF(uint8);
-	} bit_offset;
-};
-
-static const struct svga3d_surface_desc svga3d_surface_descs[] = {
-	{SVGA3DBLOCKDESC_NONE,
-	 {1, 1, 1},  0, 0, {0, {{0}, {0}, {0}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_FORMAT_INVALID */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_X8R8G8B8 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_A8R8G8B8 */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  2, 2, {16, {{5}, {6}, {5}, {0} } },
-	 {{{0}, {5}, {11}, {0} } } },    /* SVGA3D_R5G6B5 */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  2, 2, {15, {{5}, {5}, {5}, {0} } },
-	 {{{0}, {5}, {10}, {0} } } },    /* SVGA3D_X1R5G5B5 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  2, 2, {16, {{5}, {5}, {5}, {1} } },
-	 {{{0}, {5}, {10}, {15} } } },   /* SVGA3D_A1R5G5B5 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  2, 2, {16, {{4}, {4}, {4}, {4} } },
-	 {{{0}, {4}, {8}, {12} } } },    /* SVGA3D_A4R4G4B4 */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D32 */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D16 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  2, 2, {16, {{0}, {1}, {15}, {0} } },
-	 {{{0}, {15}, {0}, {0} } } },    /* SVGA3D_Z_D15S1 */
-
-	{SVGA3DBLOCKDESC_LUMINANCE,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE8 */
-
-	{SVGA3DBLOCKDESC_LA,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {4}, {4} } },
-	 {{{0}, {0}, {0}, {4} } } },     /* SVGA3D_LUMINANCE4_ALPHA4 */
-
-	{SVGA3DBLOCKDESC_LUMINANCE,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE16 */
-
-	{SVGA3DBLOCKDESC_LA,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
-	 {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_LUMINANCE8_ALPHA8 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT1 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT2 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT3 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT4 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT5 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
-	 {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_BUMPU8V8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  2, 2, {16, {{5}, {5}, {6}, {0} } },
-	 {{{11}, {6}, {0}, {0} } } },    /* SVGA3D_BUMPL6V5U5 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPX8L8V8U8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  3, 3, {24, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPL8V8U8 */
-
-	{SVGA3DBLOCKDESC_RGBA_FP,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_ARGB_S10E5 */
-
-	{SVGA3DBLOCKDESC_RGBA_FP,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_ARGB_S23E8 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2R10G10B10 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_V8U8 */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{24}, {16}, {8}, {0} } } },   /* SVGA3D_Q8W8V8U8 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_CxV8U8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_X8L8V8U8 */
-
-	{SVGA3DBLOCKDESC_UVWA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2W10V10U10 */
-
-	{SVGA3DBLOCKDESC_ALPHA,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {0}, {8} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_ALPHA8 */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S10E5 */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S23E8 */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_RG_S10E5 */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_RG_S23E8 */
-
-	{SVGA3DBLOCKDESC_BUFFER,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BUFFER */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24X8 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  4, 4, {32, {{16}, {16}, {0}, {0} } },
-	 {{{16}, {0}, {0}, {0} } } },    /* SVGA3D_V16U16 */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {0}, {16}, {0} } } },    /* SVGA3D_G16R16 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_A16B16G16R16 */
-
-	{SVGA3DBLOCKDESC_YUV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {8}, {0} } } },     /* SVGA3D_UYVY */
-
-	{SVGA3DBLOCKDESC_YUV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_YUY2 */
-
-	{SVGA3DBLOCKDESC_NV12,
-	 {2, 2, 1},  6, 2, {48, {{0}, {0}, {48}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_NV12 */
-
-	{SVGA3DBLOCKDESC_AYUV,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_AYUV */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_UINT */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_SINT */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGB_FP,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_FLOAT */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_UINT */
-
-	{SVGA3DBLOCKDESC_UVW,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_SINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_UINT */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SNORM */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G8X24_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_D32_FLOAT_S8X24_UINT */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1},  8, 8, {64, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },    /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
-
-	{SVGA3DBLOCKDESC_GREEN,
-	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {0}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_X32_TYPELESS_G8X24_UINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_UINT */
-
-	{SVGA3DBLOCKDESC_RGB_FP,
-	 {1, 1, 1},  4, 4, {32, {{10}, {11}, {11}, {0} } },
-	 {{{0}, {10}, {21}, {0} } } },  /* SVGA3D_R11G11B10_FLOAT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM */
-
-	{SVGA3DBLOCKDESC_RGBA_SRGB,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_D32_FLOAT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_UINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_R24G8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_D24_UNORM_S8_UINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R24_UNORM_X8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_GREEN,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {0}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_X24_TYPELESS_G8_UINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UNORM */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UNORM */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UINT */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SNORM */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UNORM */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UINT */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SNORM */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {8, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R1_UNORM */
-
-	{SVGA3DBLOCKDESC_RGBE,
-	 {1, 1, 1},  4, 4, {32, {{9}, {9}, {9}, {5} } },
-	 {{{18}, {9}, {0}, {27} } } },   /* SVGA3D_R9G9B9E5_SHAREDEXP */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_B8G8_UNORM */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_G8R8_G8B8_UNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_UNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_SNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_UNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_SNORM */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA_SRGB,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGB_SRGB,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_DF16 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_DF24 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8_INT */
-};
-
-static inline u32 clamped_umul32(u32 a, u32 b)
-{
-	u64 tmp = (u64) a*b;
-	return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
-}
-
-static inline const struct svga3d_surface_desc *
-svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
-{
-	if (format < ARRAY_SIZE(svga3d_surface_descs))
-		return &svga3d_surface_descs[format];
-
-	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * svga3dsurface_get_mip_size --
- *
- *      Given a base level size and the mip level, compute the size of
- *      the mip level.
- *
- * Results:
- *      See above.
- *
- * Side effects:
- *      None.
- *
- *----------------------------------------------------------------------
- */
-
-static inline surf_size_struct
-svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
-{
-	surf_size_struct size;
-
-	size.width = max_t(u32, base_level.width >> mip_level, 1);
-	size.height = max_t(u32, base_level.height >> mip_level, 1);
-	size.depth = max_t(u32, base_level.depth >> mip_level, 1);
-	return size;
-}
-
-static inline void
-svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
-				 const surf_size_struct *pixel_size,
-				 surf_size_struct *block_size)
-{
-	block_size->width = DIV_ROUND_UP(pixel_size->width,
-					 desc->block_size.width);
-	block_size->height = DIV_ROUND_UP(pixel_size->height,
-					  desc->block_size.height);
-	block_size->depth = DIV_ROUND_UP(pixel_size->depth,
-					 desc->block_size.depth);
-}
-
-static inline bool
-svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
-{
-	return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
-}
-
-static inline u32
-svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
-			      const surf_size_struct *size)
-{
-	u32 pitch;
-	surf_size_struct blocks;
-
-	svga3dsurface_get_size_in_blocks(desc, size, &blocks);
-
-	pitch = blocks.width * desc->pitch_bytes_per_block;
-
-	return pitch;
-}
-
-/*
- *-----------------------------------------------------------------------------
- *
- * svga3dsurface_get_image_buffer_size --
- *
- *      Return the number of bytes of buffer space required to store
- *      one image of a surface, optionally using the specified pitch.
- *
- *      If pitch is zero, it is assumed that rows are tightly packed.
- *
- *      This function is overflow-safe. If the result would have
- *      overflowed, we return U32_MAX instead.
- *
- * Results:
- *      Byte count.
- *
- * Side effects:
- *      None.
- *
- *-----------------------------------------------------------------------------
- */
-
-static inline u32
-svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
-				    const surf_size_struct *size,
-				    u32 pitch)
-{
-	surf_size_struct image_blocks;
-	u32 slice_size, total_size;
-
-	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
-
-	if (svga3dsurface_is_planar_surface(desc)) {
-		total_size = clamped_umul32(image_blocks.width,
-					    image_blocks.height);
-		total_size = clamped_umul32(total_size, image_blocks.depth);
-		total_size = clamped_umul32(total_size, desc->bytes_per_block);
-		return total_size;
-	}
-
-	if (pitch == 0)
-		pitch = svga3dsurface_calculate_pitch(desc, size);
-
-	slice_size = clamped_umul32(image_blocks.height, pitch);
-	total_size = clamped_umul32(slice_size, image_blocks.depth);
-
-	return total_size;
-}
-
-static inline u32
-svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
-				  surf_size_struct base_level_size,
-				  u32 num_mip_levels,
-				  bool cubemap)
-{
-	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
-	u64 total_size = 0;
-	u32 mip;
-
-	for (mip = 0; mip < num_mip_levels; mip++) {
-		surf_size_struct size =
-			svga3dsurface_get_mip_size(base_level_size, mip);
-		total_size += svga3dsurface_get_image_buffer_size(desc,
-								  &size, 0);
-	}
-
-	if (cubemap)
-		total_size *= SVGA3D_MAX_SURFACE_FACES;
-
-	return (u32) min_t(u64, total_size, (u64) U32_MAX);
-}
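
The helpers above compute the serialized size by walking the whole mip chain, halving each dimension per level and clamping at 1, then multiplying by the face count for cubemaps. A minimal standalone sketch of that sizing logic, assuming an uncompressed 1-byte-per-texel format and illustrative names (not driver code):

	#include <stdint.h>

	struct size3d { uint32_t w, h, d; };

	/* A mip level dimension halves per level and clamps at 1. */
	static uint32_t mip_dim(uint32_t base, uint32_t level)
	{
		uint32_t v = base >> level;
		return v ? v : 1;
	}

	/* Total bytes of a full mip chain at 1 byte per texel. */
	static uint64_t chain_bytes(struct size3d base, uint32_t levels)
	{
		uint64_t total = 0;
		uint32_t i;

		for (i = 0; i < levels; i++)
			total += (uint64_t)mip_dim(base.w, i) *
				 mip_dim(base.h, i) * mip_dim(base.d, i);
		return total;
	}

For a 256x256x1 base with 9 levels this yields 256*256 + 128*128 + ... + 1*1 = 87381 bytes, matching the clamped accumulation performed above.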
-
-
-/**
- * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
- * in an image (or volume).
- *
- * @width: The image width in pixels.
- * @height: The image height in pixels.
- */
-static inline u32
-svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
-			       u32 width, u32 height,
-			       u32 x, u32 y, u32 z)
-{
-	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
-	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
-	const u32 bd = desc->block_size.depth;
-	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
-	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
-	const u32 offset = (z / bd * imgstride +
-			    y / bh * rowstride +
-			    x / bw * desc->bytes_per_block);
-	return offset;
-}
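
To make the offset arithmetic concrete, here is a worked example assuming a DXT1-like block layout (4x4x1 pixel blocks, 8 bytes per block; the figures are illustrative, not taken from the surface descriptor tables):

	/*
	 * width = 256  ->  rowstride = DIV_ROUND_UP(256, 4) * 8 = 512 bytes
	 * (x, y, z) = (10, 6, 0):
	 *   offset = (0 / 1) * imgstride + (6 / 4) * 512 + (10 / 4) * 8
	 *          = 0 + 512 + 16 = 528
	 *
	 * i.e. the offset of the 4x4 block that contains the pixel, since
	 * block-compressed pixels are not individually addressable.
	 */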
-
-
-static inline u32
-svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
-			       surf_size_struct baseLevelSize,
-			       u32 numMipLevels,
-			       u32 face,
-			       u32 mip)
-
-{
-	u32 offset;
-	u32 mipChainBytes;
-	u32 mipChainBytesToLevel;
-	u32 i;
-	const struct svga3d_surface_desc *desc;
-	surf_size_struct mipSize;
-	u32 bytes;
-
-	desc = svga3dsurface_get_desc(format);
-
-	mipChainBytes = 0;
-	mipChainBytesToLevel = 0;
-	for (i = 0; i < numMipLevels; i++) {
-		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
-		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
-		mipChainBytes += bytes;
-		if (i < mip)
-			mipChainBytesToLevel += bytes;
-	}
-
-	offset = mipChainBytes * face + mipChainBytesToLevel;
-
-	return offset;
-}
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
deleted file mode 100644
index 55836dedcfc2..000000000000
--- a/drivers/gpu/drm/vmwgfx/svga_types.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * Silly typedefs for the svga headers. Currently the headers are shared
- * between all components that talk to svga. And as such the headers
- * are in a completely different style and use weird defines.
- *
- * This file lets all the ugly be prefixed with svga*.
- */
-
-#ifndef _SVGA_TYPES_H_
-#define _SVGA_TYPES_H_
-
-typedef uint16_t uint16;
-typedef uint32_t uint32;
-typedef uint8_t uint8;
-typedef int32_t int32;
-typedef bool Bool;
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644
index 000000000000..9c42e96da510
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -0,0 +1,1294 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * This file implements the vmwgfx context binding manager.
+ * The sole reason for having to use this code is that VMware guest-backed
+ * contexts can be swapped out to their backing mobs by the device at any
+ * time, and swapped back in at any time. At swapin time, the device
+ * validates the context bindings to make sure they point to valid resources.
+ * It's this outside-of-drawcall validation (which can happen at any time)
+ * that makes this code necessary.
+ *
+ * We therefore need to kill any context bindings pointing to a resource
+ * when the resource is swapped out. Furthermore, if the vmwgfx driver has
+ * swapped out the context, we can't swap it in again to kill bindings,
+ * because of backing mob reservation lockdep violations. Therefore, as part
+ * of context swapout, we also kill all bindings of the context, so that they
+ * are already killed if a resource that a binding points to needs to be
+ * swapped out.
+ *
+ * Note that a resource can be pointed to by bindings from multiple contexts.
+ * Therefore we can't easily protect this data by a per-context mutex
+ * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
+ * to protect all binding manager data.
+ *
+ * Finally, any association between a context and a global resource
+ * (surface, shader or even DX query) is conceptually a context binding that
+ * needs to be tracked by this code.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
+#include "device_include/svga3d_reg.h"
+
+#define VMW_BINDING_RT_BIT     0
+#define VMW_BINDING_PS_BIT     1
+#define VMW_BINDING_SO_BIT     2
+#define VMW_BINDING_VB_BIT     3
+#define VMW_BINDING_NUM_BITS   4
+
+#define VMW_BINDING_PS_SR_BIT  0
+
+/**
+ * struct vmw_ctx_binding_state - per context binding state
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: linked list of individual active bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units bindings.
+ * @ds_view: Depth-stencil view binding.
+ * @so_targets: StreamOutput target bindings.
+ * @vertex_buffers: Vertex buffer bindings.
+ * @index_buffer: Index buffer binding.
+ * @per_shader: Per shader-type bindings.
+ * @dirty: Bitmap tracking per binding-type changes that have not yet
+ * been emitted to the device.
+ * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
+ * have not yet been emitted to the device.
+ * @bind_cmd_buffer: Scratch space used to construct binding commands.
+ * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer.
+ * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
+ * device binding slot of the first command data entry in @bind_cmd_buffer.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+	struct vmw_private *dev_priv;
+	struct list_head list;
+	struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
+	struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+	struct vmw_ctx_bindinfo_view ds_view;
+	struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
+	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+	struct vmw_ctx_bindinfo_ib index_buffer;
+	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
+
+	unsigned long dirty;
+	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
+
+	u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
+	u32 bind_cmd_count;
+	u32 bind_first_slot;
+};
+
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind);
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
+				       bool rebind);
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_binding_build_asserts(void) __attribute__ ((unused));
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
+/**
+ * struct vmw_binding_info - Per binding type information for the binding
+ * manager
+ *
+ * @size: The size of the binding struct for this binding type, i.e. the
+ * struct derived from struct vmw_ctx_bindinfo.
+ * @offsets: array[shader_slot] of offsets to the array[slot]
+ * of struct bindings for the binding type.
+ * @scrub_func: Pointer to the scrub function for this binding type.
+ *
+ * Holds static information to help optimize the binding manager and avoid
+ * an excessive amount of switch statements.
+ */
+struct vmw_binding_info {
+	size_t size;
+	const size_t *offsets;
+	vmw_scrub_func scrub_func;
+};
+
+/*
+ * A number of static variables that help determine the scrub func and the
+ * location of the struct vmw_ctx_bindinfo slots for each binding type.
+ */
+static const size_t vmw_binding_shader_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
+};
+static const size_t vmw_binding_rt_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, render_targets),
+};
+static const size_t vmw_binding_tex_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, texture_units),
+};
+static const size_t vmw_binding_cb_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
+};
+static const size_t vmw_binding_dx_ds_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, ds_view),
+};
+static const size_t vmw_binding_sr_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
+};
+static const size_t vmw_binding_so_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, so_targets),
+};
+static const size_t vmw_binding_vb_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, vertex_buffers),
+};
+static const size_t vmw_binding_ib_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, index_buffer),
+};
+
+static const struct vmw_binding_info vmw_binding_infos[] = {
+	[vmw_ctx_binding_shader] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_shader),
+		.offsets = vmw_binding_shader_offsets,
+		.scrub_func = vmw_binding_scrub_shader},
+	[vmw_ctx_binding_rt] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_rt_offsets,
+		.scrub_func = vmw_binding_scrub_render_target},
+	[vmw_ctx_binding_tex] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_tex),
+		.offsets = vmw_binding_tex_offsets,
+		.scrub_func = vmw_binding_scrub_texture},
+	[vmw_ctx_binding_cb] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_cb),
+		.offsets = vmw_binding_cb_offsets,
+		.scrub_func = vmw_binding_scrub_cb},
+	[vmw_ctx_binding_dx_shader] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_shader),
+		.offsets = vmw_binding_shader_offsets,
+		.scrub_func = vmw_binding_scrub_dx_shader},
+	[vmw_ctx_binding_dx_rt] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_rt_offsets,
+		.scrub_func = vmw_binding_scrub_dx_rt},
+	[vmw_ctx_binding_sr] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_sr_offsets,
+		.scrub_func = vmw_binding_scrub_sr},
+	[vmw_ctx_binding_ds] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_dx_ds_offsets,
+		.scrub_func = vmw_binding_scrub_dx_rt},
+	[vmw_ctx_binding_so] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_so),
+		.offsets = vmw_binding_so_offsets,
+		.scrub_func = vmw_binding_scrub_so},
+	[vmw_ctx_binding_vb] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_vb),
+		.offsets = vmw_binding_vb_offsets,
+		.scrub_func = vmw_binding_scrub_vb},
+	[vmw_ctx_binding_ib] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_ib),
+		.offsets = vmw_binding_ib_offsets,
+		.scrub_func = vmw_binding_scrub_ib},
+};
+
+/**
+ * vmw_cbs_context - Return a pointer to the context resource of a
+ * context binding state tracker.
+ *
+ * @cbs: The context binding state tracker.
+ *
+ * Provided there are any active bindings, this function will return an
+ * unreferenced pointer to the context resource that owns the context
+ * binding state tracker. If there are no active bindings, this function
+ * will return NULL. Note that the caller must somehow ensure that a reference
+ * is held on the context resource prior to calling this function.
+ */
+static const struct vmw_resource *
+vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
+{
+	if (list_empty(&cbs->list))
+		return NULL;
+
+	return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
+				ctx_list)->ctx;
+}
+
+/**
+ * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
+ *
+ * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot.
+ * @bt: The binding type.
+ * @shader_slot: The shader slot of the binding. If none, then set to 0.
+ * @slot: The slot of the binding.
+ */
+static struct vmw_ctx_bindinfo *
+vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
+		enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
+{
+	const struct vmw_binding_info *b = &vmw_binding_infos[bt];
+	size_t offset = b->offsets[shader_slot] + b->size*slot;
+
+	return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
+}
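+
The lookup above is plain offsetof() arithmetic: the binding type selects a per-type (and per-shader-slot) base offset into struct vmw_ctx_binding_state, and the per-type element size scales the slot index. A self-contained sketch of the same idiom, with illustrative names:

	#include <stddef.h>
	#include <stdint.h>

	struct slot { int ctx; };

	struct state {
		struct slot a[4];
		struct slot b[8];
	};

	static const size_t kind_offsets[] = {
		offsetof(struct state, a),
		offsetof(struct state, b),
	};

	static struct slot *slot_loc(struct state *s, unsigned kind, size_t idx)
	{
		/* base of the array for this kind, plus idx elements in */
		return (struct slot *)((uint8_t *)s + kind_offsets[kind] +
				       sizeof(struct slot) * idx);
	}

slot_loc(s, 1, 3) is then equivalent to &s->b[3], just as vmw_binding_loc() resolves (bt, shader_slot, slot) to the embedded bindinfo storage without a switch statement.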
+
+/**
+ * vmw_binding_drop - Stop tracking a context binding
+ *
+ * @bi: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
+{
+	list_del(&bi->ctx_list);
+	if (!list_empty(&bi->res_list))
+		list_del(&bi->res_list);
+	bi->ctx = NULL;
+}
+
+/**
+ * vmw_binding_add - Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ * @shader_slot: The shader slot of the binding, if applicable.
+ * @slot: The slot of the binding.
+ *
+ * Starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+		    const struct vmw_ctx_bindinfo *bi,
+		    u32 shader_slot, u32 slot)
+{
+	struct vmw_ctx_bindinfo *loc =
+		vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
+	const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
+
+	if (loc->ctx != NULL)
+		vmw_binding_drop(loc);
+
+	memcpy(loc, bi, b->size);
+	loc->scrubbed = false;
+	list_add(&loc->ctx_list, &cbs->list);
+	INIT_LIST_HEAD(&loc->res_list);
+}
+
+/**
+ * vmw_binding_transfer - Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @from: Staged binding state the binding info was recorded in.
+ * @bi: Information about the binding to transfer.
+ */
+static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
+				 const struct vmw_ctx_binding_state *from,
+				 const struct vmw_ctx_bindinfo *bi)
+{
+	size_t offset = (unsigned long)bi - (unsigned long)from;
+	struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
+		((unsigned long) cbs + offset);
+
+	if (loc->ctx != NULL) {
+		WARN_ON(bi->scrubbed);
+
+		vmw_binding_drop(loc);
+	}
+
+	if (bi->res != NULL) {
+		memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
+		list_add_tail(&loc->ctx_list, &cbs->list);
+		list_add_tail(&loc->res_list, &loc->res->binding_head);
+	}
+}
+
+/**
+ * vmw_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	vmw_binding_state_scrub(cbs);
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+		vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->scrubbed) {
+			(void) vmw_binding_infos[entry->bt].scrub_func
+				(entry, false);
+			entry->scrubbed = true;
+		}
+	}
+
+	(void) vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_binding_res_list_kill(struct list_head *head)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	vmw_binding_res_list_scrub(head);
+	list_for_each_entry_safe(entry, next, head, res_list)
+		vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_bindinfo *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->scrubbed) {
+			(void) vmw_binding_infos[entry->bt].scrub_func
+				(entry, false);
+			entry->scrubbed = true;
+		}
+	}
+
+	list_for_each_entry(entry, head, res_list) {
+		struct vmw_ctx_binding_state *cbs =
+			vmw_context_binding_state(entry->ctx);
+
+		(void) vmw_binding_emit_dirty(cbs);
+	}
+}
+
+
+/**
+ * vmw_binding_state_commit - Commit staged binding info
+ *
+ * @to: Pointer to the persistent binding state to commit the staged
+ * binding info to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure
+ * (typically used by execbuf) to the persistent
+ * structure in the context. This can be done once commands have been
+ * submitted to hardware.
+ */
+void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+			      struct vmw_ctx_binding_state *from)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
+		vmw_binding_transfer(to, from, entry);
+		vmw_binding_drop(entry);
+	}
+}
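+
A hypothetical call sequence showing how the staging/commit split is meant to be used (the real call sites live in the execbuf path, which is not part of this file; error handling omitted):

	struct vmw_ctx_binding_state *staged;

	staged = vmw_binding_state_alloc(dev_priv);  /* may return ERR_PTR */

	/* While validating the command stream, record each binding seen: */
	vmw_binding_add(staged, &bi, shader_slot, slot);

	/* Once the commands have been handed to the device: */
	vmw_binding_state_commit(vmw_context_binding_state(ctx), staged);
	vmw_binding_state_free(staged);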
+
+/**
+ * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->scrubbed))
+			continue;
+
+		if (entry->res == NULL ||
+		    entry->res->id == SVGA3D_INVALID_ID)
+			continue;
+
+		ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->scrubbed = false;
+	}
+
+	return vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_shader *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
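+
Every scrub helper from here on follows the FIFO idiom shown above: reserve space for a header-plus-body command struct, fill it in, and commit exactly the reserved size. The shared shape, schematically (only the body type and command id vary per helper):

	struct {
		SVGA3dCmdHeader header;      /* command id and body size */
		SVGA3dCmdSetShader body;     /* command-specific payload */
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (cmd == NULL)
		return -ENOMEM;              /* no FIFO space available */

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	/* ... fill cmd->body; a rebind writes the resource id, a scrub
	 * writes SVGA3D_INVALID_ID ... */
	vmw_fifo_commit(dev_priv, sizeof(*cmd)); /* must match the reserve */

DX-context commands use vmw_fifo_reserve_dx() with the context id instead, as seen in the helpers below.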
+
+/**
+ * vmw_binding_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind)
+{
+	struct vmw_ctx_bindinfo_view *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetRenderTarget body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for render target "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = binding->slot;
+	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	cmd->body.target.face = 0;
+	cmd->body.target.mipmap = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them into a single command.
+ */
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
+{
+	struct vmw_ctx_bindinfo_tex *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct {
+			SVGA3dCmdSetTextureState c;
+			SVGA3dTextureState s1;
+		} body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for texture "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.c.cid = bi->ctx->id;
+	cmd->body.s1.stage = binding->texture_stage;
+	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_shader *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShader body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_cb *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSingleConstantBuffer body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.slot = binding->slot;
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	if (rebind) {
+		cmd->body.offsetInBytes = binding->offset;
+		cmd->body.sizeInBytes = binding->size;
+		cmd->body.sid = bi->res->id;
+	} else {
+		cmd->body.offsetInBytes = 0;
+		cmd->body.sizeInBytes = 0;
+		cmd->body.sid = SVGA3D_INVALID_ID;
+	}
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_collect_view_ids - Build view id data for a view binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of view id data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
+				 const struct vmw_ctx_bindinfo *bi,
+				 u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	unsigned long i;
+
+	cbs->bind_cmd_count = 0;
+	cbs->bind_first_slot = 0;
+
+	for (i = 0; i < max_num; ++i, ++biv) {
+		if (!biv->bi.ctx)
+			break;
+
+		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+			((biv->bi.scrubbed) ?
+			 SVGA3D_INVALID_ID : biv->bi.res->id);
+	}
+}
+
+/**
+ * vmw_collect_dirty_view_ids - Build view id data for a view binding command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of view id data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
+				       const struct vmw_ctx_bindinfo *bi,
+				       unsigned long *dirty,
+				       u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	unsigned long i, next_bit;
+
+	cbs->bind_cmd_count = 0;
+	i = find_first_bit(dirty, max_num);
+	next_bit = i;
+	cbs->bind_first_slot = i;
+
+	biv += i;
+	for (; i < max_num; ++i, ++biv) {
+		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+			((!biv->bi.ctx || biv->bi.scrubbed) ?
+			 SVGA3D_INVALID_ID : biv->bi.res->id);
+
+		if (next_bit == i) {
+			next_bit = find_next_bit(dirty, max_num, i + 1);
+			if (next_bit >= max_num)
+				break;
+		}
+	}
+}
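+
Note that the scan above emits one contiguous run: it starts at the first dirty slot and keeps copying entries (clean slots included) until no dirty bit remains beyond the current index. A dirty set of {2, 5} therefore yields bind_first_slot = 2 and four entries covering slots 2..5. A reduced sketch of the same scan, using the kernel bitmap helpers:

	unsigned long i = find_first_bit(dirty, max_num);
	unsigned long next = i;
	u32 count = 0;

	for (; i < max_num; ++i) {
		/* copy slot i into the command buffer here */
		count++;
		if (next == i) {
			next = find_next_bit(dirty, max_num, i + 1);
			if (next >= max_num)
				break;  /* no dirty slots remain past i */
		}
	}

Emitting the run as a single range keeps the device command (a start slot plus an id array) simple, at the cost of re-sending a few unchanged slots.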
+
+/**
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @shader_slot: The shader slot whose dirty shader resources to emit.
+ */
+static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
+			   int shader_slot)
+{
+	const struct vmw_ctx_bindinfo *loc =
+		&cbs->per_shader[shader_slot].shader_res[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShaderResources body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_dirty_view_ids(cbs, loc,
+				   cbs->per_shader[shader_slot].dirty_sr,
+				   SVGA3D_DX_MAX_SRVIEWS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX shader"
+			  " resource binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+	cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.startView = cbs->bind_first_slot;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
+		     cbs->bind_first_slot, cbs->bind_cmd_count);
+
+	return 0;
+}
+
+/**
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetRenderTargets body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
+	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX render-target"
+			  " binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+	if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
+		cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
+	else
+		cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+}
+
+/**
+ * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
+				   const struct vmw_ctx_bindinfo *bi,
+				   u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_so *biso =
+		container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+	unsigned long i;
+	SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
+
+	cbs->bind_cmd_count = 0;
+	cbs->bind_first_slot = 0;
+
+	for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
+		    ++cbs->bind_cmd_count) {
+		if (!biso->bi.ctx)
+			break;
+
+		if (!biso->bi.scrubbed) {
+			so_buffer->sid = biso->bi.res->id;
+			so_buffer->offset = biso->offset;
+			so_buffer->sizeInBytes = biso->size;
+		} else {
+			so_buffer->sid = SVGA3D_INVALID_ID;
+			so_buffer->offset = 0;
+			so_buffer->sizeInBytes = 0;
+		}
+	}
+}
+
+/**
+ * vmw_emit_set_so - Issue delayed streamout binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSOTargets body;
+	} *cmd;
+	size_t cmd_size, so_target_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
+	cmd_size = sizeof(*cmd) + so_target_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX SO target"
+			  " binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
+	cmd->header.size = sizeof(cmd->body) + so_target_size;
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
+	u32 i;
+	int ret;
+
+	for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
+		if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
+			continue;
+
+		ret = vmw_emit_set_sr(cbs, i);
+		if (ret)
+			return ret;
+
+		__clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
+ * SVGA3dCmdDXSetVertexBuffers command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of SVGA3dVertexBuffer data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
+				  const struct vmw_ctx_bindinfo *bi,
+				  unsigned long *dirty,
+				  u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_vb *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+	unsigned long i, next_bit;
+	SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) cbs->bind_cmd_buffer;
+
+	cbs->bind_cmd_count = 0;
+	i = find_first_bit(dirty, max_num);
+	next_bit = i;
+	cbs->bind_first_slot = i;
+
+	biv += i;
+	for (; i < max_num; ++i, ++biv, ++vbs) {
+		if (!biv->bi.ctx || biv->bi.scrubbed) {
+			vbs->sid = SVGA3D_INVALID_ID;
+			vbs->stride = 0;
+			vbs->offset = 0;
+		} else {
+			vbs->sid = biv->bi.res->id;
+			vbs->stride = biv->stride;
+			vbs->offset = biv->offset;
+		}
+		cbs->bind_cmd_count++;
+		if (next_bit == i) {
+			next_bit = find_next_bit(dirty, max_num, i + 1);
+			if (next_bit >= max_num)
+				break;
+		}
+	}
+}
+
+/**
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc =
+		&cbs->vertex_buffers[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetVertexBuffers body;
+	} *cmd;
+	size_t cmd_size, set_vb_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
+			     SVGA3D_DX_MAX_VERTEXBUFFERS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
+	cmd_size = sizeof(*cmd) + set_vb_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
+			  " binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
+	cmd->header.size = sizeof(cmd->body) + set_vb_size;
+	cmd->body.startBuffer = cbs->bind_first_slot;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	bitmap_clear(cbs->dirty_vb,
+		     cbs->bind_first_slot, cbs->bind_cmd_count);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty - Issue delayed binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ * This function issues the delayed binding commands that arise from
+ * previous scrub / unscrub calls. These binding commands are typically
+ * commands that batch a number of bindings and therefore it makes sense
+ * to delay them.
+ */
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
+{
+	int ret = 0;
+	unsigned long hit = 0;
+
+	while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
+	      < VMW_BINDING_NUM_BITS) {
+
+		switch (hit) {
+		case VMW_BINDING_RT_BIT:
+			ret = vmw_emit_set_rt(cbs);
+			break;
+		case VMW_BINDING_PS_BIT:
+			ret = vmw_binding_emit_dirty_ps(cbs);
+			break;
+		case VMW_BINDING_SO_BIT:
+			ret = vmw_emit_set_so(cbs);
+			break;
+		case VMW_BINDING_VB_BIT:
+			ret = vmw_emit_set_vb(cbs);
+			break;
+		default:
+			BUG();
+		}
+		if (ret)
+			return ret;
+
+		__clear_bit(hit, &cbs->dirty);
+		hit++;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_sr - Schedule a dx shader resource binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
+	__set_bit(VMW_BINDING_PS_SR_BIT,
+		  &cbs->per_shader[biv->shader_slot].dirty);
+	__set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_vb *bivb =
+		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(bivb->slot, cbs->dirty_vb);
+	__set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_ib *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetIndexBuffer body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX index buffer "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+	cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
+	cmd->header.size = sizeof(cmd->body);
+	if (rebind) {
+		cmd->body.sid = bi->res->id;
+		cmd->body.format = binding->format;
+		cmd->body.offset = binding->offset;
+	} else {
+		cmd->body.sid = SVGA3D_INVALID_ID;
+		cmd->body.format = 0;
+		cmd->body.offset = 0;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
+ * memory accounting.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Returns a pointer to a newly allocated struct or an error pointer on error.
+ */
+struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv)
+{
+	struct vmw_ctx_binding_state *cbs;
+	int ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
+				   false, false);
+	if (ret)
+		return ERR_PTR(ret);
+
+	cbs = vzalloc(sizeof(*cbs));
+	if (!cbs) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	cbs->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&cbs->list);
+
+	return cbs;
+}
+
+/**
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
+ * memory accounting info.
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
+ */
+void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_private *dev_priv = cbs->dev_priv;
+
+	vfree(cbs);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+}
+
+/**
+ * vmw_binding_state_list - Get the binding list of a
+ * struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state
+ *
+ * Returns the binding list which can be used to traverse through the bindings
+ * and access the resource information of all bindings.
+ */
+struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
+{
+	return &cbs->list;
+}
+
+/**
+ * vmw_binding_state_reset - Clear a struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
+ *
+ * Drops all bindings registered in @cbs. No device binding actions are
+ * performed.
+ */
+void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+		vmw_binding_drop(entry);
+}
+
+/*
+ * This function is unused at run-time and exists only to hold various
+ * build-time asserts important for code optimization assumptions.
+ */
+static void vmw_binding_build_asserts(void)
+{
+	BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
+	BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
+	BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
+
+	/*
+	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
+	 * view id arrays.
+	 */
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
+
+	/*
+	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
+	 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
+	 */
+	BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
+		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+	BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
+		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
new file mode 100644
index 000000000000..bf2e77ad5a20
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -0,0 +1,209 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_BINDING_H_
+#define _VMWGFX_BINDING_H_
+
+#include "device_include/svga3d_reg.h"
+#include <linux/list.h>
+
+#define VMW_MAX_VIEW_BINDINGS 128
+
+struct vmw_private;
+struct vmw_ctx_binding_state;
+
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+	vmw_ctx_binding_shader,
+	vmw_ctx_binding_rt,
+	vmw_ctx_binding_tex,
+	vmw_ctx_binding_cb,
+	vmw_ctx_binding_dx_shader,
+	vmw_ctx_binding_dx_rt,
+	vmw_ctx_binding_sr,
+	vmw_ctx_binding_ds,
+	vmw_ctx_binding_so,
+	vmw_ctx_binding_vb,
+	vmw_ctx_binding_ib,
+	vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - single binding metadata
+ *
+ * @ctx_list: List head for the context's list of bindings.
+ * @res_list: List head for a resource's list of bindings.
+ * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
+ * indicates no binding present.
+ * @res: Non-refcounted pointer to the resource the binding points to. This
+ * is typically a surface or a view.
+ * @bt: Binding type.
+ * @scrubbed: Whether the binding has been scrubbed from the context.
+ */
+struct vmw_ctx_bindinfo {
+	struct list_head ctx_list;
+	struct list_head res_list;
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
+	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @texture_stage: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_tex {
+	struct vmw_ctx_bindinfo bi;
+	uint32 texture_stage;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_shader - Shader binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_shader {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_cb {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+	uint32 offset;
+	uint32 size;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_view - View binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_view {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 size;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @stride: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_vb {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 stride;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @format: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_ib {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 format;
+};
+
+/**
+ * struct vmw_dx_shader_bindings - per shader type context binding state
+ *
+ * @shader: The shader binding for this shader type.
+ * @const_buffers: Const buffer bindings for this shader type.
+ * @shader_res: Shader resource view bindings for this shader type.
+ * @dirty_sr: Bitmap tracking individual shader resource bindings changes
+ * that have not yet been emitted to the device.
+ * @dirty: Bitmap tracking per-binding type binding changes that have not
+ * yet been emitted to the device.
+ */
+struct vmw_dx_shader_bindings {
+	struct vmw_ctx_bindinfo_shader shader;
+	struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+	struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
+	DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
+	unsigned long dirty;
+};
+
+extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+			    const struct vmw_ctx_bindinfo *ci,
+			    u32 shader_slot, u32 slot);
+extern void
+vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+			 struct vmw_ctx_binding_state *from);
+extern void vmw_binding_res_list_kill(struct list_head *head);
+extern void vmw_binding_res_list_scrub(struct list_head *head);
+extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+extern struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv);
+extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
+extern struct list_head *
+vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index cff2bf9db9d2..3329f623c8bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -72,6 +72,12 @@ static struct ttm_place mob_placement_flags = {
 	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };
 
+static struct ttm_place mob_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
 struct ttm_placement vmw_vram_placement = {
 	.num_placement = 1,
 	.placement = &vram_placement_flags,
@@ -200,6 +206,13 @@ struct ttm_placement vmw_mob_placement = {
 	.busy_placement = &mob_placement_flags
 };
 
+struct ttm_placement vmw_mob_ne_placement = {
+	.num_placement = 1,
+	.num_busy_placement = 1,
+	.placement = &mob_ne_placement_flags,
+	.busy_placement = &mob_ne_placement_flags
+};
+
 struct vmw_ttm_tt {
 	struct ttm_dma_tt dma_ttm;
 	struct vmw_private *dev_priv;
@@ -804,9 +817,9 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 /**
  * vmw_move_notify - TTM move_notify_callback
  *
- * @bo:             The TTM buffer object about to move.
- * @mem:            The truct ttm_mem_reg indicating to what memory
- *                  region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
  *
  * Calls move_notify for all subsystems needing it.
  * (currently only resources).
@@ -815,13 +828,14 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
 			    struct ttm_mem_reg *mem)
 {
 	vmw_resource_move_notify(bo, mem);
+	vmw_query_move_notify(bo, mem);
 }
 
 
 /**
  * vmw_swap_notify - TTM move_notify_callback
  *
- * @bo:             The TTM buffer object about to be swapped out.
+ * @bo: The TTM buffer object about to be swapped out.
  */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
new file mode 100644
index 000000000000..5ae8f921da2a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -0,0 +1,1303 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_bo_api.h"
+
+/*
+ * Size of inline command buffers. Try to make sure that the page size is a
+ * multiple of the DMA pool allocation size.
+ */
+#define VMW_CMDBUF_INLINE_ALIGN 64
+#define VMW_CMDBUF_INLINE_SIZE \
+	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
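+
As a worked example of the arithmetic, suppose sizeof(SVGACBHeader) were 40 bytes (an assumed figure, not taken from the device headers):

	/*
	 * ALIGN(40, 64) = 64, so VMW_CMDBUF_INLINE_SIZE = 1024 - 64 = 960.
	 * Header plus inline payload then occupy exactly 1024 bytes, and
	 * four such allocations tile a 4096-byte page with no waste, which
	 * is what the comment above is aiming for.
	 */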
+
+/**
+ * struct vmw_cmdbuf_context - Command buffer context queues
+ *
+ * @submitted: List of command buffers that have been submitted to the
+ * manager but not yet submitted to hardware.
+ * @hw_submitted: List of command buffers submitted to hardware.
+ * @preempted: List of preempted command buffers.
+ * @num_hw_submitted: Number of buffers currently being processed by hardware.
+ */
+struct vmw_cmdbuf_context {
+	struct list_head submitted;
+	struct list_head hw_submitted;
+	struct list_head preempted;
+	unsigned num_hw_submitted;
+};
+
+/**
+ * struct vmw_cmdbuf_man - Command buffer manager
+ *
+ * @cur_mutex: Mutex protecting the command buffer used for incremental small
+ * kernel command submissions, @cur.
+ * @space_mutex: Mutex to protect against starvation when we allocate
+ * main pool buffer space.
+ * @work: A struct work_struct implementing command buffer error handling.
+ * Immutable.
+ * @dev_priv: Pointer to the device private struct. Immutable.
+ * @ctx: Array of command buffer context queues. The queues and the context
+ * data are protected by @lock.
+ * @error: List of command buffers that have caused device errors.
+ * Protected by @lock.
+ * @mm: Range manager for the command buffer space. Manager allocations and
+ * frees are protected by @lock.
+ * @cmd_space: Buffer object for the command buffer space, unless we were
+ * able to make a contiguous coherent DMA memory allocation (see @handle).
+ * Immutable.
+ * @map_obj: Mapping state for @cmd_space. Immutable.
+ * @map: Pointer to command buffer space. May be a mapped buffer object or
+ * a contiguous coherent DMA memory allocation. Immutable.
+ * @cur: Command buffer for small kernel command submissions. Protected by
+ * the @cur_mutex.
+ * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
+ * @default_size: Default size for the @cur command buffer. Immutable.
+ * @max_hw_submitted: Max number of in-flight command buffers the device can
+ * handle. Immutable.
+ * @lock: Spinlock protecting command submission queues.
+ * @headers: Pool of DMA memory for device command buffer headers.
+ * Internal protection.
+ * @dheaders: Pool of DMA memory for device command buffer headers with trailing
+ * space for inline data. Internal protection.
+ * @tasklet: Tasklet struct for irq processing. Immutable.
+ * @alloc_queue: Wait queue for processes waiting to allocate command buffer
+ * space.
+ * @idle_queue: Wait queue for processes waiting for command buffer idle.
+ * @irq_on: Whether the process function has requested irq to be turned on.
+ * Protected by @lock.
+ * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
+ * allocation. Immutable.
+ * @has_pool: Has a large pool of DMA memory which allows larger allocations.
+ * Typically this is false only during bootstrap.
+ * @handle: DMA address handle for the command buffer space if @using_mob is
+ * false. Immutable.
+ * @size: The size of the command buffer space. Immutable.
+ */
+struct vmw_cmdbuf_man {
+	struct mutex cur_mutex;
+	struct mutex space_mutex;
+	struct work_struct work;
+	struct vmw_private *dev_priv;
+	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
+	struct list_head error;
+	struct drm_mm mm;
+	struct ttm_buffer_object *cmd_space;
+	struct ttm_bo_kmap_obj map_obj;
+	u8 *map;
+	struct vmw_cmdbuf_header *cur;
+	size_t cur_pos;
+	size_t default_size;
+	unsigned max_hw_submitted;
+	spinlock_t lock;
+	struct dma_pool *headers;
+	struct dma_pool *dheaders;
+	struct tasklet_struct tasklet;
+	wait_queue_head_t alloc_queue;
+	wait_queue_head_t idle_queue;
+	bool irq_on;
+	bool using_mob;
+	bool has_pool;
+	dma_addr_t handle;
+	size_t size;
+};
+
+/**
+ * struct vmw_cmdbuf_header - Command buffer metadata
+ *
+ * @man: The command buffer manager.
+ * @cb_header: Device command buffer header, allocated from a DMA pool.
+ * @cb_context: The device command buffer context.
+ * @list: List head for attaching to the manager lists.
+ * @node: The range manager node.
+ * @handle: The DMA address of @cb_header. Handed to the device on command
+ * buffer submission.
+ * @cmd: Pointer to the command buffer space of this buffer.
+ * @size: Size of the command buffer space of this buffer.
+ * @reserved: Reserved space of this buffer.
+ * @inline_space: Whether inline command buffer space is used.
+ */
+struct vmw_cmdbuf_header {
+	struct vmw_cmdbuf_man *man;
+	SVGACBHeader *cb_header;
+	SVGACBContext cb_context;
+	struct list_head list;
+	struct drm_mm_node node;
+	dma_addr_t handle;
+	u8 *cmd;
+	size_t size;
+	size_t reserved;
+	bool inline_space;
+};
+
+/**
+ * struct vmw_cmdbuf_dheader - Device command buffer header with inline
+ * command buffer space.
+ *
+ * @cb_header: Device command buffer header.
+ * @cmd: Inline command buffer space.
+ */
+struct vmw_cmdbuf_dheader {
+	SVGACBHeader cb_header;
+	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
+};
+
+/**
+ * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
+ *
+ * @page_size: Size of requested command buffer space in pages.
+ * @node: Pointer to the range manager node.
+ * @done: True if this allocation has succeeded.
+ */
+struct vmw_cmdbuf_alloc_info {
+	size_t page_size;
+	struct drm_mm_node *node;
+	bool done;
+};
+
+/* Loop over each context in the command buffer manager. */
+#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
+	     ++(_i), ++(_ctx))
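+
+/*
+ * Illustrative use of the iterator above (this mirrors how
+ * vmw_cmdbuf_man_create() below initializes all contexts):
+ *
+ *	for_each_cmdbuf_ctx(man, i, ctx)
+ *		vmw_cmdbuf_ctx_init(ctx);
+ */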
+
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
+
+
+/**
+ * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Whether to wait interruptibly when locking.
+ */
+static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
+{
+	if (interruptible) {
+		if (mutex_lock_interruptible(&man->cur_mutex))
+			return -ERESTARTSYS;
+	} else {
+		mutex_lock(&man->cur_mutex);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
+ *
+ * @man: The command buffer manager.
+ */
+static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
+{
+	mutex_unlock(&man->cur_mutex);
+}
+
+/**
+ * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that
+ * was set up with inline command buffer space.
+ * Need not be called locked.
+ *
+ * @header: Pointer to the header to free.
+ */
+static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_dheader *dheader;
+
+	if (WARN_ON_ONCE(!header->inline_space))
+		return;
+
+	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
+			       cb_header);
+	dma_pool_free(header->man->dheaders, dheader, header->handle);
+	kfree(header);
+}
+
+/**
+ * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ *
+ * For internal use. Must be called with the manager's lock held.
+ */
+static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+
+	BUG_ON(!spin_is_locked(&man->lock));
+
+	if (header->inline_space) {
+		vmw_cmdbuf_header_inline_free(header);
+		return;
+	}
+
+	drm_mm_remove_node(&header->node);
+	wake_up_all(&man->alloc_queue);
+	if (header->cb_header)
+		dma_pool_free(man->headers, header->cb_header,
+			      header->handle);
+	kfree(header);
+}
+
+/**
+ * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ */
+void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+
+	/* Avoid locking if inline_space */
+	if (header->inline_space) {
+		vmw_cmdbuf_header_inline_free(header);
+		return;
+	}
+	spin_lock_bh(&man->lock);
+	__vmw_cmdbuf_header_free(header);
+	spin_unlock_bh(&man->lock);
+}
+
+
+/**
+ * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
+ *
+ * @header: The header of the buffer to submit.
+ */
+static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+	u32 val;
+
+	if (sizeof(header->handle) > 4)
+		val = (header->handle >> 32);
+	else
+		val = 0;
+	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
+
+	val = (header->handle & 0xFFFFFFFFULL);
+	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
+	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
+
+	return header->cb_header->status;
+}
+
+/**
+ * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
+ *
+ * @ctx: The command buffer context to initialize
+ */
+static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
+{
+	INIT_LIST_HEAD(&ctx->hw_submitted);
+	INIT_LIST_HEAD(&ctx->submitted);
+	INIT_LIST_HEAD(&ctx->preempted);
+	ctx->num_hw_submitted = 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ *
+ * Submits command buffers to hardware until there are no more command
+ * buffers to submit or the hardware can't handle more command buffers.
+ */
+static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
+				  struct vmw_cmdbuf_context *ctx)
+{
+	while (ctx->num_hw_submitted < man->max_hw_submitted &&
+	      !list_empty(&ctx->submitted)) {
+		struct vmw_cmdbuf_header *entry;
+		SVGACBStatus status;
+
+		entry = list_first_entry(&ctx->submitted,
+					 struct vmw_cmdbuf_header,
+					 list);
+
+		status = vmw_cmdbuf_header_submit(entry);
+
+		/* This should never happen */
+		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
+			entry->cb_header->status = SVGA_CB_STATUS_NONE;
+			break;
+		}
+
+		list_del(&entry->list);
+		list_add_tail(&entry->list, &ctx->hw_submitted);
+		ctx->num_hw_submitted++;
+	}
+}
+
+/**
+ * vmw_cmdbuf_ctx_process - Process a command buffer context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ * @notempty: Incremented if the context still has command buffers queued
+ * for submission after processing.
+ *
+ * Submit command buffers to hardware if possible, and process finished
+ * buffers, typically by freeing them, but taking appropriate action on
+ * preemption or error. Wake up waiters if appropriate.
+ */
+static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
+				   struct vmw_cmdbuf_context *ctx,
+				   int *notempty)
+{
+	struct vmw_cmdbuf_header *entry, *next;
+
+	vmw_cmdbuf_ctx_submit(man, ctx);
+
+	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
+		SVGACBStatus status = entry->cb_header->status;
+
+		if (status == SVGA_CB_STATUS_NONE)
+			break;
+
+		list_del(&entry->list);
+		wake_up_all(&man->idle_queue);
+		ctx->num_hw_submitted--;
+		switch (status) {
+		case SVGA_CB_STATUS_COMPLETED:
+			__vmw_cmdbuf_header_free(entry);
+			break;
+		case SVGA_CB_STATUS_COMMAND_ERROR:
+		case SVGA_CB_STATUS_CB_HEADER_ERROR:
+			list_add_tail(&entry->list, &man->error);
+			schedule_work(&man->work);
+			break;
+		case SVGA_CB_STATUS_PREEMPTED:
+			list_add(&entry->list, &ctx->preempted);
+			break;
+		default:
+			WARN_ONCE(true, "Undefined command buffer status.\n");
+			__vmw_cmdbuf_header_free(entry);
+			break;
+		}
+	}
+
+	vmw_cmdbuf_ctx_submit(man, ctx);
+	if (!list_empty(&ctx->submitted))
+		(*notempty)++;
+}
+
+/**
+ * vmw_cmdbuf_man_process - Process all command buffer contexts and
+ * switch on and off irqs as appropriate.
+ *
+ * @man: The command buffer manager.
+ *
+ * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
+ * command buffers left that are not submitted to hardware, make sure
+ * IRQ handling is turned on. Otherwise, make sure it's turned off. This
+ * function may return -EAGAIN to indicate it should be rerun due to
+ * possibly missed IRQs if IRQs have just been turned on.
+ */
+static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+{
+	int notempty = 0;
+	struct vmw_cmdbuf_context *ctx;
+	int i;
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
+
+	if (man->irq_on && !notempty) {
+		vmw_generic_waiter_remove(man->dev_priv,
+					  SVGA_IRQFLAG_COMMAND_BUFFER,
+					  &man->dev_priv->cmdbuf_waiters);
+		man->irq_on = false;
+	} else if (!man->irq_on && notempty) {
+		vmw_generic_waiter_add(man->dev_priv,
+				       SVGA_IRQFLAG_COMMAND_BUFFER,
+				       &man->dev_priv->cmdbuf_waiters);
+		man->irq_on = true;
+
+		/* Rerun in case we just missed an irq. */
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
+ * command buffer context
+ *
+ * @man: The command buffer manager.
+ * @header: The header of the buffer to submit.
+ * @cb_context: The command buffer context to use.
+ *
+ * This function adds @header to the "submitted" queue of the command
+ * buffer context identified by @cb_context. It then calls the command buffer
+ * manager processing to potentially submit the buffer to hardware.
+ * @man->lock needs to be held when calling this function.
+ */
+static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
+			       struct vmw_cmdbuf_header *header,
+			       SVGACBContext cb_context)
+{
+	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
+		header->cb_header->dxContext = 0;
+	header->cb_context = cb_context;
+	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
+
+	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+		vmw_cmdbuf_man_process(man);
+}
+
+/**
+ * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
+ * handler implemented as a tasklet.
+ *
+ * @data: Tasklet closure. A pointer to the command buffer manager cast to
+ * an unsigned long.
+ *
+ * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * command buffer processor to free finished buffers and submit any
+ * queued buffers to hardware.
+ */
+static void vmw_cmdbuf_man_tasklet(unsigned long data)
+{
+	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
+
+	spin_lock(&man->lock);
+	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+		(void) vmw_cmdbuf_man_process(man);
+	spin_unlock(&man->lock);
+}
+
+/**
+ * vmw_cmdbuf_work_func - The deferred work function that handles
+ * command buffer errors.
+ *
+ * @work: The work func closure argument.
+ *
+ * Restarting the command buffer context after an error requires process
+ * context, so it is deferred to this work function.
+ */
+static void vmw_cmdbuf_work_func(struct work_struct *work)
+{
+	struct vmw_cmdbuf_man *man =
+		container_of(work, struct vmw_cmdbuf_man, work);
+	struct vmw_cmdbuf_header *entry, *next;
+	bool restart = false;
+
+	spin_lock_bh(&man->lock);
+	list_for_each_entry_safe(entry, next, &man->error, list) {
+		restart = true;
+		DRM_ERROR("Command buffer error.\n");
+
+		list_del(&entry->list);
+		__vmw_cmdbuf_header_free(entry);
+		wake_up_all(&man->idle_queue);
+	}
+	spin_unlock_bh(&man->lock);
+
+	if (restart && vmw_cmdbuf_startstop(man, true))
+		DRM_ERROR("Failed restarting command buffer context 0.\n");
+}
+
+/**
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
+ *
+ * @man: The command buffer manager.
+ * @check_preempted: Also check the preempted queue for pending command buffers.
+ */
+static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
+				bool check_preempted)
+{
+	struct vmw_cmdbuf_context *ctx;
+	bool idle = false;
+	int i;
+
+	spin_lock_bh(&man->lock);
+	vmw_cmdbuf_man_process(man);
+	for_each_cmdbuf_ctx(man, i, ctx) {
+		if (!list_empty(&ctx->submitted) ||
+		    !list_empty(&ctx->hw_submitted) ||
+		    (check_preempted && !list_empty(&ctx->preempted)))
+			goto out_unlock;
+	}
+
+	idle = list_empty(&man->error);
+
+out_unlock:
+	spin_unlock_bh(&man->lock);
+
+	return idle;
+}
+
+/**
+ * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed. Call with @man->cur_mutex held.
+ */
+static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
+{
+	struct vmw_cmdbuf_header *cur = man->cur;
+
+	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+	if (!cur)
+		return;
+
+	spin_lock_bh(&man->lock);
+	if (man->cur_pos == 0) {
+		__vmw_cmdbuf_header_free(cur);
+		goto out_unlock;
+	}
+
+	man->cur->cb_header->length = man->cur_pos;
+	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
+out_unlock:
+	spin_unlock_bh(&man->lock);
+	man->cur = NULL;
+	man->cur_pos = 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Whether to sleep interruptibly while waiting.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed.
+ */
+int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+			 bool interruptible)
+{
+	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
+
+	if (ret)
+		return ret;
+
+	__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_idle - Wait for command buffer manager idle.
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Sleep interruptibly while waiting.
+ * @timeout: Time out after this many ticks.
+ *
+ * Wait until the command buffer manager has processed all command buffers,
+ * or until a timeout occurs. If a timeout occurs, the function will return
+ * -EBUSY.
+ */
+int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+		    unsigned long timeout)
+{
+	int ret;
+
+	ret = vmw_cmdbuf_cur_flush(man, interruptible);
+	vmw_generic_waiter_add(man->dev_priv,
+			       SVGA_IRQFLAG_COMMAND_BUFFER,
+			       &man->dev_priv->cmdbuf_waiters);
+
+	if (interruptible) {
+		ret = wait_event_interruptible_timeout
+			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+			 timeout);
+	} else {
+		ret = wait_event_timeout
+			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+			 timeout);
+	}
+	vmw_generic_waiter_remove(man->dev_priv,
+				  SVGA_IRQFLAG_COMMAND_BUFFER,
+				  &man->dev_priv->cmdbuf_waiters);
+	if (ret == 0) {
+		if (!vmw_cmdbuf_man_idle(man, true))
+			ret = -EBUSY;
+		else
+			ret = 0;
+	}
+	if (ret > 0)
+		ret = 0;
+
+	return ret;
+}
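+
+/*
+ * Illustrative sketch, mirroring the teardown paths later in this file:
+ * wait up to ten seconds for all submitted command buffers to complete.
+ *
+ *	if (vmw_cmdbuf_idle(man, false, 10*HZ))
+ *		DRM_ERROR("Command buffer manager failed to idle.\n");
+ */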
+
+/**
+ * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @info: Allocation info. Holds the requested size on entry and the
+ * allocated mm node on successful return.
+ *
+ * Try to allocate buffer space from the main pool. Returns true if the
+ * allocation succeeded, which is also recorded in @info->done.
+ */
+static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
+				 struct vmw_cmdbuf_alloc_info *info)
+{
+	int ret;
+
+	if (info->done)
+		return true;
+
+	memset(info->node, 0, sizeof(*info->node));
+	spin_lock_bh(&man->lock);
+	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
+					 0, 0,
+					 DRM_MM_SEARCH_DEFAULT,
+					 DRM_MM_CREATE_DEFAULT);
+	spin_unlock_bh(&man->lock);
+	info->done = !ret;
+
+	return info->done;
+}
+
+/**
+ * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @node: Pointer to pre-allocated range-manager node.
+ * @size: The size of the allocation.
+ * @interruptible: Whether to sleep interruptibly while waiting for space.
+ *
+ * This function allocates buffer space from the main pool, and if there is
+ * no space currently available, it turns on IRQ handling and sleeps
+ * waiting for it to become available.
+ */
+static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
+				  struct drm_mm_node *node,
+				  size_t size,
+				  bool interruptible)
+{
+	struct vmw_cmdbuf_alloc_info info;
+
+	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	info.node = node;
+	info.done = false;
+
+	/*
+	 * To prevent starvation of large requests, only one allocating call
+	 * at a time may wait for space.
+	 */
+	if (interruptible) {
+		if (mutex_lock_interruptible(&man->space_mutex))
+			return -ERESTARTSYS;
+	} else {
+		mutex_lock(&man->space_mutex);
+	}
+
+	/* Try to allocate space without waiting. */
+	if (vmw_cmdbuf_try_alloc(man, &info))
+		goto out_unlock;
+
+	vmw_generic_waiter_add(man->dev_priv,
+			       SVGA_IRQFLAG_COMMAND_BUFFER,
+			       &man->dev_priv->cmdbuf_waiters);
+
+	if (interruptible) {
+		int ret;
+
+		ret = wait_event_interruptible
+			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+		if (ret) {
+			vmw_generic_waiter_remove
+				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
+				 &man->dev_priv->cmdbuf_waiters);
+			mutex_unlock(&man->space_mutex);
+			return ret;
+		}
+	} else {
+		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+	}
+	vmw_generic_waiter_remove(man->dev_priv,
+				  SVGA_IRQFLAG_COMMAND_BUFFER,
+				  &man->dev_priv->cmdbuf_waiters);
+
+out_unlock:
+	mutex_unlock(&man->space_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
+ * space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptibly while waiting for space.
+ */
+static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
+				 struct vmw_cmdbuf_header *header,
+				 size_t size,
+				 bool interruptible)
+{
+	SVGACBHeader *cb_hdr;
+	size_t offset;
+	int ret;
+
+	if (!man->has_pool)
+		return -ENOMEM;
+
+	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
+
+	if (ret)
+		return ret;
+
+	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
+					   &header->handle);
+	if (!header->cb_header) {
+		ret = -ENOMEM;
+		goto out_no_cb_header;
+	}
+
+	header->size = header->node.size << PAGE_SHIFT;
+	cb_hdr = header->cb_header;
+	offset = header->node.start << PAGE_SHIFT;
+	header->cmd = man->map + offset;
+	memset(cb_hdr, 0, sizeof(*cb_hdr));
+	if (man->using_mob) {
+		cb_hdr->flags = SVGA_CB_FLAG_MOB;
+		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+		cb_hdr->ptr.mob.mobOffset = offset;
+	} else {
+		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
+	}
+
+	return 0;
+
+out_no_cb_header:
+	spin_lock_bh(&man->lock);
+	drm_mm_remove_node(&header->node);
+	spin_unlock_bh(&man->lock);
+
+	return ret;
+}
+
+/**
+ * vmw_cmdbuf_space_inline - Set up a command buffer header with
+ * inline command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ */
+static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
+				   struct vmw_cmdbuf_header *header,
+				   int size)
+{
+	struct vmw_cmdbuf_dheader *dheader;
+	SVGACBHeader *cb_hdr;
+
+	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
+		return -ENOMEM;
+
+	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
+				 &header->handle);
+	if (!dheader)
+		return -ENOMEM;
+
+	header->inline_space = true;
+	header->size = VMW_CMDBUF_INLINE_SIZE;
+	cb_hdr = &dheader->cb_header;
+	header->cb_header = cb_hdr;
+	header->cmd = dheader->cmd;
+	memset(dheader, 0, sizeof(*dheader));
+	cb_hdr->status = SVGA_CB_STATUS_NONE;
+	cb_hdr->flags = SVGA_CB_FLAG_NONE;
+	cb_hdr->ptr.pa = (u64)header->handle +
+		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
+ * command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptibly while waiting for space.
+ * @p_header: Points to a header pointer to populate on successful return.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer. The header pointer returned in @p_header should
+ * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
+ */
+void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+		       size_t size, bool interruptible,
+		       struct vmw_cmdbuf_header **p_header)
+{
+	struct vmw_cmdbuf_header *header;
+	int ret = 0;
+
+	*p_header = NULL;
+
+	header = kzalloc(sizeof(*header), GFP_KERNEL);
+	if (!header)
+		return ERR_PTR(-ENOMEM);
+
+	if (size <= VMW_CMDBUF_INLINE_SIZE)
+		ret = vmw_cmdbuf_space_inline(man, header, size);
+	else
+		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
+
+	if (ret) {
+		kfree(header);
+		return ERR_PTR(ret);
+	}
+
+	header->man = man;
+	INIT_LIST_HEAD(&header->list);
+	header->cb_header->status = SVGA_CB_STATUS_NONE;
+	*p_header = header;
+
+	return header->cmd;
+}
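+
+/*
+ * Illustrative sketch (not functional driver code) of a caller using a
+ * dedicated command buffer: allocate header and space, reserve, fill in
+ * commands and commit, using only functions defined in this file:
+ *
+ *	struct vmw_cmdbuf_header *header;
+ *	void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);
+ *
+ *	if (IS_ERR(cmd))
+ *		return PTR_ERR(cmd);
+ *	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, header);
+ *	... write up to @size bytes of device commands at @cmd ...
+ *	vmw_cmdbuf_commit(man, size, header, false);
+ */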
+
+/**
+ * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
+ * command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptibly while waiting for space.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
+				    size_t size,
+				    int ctx_id,
+				    bool interruptible)
+{
+	struct vmw_cmdbuf_header *cur;
+	void *ret;
+
+	if (vmw_cmdbuf_cur_lock(man, interruptible))
+		return ERR_PTR(-ERESTARTSYS);
+
+	cur = man->cur;
+	if (cur && (size + man->cur_pos > cur->size ||
+		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
+		     ctx_id != cur->cb_header->dxContext)))
+		__vmw_cmdbuf_cur_flush(man);
+
+	if (!man->cur) {
+		ret = vmw_cmdbuf_alloc(man,
+				       max_t(size_t, size, man->default_size),
+				       interruptible, &man->cur);
+		if (IS_ERR(ret)) {
+			vmw_cmdbuf_cur_unlock(man);
+			return ret;
+		}
+
+		cur = man->cur;
+	}
+
+	if (ctx_id != SVGA3D_INVALID_ID) {
+		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+		cur->cb_header->dxContext = ctx_id;
+	}
+
+	cur->reserved = size;
+
+	return (void *) (man->cur->cmd + man->cur_pos);
+}
+
+/**
+ * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
+				  size_t size, bool flush)
+{
+	struct vmw_cmdbuf_header *cur = man->cur;
+
+	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+	WARN_ON(size > cur->reserved);
+	man->cur_pos += size;
+	if (!size)
+		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+	if (flush)
+		__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+}
+
+/**
+ * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptibly while waiting for space.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+			 int ctx_id, bool interruptible,
+			 struct vmw_cmdbuf_header *header)
+{
+	if (!header)
+		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
+
+	if (size > header->size)
+		return ERR_PTR(-EINVAL);
+
+	if (ctx_id != SVGA3D_INVALID_ID) {
+		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+		header->cb_header->dxContext = ctx_id;
+	}
+
+	header->reserved = size;
+	return header->cmd;
+}
+
+/**
+ * vmw_cmdbuf_commit - Commit commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+		       struct vmw_cmdbuf_header *header, bool flush)
+{
+	if (!header) {
+		vmw_cmdbuf_commit_cur(man, size, flush);
+		return;
+	}
+
+	(void) vmw_cmdbuf_cur_lock(man, false);
+	__vmw_cmdbuf_cur_flush(man);
+	WARN_ON(size > header->reserved);
+	man->cur = header;
+	man->cur_pos = size;
+	if (!size)
+		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+	if (flush)
+		__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+}
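+
+/*
+ * Illustrative sketch of a small kernel command submission through the
+ * current command buffer, i.e. with a NULL header argument:
+ *
+ *	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
+ *				       true, NULL);
+ *
+ *	if (IS_ERR(cmd))
+ *		return PTR_ERR(cmd);
+ *	... write up to @size bytes of device commands at @cmd ...
+ *	vmw_cmdbuf_commit(man, size, NULL, false);
+ */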
+
+/**
+ * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
+ *
+ * @man: The command buffer manager.
+ */
+void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
+{
+	if (!man)
+		return;
+
+	tasklet_schedule(&man->tasklet);
+}
+
+/**
+ * vmw_cmdbuf_send_device_command - Send a command through the device context.
+ *
+ * @man: The command buffer manager.
+ * @command: Pointer to the command to send.
+ * @size: Size of the command.
+ *
+ * Synchronously sends a device context command.
+ */
+static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
+					  const void *command,
+					  size_t size)
+{
+	struct vmw_cmdbuf_header *header;
+	int status;
+	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
+
+	if (IS_ERR(cmd))
+		return PTR_ERR(cmd);
+
+	memcpy(cmd, command, size);
+	header->cb_header->length = size;
+	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
+	spin_lock_bh(&man->lock);
+	status = vmw_cmdbuf_header_submit(header);
+	spin_unlock_bh(&man->lock);
+	vmw_cmdbuf_header_free(header);
+
+	if (status != SVGA_CB_STATUS_COMPLETED) {
+		DRM_ERROR("Device context command failed with status %d\n",
+			  status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_startstop - Send a start / stop command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @enable: Whether to enable or disable the context.
+ *
+ * Synchronously sends a device start / stop context command.
+ */
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
+				bool enable)
+{
+	struct {
+		uint32 id;
+		SVGADCCmdStartStop body;
+	} __packed cmd;
+
+	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
+	cmd.body.enable = (enable) ? 1 : 0;
+	cmd.body.context = SVGA_CB_CONTEXT_0;
+
+	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+/**
+ * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the main space pool.
+ * @default_size: The default size of the command buffer for small kernel
+ * submissions.
+ *
+ * Allocate the main command buffer space pool of the given size, and set
+ * the default size of the command buffer for small kernel submissions.
+ * submissions. Note that this function requires that rudimentary command
+ * submission is already available and that the MOB memory manager is alive.
+ * Returns 0 on success. Negative error code on failure.
+ */
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+			     size_t size, size_t default_size)
+{
+	struct vmw_private *dev_priv = man->dev_priv;
+	bool dummy;
+	int ret;
+
+	if (man->has_pool)
+		return -EINVAL;
+
+	/* First, try to allocate a huge chunk of DMA memory */
+	size = PAGE_ALIGN(size);
+	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+				      &man->handle, GFP_KERNEL);
+	if (man->map) {
+		man->using_mob = false;
+	} else {
+		/*
+		 * DMA memory failed. If we can have command buffers in a
+		 * MOB, try to use that instead. Note that this will
+		 * actually call into the already enabled manager, when
+		 * binding the MOB.
+		 */
+		if (!(dev_priv->capabilities & SVGA_CAP_DX))
+			return -ENOMEM;
+
+		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
+				    &vmw_mob_ne_placement, 0, false, NULL,
+				    &man->cmd_space);
+		if (ret)
+			return ret;
+
+		man->using_mob = true;
+		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
+				  &man->map_obj);
+		if (ret)
+			goto out_no_map;
+
+		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+	}
+
+	man->size = size;
+	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
+
+	man->has_pool = true;
+	man->default_size = default_size;
+	DRM_INFO("Using command buffers with %s pool.\n",
+		 (man->using_mob) ? "MOB" : "DMA");
+
+	return 0;
+
+out_no_map:
+	if (man->using_mob)
+		ttm_bo_unref(&man->cmd_space);
+
+	return ret;
+}
+
+/**
+ * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
+ * inline command buffer submissions only.
+ *
+ * @dev_priv: Pointer to device private structure.
+ *
+ * Returns a pointer to a command buffer manager on success, or an error
+ * pointer on failure. The command buffer manager will be enabled for
+ * submissions of size VMW_CMDBUF_INLINE_SIZE or smaller only.
+ */
+struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_cmdbuf_man *man;
+	struct vmw_cmdbuf_context *ctx;
+	int i;
+	int ret;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
+		return ERR_PTR(-ENOSYS);
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (!man)
+		return ERR_PTR(-ENOMEM);
+
+	man->headers = dma_pool_create("vmwgfx cmdbuf",
+				       &dev_priv->dev->pdev->dev,
+				       sizeof(SVGACBHeader),
+				       64, PAGE_SIZE);
+	if (!man->headers) {
+		ret = -ENOMEM;
+		goto out_no_pool;
+	}
+
+	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
+					&dev_priv->dev->pdev->dev,
+					sizeof(struct vmw_cmdbuf_dheader),
+					64, PAGE_SIZE);
+	if (!man->dheaders) {
+		ret = -ENOMEM;
+		goto out_no_dpool;
+	}
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		vmw_cmdbuf_ctx_init(ctx);
+
+	INIT_LIST_HEAD(&man->error);
+	spin_lock_init(&man->lock);
+	mutex_init(&man->cur_mutex);
+	mutex_init(&man->space_mutex);
+	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
+		     (unsigned long) man);
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
+	init_waitqueue_head(&man->alloc_queue);
+	init_waitqueue_head(&man->idle_queue);
+	man->dev_priv = dev_priv;
+	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
+	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
+			       &dev_priv->error_waiters);
+	ret = vmw_cmdbuf_startstop(man, true);
+	if (ret) {
+		DRM_ERROR("Failed starting command buffer context 0.\n");
+		vmw_cmdbuf_man_destroy(man);
+		return ERR_PTR(ret);
+	}
+
+	return man;
+
+out_no_dpool:
+	dma_pool_destroy(man->headers);
+out_no_pool:
+	kfree(man);
+
+	return ERR_PTR(ret);
+}
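+
+/*
+ * Typical manager lifecycle (illustrative sketch; error handling
+ * omitted): the manager is created early with inline submissions only,
+ * upgraded to a large pool once MOB memory management is up, and torn
+ * down in reverse order:
+ *
+ *	man = vmw_cmdbuf_man_create(dev_priv);
+ *	ret = vmw_cmdbuf_set_pool_size(man, size, default_size);
+ *	...
+ *	vmw_cmdbuf_remove_pool(man);
+ *	vmw_cmdbuf_man_destroy(man);
+ */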
+
+/**
+ * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function removes the main buffer space pool, and should be called
+ * before MOB memory management is removed. When this function has been called,
+ * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
+ * less are allowed, and the default size of the command buffer for small kernel
+ * submissions is also set to this size.
+ */
+void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
+{
+	if (!man->has_pool)
+		return;
+
+	man->has_pool = false;
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
+	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
+	if (man->using_mob) {
+		(void) ttm_bo_kunmap(&man->map_obj);
+		ttm_bo_unref(&man->cmd_space);
+	} else {
+		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+				  man->size, man->map, man->handle);
+	}
+}
+
+/**
+ * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function idles and then destroys a command buffer manager.
+ */
+void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
+{
+	WARN_ON_ONCE(man->has_pool);
+	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
+	if (vmw_cmdbuf_startstop(man, false))
+		DRM_ERROR("Failed stopping command buffer context 0.\n");
+
+	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
+				  &man->dev_priv->error_waiters);
+	tasklet_kill(&man->tasklet);
+	(void) cancel_work_sync(&man->work);
+	dma_pool_destroy(man->dheaders);
+	dma_pool_destroy(man->headers);
+	mutex_destroy(&man->cur_mutex);
+	mutex_destroy(&man->space_mutex);
+	kfree(man);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 21e9b7f8dad0..13db8a2851ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,15 +26,10 @@
  **************************************************************************/
 
 #include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
 
 #define VMW_CMDBUF_RES_MAN_HT_ORDER 12
 
-enum vmw_cmdbuf_res_state {
-	VMW_CMDBUF_RES_COMMITED,
-	VMW_CMDBUF_RES_ADD,
-	VMW_CMDBUF_RES_DEL
-};
-
 /**
  * struct vmw_cmdbuf_res - Command buffer managed resource entry.
  *
@@ -132,9 +127,12 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
 
 	list_for_each_entry_safe(entry, next, list, head) {
 		list_del(&entry->head);
+		if (entry->res->func->commit_notify)
+			entry->res->func->commit_notify(entry->res,
+							entry->state);
 		switch (entry->state) {
 		case VMW_CMDBUF_RES_ADD:
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			list_add_tail(&entry->head, &entry->man->list);
 			break;
 		case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
 						 &entry->hash);
 			list_del(&entry->head);
 			list_add_tail(&entry->head, &entry->man->list);
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			break;
 		default:
 			BUG();
@@ -231,6 +229,9 @@ out_invalid_key:
  * @res_type: The resource type.
  * @user_key: The user-space id of the resource.
  * @list: The staging list.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The returned pointer is not
+ * ref-counted.
  *
  * This function looks up the struct vmw_cmdbuf_res entry from the manager
  * hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@ out_invalid_key:
 int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 			  enum vmw_cmdbuf_res_type res_type,
 			  u32 user_key,
-			  struct list_head *list)
+			  struct list_head *list,
+			  struct vmw_resource **res_p)
 {
 	struct vmw_cmdbuf_res *entry;
 	struct drm_hash_item *hash;
@@ -256,12 +258,14 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 	switch (entry->state) {
 	case VMW_CMDBUF_RES_ADD:
 		vmw_cmdbuf_res_free(man, entry);
+		*res_p = NULL;
 		break;
-	case VMW_CMDBUF_RES_COMMITED:
+	case VMW_CMDBUF_RES_COMMITTED:
 		(void) drm_ht_remove_item(&man->resources, &entry->hash);
 		list_del(&entry->head);
 		entry->state = VMW_CMDBUF_RES_DEL;
 		list_add_tail(&entry->head, list);
+		*res_p = entry->res;
 		break;
 	default:
 		BUG();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 5ac92874404d..443d1ed00de7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,19 +27,19 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
-	struct vmw_ctx_binding_state cbs;
+	struct vmw_ctx_binding_state *cbs;
 	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+	spinlock_t cotable_lock;
+	struct vmw_dma_buffer *dx_query_mob;
 };
 
-
-
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +51,14 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +95,38 @@ static const struct vmw_res_func vmw_gb_context_func = {
 	.unbind = vmw_gb_context_unbind
 };
 
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
-	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
-	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
-	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+	.res_type = vmw_res_dx_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "dx contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_context_create,
+	.destroy = vmw_dx_context_destroy,
+	.bind = vmw_dx_context_bind,
+	.unbind = vmw_dx_context_unbind
+};
 
 /**
  * Context management:
  */
 
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+	struct vmw_resource *res;
+	int i;
+
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[i];
+		uctx->cotables[i] = NULL;
+		spin_unlock(&uctx->cotable_lock);
+
+		if (res)
+			vmw_resource_unreference(&res);
+	}
+}
+
 static void vmw_hw_context_destroy(struct vmw_resource *res)
 {
 	struct vmw_user_context *uctx =
@@ -113,17 +138,19 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 	} *cmd;
 
 
-	if (res->func->destroy == vmw_gb_context_destroy) {
+	if (res->func->destroy == vmw_gb_context_destroy ||
+	    res->func->destroy == vmw_dx_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
 		vmw_cmdbuf_res_man_destroy(uctx->man);
 		mutex_lock(&dev_priv->binding_mutex);
-		(void) vmw_context_binding_state_kill(&uctx->cbs);
-		(void) vmw_gb_context_destroy(res);
+		vmw_binding_state_kill(uctx->cbs);
+		(void) res->func->destroy(res);
 		mutex_unlock(&dev_priv->binding_mutex);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
+		vmw_context_cotables_unref(uctx);
 		return;
 	}
 
@@ -135,43 +162,67 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 		return;
 	}
 
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 static int vmw_gb_context_init(struct vmw_private *dev_priv,
+			       bool dx,
 			       struct vmw_resource *res,
-			       void (*res_free) (struct vmw_resource *res))
+			       void (*res_free)(struct vmw_resource *res))
 {
-	int ret;
+	int ret, i;
 	struct vmw_user_context *uctx =
 		container_of(res, struct vmw_user_context, res);
 
+	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+			    SVGA3D_CONTEXT_DATA_SIZE);
 	ret = vmw_resource_init(dev_priv, res, true,
-				res_free, &vmw_gb_context_func);
-	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+				res_free,
+				dx ? &vmw_dx_context_func :
+				&vmw_gb_context_func);
 	if (unlikely(ret != 0))
 		goto out_err;
 
 	if (dev_priv->has_mob) {
 		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
-		if (unlikely(IS_ERR(uctx->man))) {
+		if (IS_ERR(uctx->man)) {
 			ret = PTR_ERR(uctx->man);
 			uctx->man = NULL;
 			goto out_err;
 		}
 	}
 
-	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
-	INIT_LIST_HEAD(&uctx->cbs.list);
+	uctx->cbs = vmw_binding_state_alloc(dev_priv);
+	if (IS_ERR(uctx->cbs)) {
+		ret = PTR_ERR(uctx->cbs);
+		goto out_err;
+	}
+
+	spin_lock_init(&uctx->cotable_lock);
+
+	if (dx) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+							      &uctx->res, i);
+			if (unlikely(uctx->cotables[i] == NULL)) {
+				ret = -ENOMEM;
+				goto out_cotables;
+			}
+		}
+	}
 
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
 
+out_cotables:
+	vmw_context_cotables_unref(uctx);
 out_err:
 	if (res_free)
 		res_free(res);
@@ -182,7 +233,8 @@ out_err:
 
 static int vmw_context_init(struct vmw_private *dev_priv,
 			    struct vmw_resource *res,
-			    void (*res_free) (struct vmw_resource *res))
+			    void (*res_free)(struct vmw_resource *res),
+			    bool dx)
 {
 	int ret;
 
@@ -192,7 +244,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 	} *cmd;
 
 	if (dev_priv->has_mob)
-		return vmw_gb_context_init(dev_priv, res, res_free);
+		return vmw_gb_context_init(dev_priv, dx, res, res_free);
 
 	ret = vmw_resource_init(dev_priv, res, false,
 				res_free, &vmw_legacy_context_func);
@@ -215,12 +267,12 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 		return -ENOMEM;
 	}
 
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
 
@@ -232,19 +284,10 @@ out_early:
 	return ret;
 }
 
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
-	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
-	int ret;
-
-	if (unlikely(res == NULL))
-		return NULL;
-
-	ret = vmw_context_init(dev_priv, res, NULL);
-
-	return (ret == 0) ? res : NULL;
-}
 
+/*
+ * GB context.
+ */
 
 static int vmw_gb_context_create(struct vmw_resource *res)
 {
@@ -281,7 +324,7 @@ static int vmw_gb_context_create(struct vmw_resource *res)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
 
@@ -309,7 +352,6 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
 			  "binding.\n");
 		return -ENOMEM;
 	}
-
 	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
@@ -346,7 +388,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_scrub(&uctx->cbs);
+	vmw_binding_state_scrub(uctx->cbs);
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -414,7 +456,231 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
 	if (dev_priv->query_cid == res->id)
 		dev_priv->query_cid_valid = false;
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
+/*
+ * DX context.
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineContext body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a context id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_inc(dev_priv);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validContents = res->backup_dirty;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * Cotables must be unbound before their context, but unbinding requires
+ * the backup buffer to be reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so all bindings are scrubbed
+ * first to avoid having to do that later with an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+				   bool readback)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	int i;
+
+	vmw_binding_state_scrub(uctx->cbs);
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		struct vmw_resource *res;
+
+		/* Avoid racing with ongoing cotable destruction. */
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[vmw_cotable_scrub_order[i]];
+		if (res)
+			res = vmw_resource_reference_unless_doomed(res);
+		spin_unlock(&uctx->cotable_lock);
+		if (!res)
+			continue;
+
+		WARN_ON(vmw_cotable_scrub(res, readback));
+		vmw_resource_unreference(&res);
+	}
+}
+
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackContext body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_context_scrub_cotables(res, readback);
+
+	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
+	    readback) {
+		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
+		if (vmw_query_readback_all(uctx->dx_query_mob))
+			DRM_ERROR("Failed to read back query states\n");
+	}
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "unbinding.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.cid = res->id;
+		cmd2 = (void *) (&cmd1[1]);
+	}
+	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.cid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyContext body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (dev_priv->query_cid == res->id)
+		dev_priv->query_cid_valid = false;
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
@@ -435,6 +701,11 @@ static void vmw_user_context_free(struct vmw_resource *res)
 	    container_of(res, struct vmw_user_context, res);
 	struct vmw_private *dev_priv = res->dev_priv;
 
+	if (ctx->cbs)
+		vmw_binding_state_free(ctx->cbs);
+
+	(void) vmw_context_bind_dx_query(res, NULL);
+
 	ttm_base_object_kfree(ctx, base);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 			    vmw_user_context_size);
@@ -465,8 +736,8 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
 }
 
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
+static int vmw_context_define(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv, bool dx)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_user_context *ctx;
@@ -476,6 +747,10 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 
+	if (!dev_priv->has_dx && dx) {
+		DRM_ERROR("DX contexts not supported by device.\n");
+		return -EINVAL;
+	}
 
 	/*
 	 * Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +791,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 	 * From here on, the destructor takes over resource freeing.
 	 */
 
-	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
 	if (unlikely(ret != 0))
 		goto out_unlock;
 
@@ -535,387 +810,128 @@ out_err:
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
-
-}
-
-/**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
-{
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetShader body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for shader "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind)
-{
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetRenderTarget body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for render target "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	cmd->body.target.face = 0;
-	cmd->body.target.mipmap = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
 }
 
-/**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
-				     bool rebind)
-{
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		struct {
-			SVGA3dCmdSetTextureState c;
-			SVGA3dTextureState s1;
-		} body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for texture "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-
-	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.c.cid = bi->ctx->id;
-	cmd->body.s1.stage = bi->i1.texture_stage;
-	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
-static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
 {
-	list_del(&cb->ctx_list);
-	if (!list_empty(&cb->res_list))
-		list_del(&cb->res_list);
-	cb->bi.ctx = NULL;
+	return vmw_context_define(dev, data, file_priv, false);
 }
 
-/**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
-int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-			    const struct vmw_ctx_bindinfo *bi)
+int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
 {
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
-			DRM_ERROR("Illegal render target type %u.\n",
-				  (unsigned) bi->i1.rt_type);
-			return -EINVAL;
-		}
-		loc = &cbs->render_targets[bi->i1.rt_type];
-		break;
-	case vmw_ctx_binding_tex:
-		if (unlikely((unsigned)bi->i1.texture_stage >=
-			     SVGA3D_NUM_TEXTURE_UNITS)) {
-			DRM_ERROR("Illegal texture/sampler unit %u.\n",
-				  (unsigned) bi->i1.texture_stage);
-			return -EINVAL;
-		}
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		if (unlikely((unsigned)bi->i1.shader_type >=
-			     SVGA3D_SHADERTYPE_MAX)) {
-			DRM_ERROR("Illegal shader type %u.\n",
-				  (unsigned) bi->i1.shader_type);
-			return -EINVAL;
-		}
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
+	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+	struct drm_vmw_context_arg *rep = &arg->rep;
+
+	switch (arg->req) {
+	case drm_vmw_context_legacy:
+		return vmw_context_define(dev, rep, file_priv, false);
+	case drm_vmw_context_dx:
+		return vmw_context_define(dev, rep, file_priv, true);
 	default:
-		BUG();
-	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	loc->bi = *bi;
-	loc->bi.scrubbed = false;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	INIT_LIST_HEAD(&loc->res_list);
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
-static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
-					 const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		loc = &cbs->render_targets[bi->i1.rt_type];
 		break;
-	case vmw_ctx_binding_tex:
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
-	default:
-		BUG();
-	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	if (bi->res != NULL) {
-		loc->bi = *bi;
-		list_add_tail(&loc->ctx_list, &cbs->list);
-		list_add_tail(&loc->res_list, &bi->res->binding_head);
 	}
+	return -EINVAL;
 }
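+
+/*
+ * Illustrative userspace sketch (not part of this patch): creating a DX
+ * context through the extended define ioctl above. The file descriptor
+ * handling is an assumption for illustration; the ioctl number and the
+ * argument layout are the ones defined for this driver.
+ *
+ *	union drm_vmw_extended_context_arg arg;
+ *
+ *	memset(&arg, 0, sizeof(arg));
+ *	arg.req = drm_vmw_context_dx;
+ *	if (drmIoctl(fd, DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT, &arg))
+ *		return -errno;
+ *
+ * On a device without DX support the ioctl fails with -EINVAL; on
+ * success, arg.rep.cid holds the new context id.
+ */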
 
 /**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
-static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
-{
-	if (!cb->bi.scrubbed) {
-		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
-		cb->bi.scrubbed = true;
-	}
-	vmw_context_binding_drop(cb);
-}
-
-/**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ * vmw_context_binding_list - Return a list of context bindings
  *
- * @cbs: Pointer to the context binding state tracker.
+ * @ctx: The context resource
  *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
  */
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
 {
-	struct vmw_ctx_binding *entry, *next;
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
 
-	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
-		vmw_context_binding_kill(entry);
+	return vmw_binding_state_list(uctx->cbs);
 }
 
-/**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
 {
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
+	return container_of(ctx, struct vmw_user_context, res)->man;
 }
 
-/**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
-void vmw_context_binding_res_list_kill(struct list_head *head)
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+					 SVGACOTableType cotable_type)
 {
-	struct vmw_ctx_binding *entry, *next;
+	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+		return ERR_PTR(-EINVAL);
 
-	list_for_each_entry_safe(entry, next, head, res_list)
-		vmw_context_binding_kill(entry);
+	return vmw_resource_reference
+		(container_of(ctx, struct vmw_user_context, res)->
+		 cotables[cotable_type]);
 }
 
 /**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
+ * vmw_context_binding_state - Return a pointer to the context binding state
  *
- * @head: list head of resource binding list
+ * @ctx: The context resource
  *
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
  */
-void vmw_context_binding_res_list_scrub(struct list_head *head)
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
 {
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, head, res_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
+	return container_of(ctx, struct vmw_user_context, res)->cbs;
 }
 
 /**
- * vmw_context_binding_state_transfer - Commit staged binding info
+ * vmw_context_bind_dx_query - Set the query MOB for the context
+ *
+ * If @mob is NULL, this function removes the association between the MOB
+ * and the context. The caller must hold the binding_mutex.
  *
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
+ * @ctx_res: The context resource
+ * @mob: a reference to the query MOB
  *
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands
+ * Returns -EINVAL if a MOB has already been set and does not match the one
+ * specified in the parameter.  0 otherwise.
  */
-void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
-					struct vmw_ctx_binding_state *from)
+int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+			      struct vmw_dma_buffer *mob)
 {
 	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
-		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
-}
+		container_of(ctx_res, struct vmw_user_context, res);
 
-/**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
-int vmw_context_rebind_all(struct vmw_resource *ctx)
-{
-	struct vmw_ctx_binding *entry;
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
-	int ret;
+	if (mob == NULL) {
+		if (uctx->dx_query_mob) {
+			uctx->dx_query_mob->dx_query_ctx = NULL;
+			vmw_dmabuf_unreference(&uctx->dx_query_mob);
+			uctx->dx_query_mob = NULL;
+		}
 
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (likely(!entry->bi.scrubbed))
-			continue;
+		return 0;
+	}
 
-		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
-			    SVGA3D_INVALID_ID))
-			continue;
+	/* Can only have one MOB per context for queries */
+	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
+		return -EINVAL;
 
-		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
-		if (unlikely(ret != 0))
-			return ret;
+	mob->dx_query_ctx = ctx_res;
 
-		entry->bi.scrubbed = false;
-	}
+	if (!uctx->dx_query_mob)
+		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
 
 	return 0;
 }
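+
+/*
+ * Illustrative call pattern (not part of this patch) for the DX query
+ * MOB binding above, assuming the caller has already looked up
+ * @ctx_res and @mob during command-stream validation:
+ *
+ *	mutex_lock(&dev_priv->binding_mutex);
+ *	ret = vmw_context_bind_dx_query(ctx_res, mob);
+ *	mutex_unlock(&dev_priv->binding_mutex);
+ *
+ * Passing a NULL @mob, as vmw_user_context_free() does, drops the
+ * association again.
+ */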
 
 /**
- * vmw_context_binding_list - Return a list of context bindings
- *
- * @ctx: The context resource
+ * vmw_context_get_dx_query_mob - Return a non-refcounted pointer to the DX query MOB
  *
- * Returns the current list of bindings of the given context. Note that
- * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ * @ctx_res: The context resource
  */
-struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
 {
-	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
-}
+	struct vmw_user_context *uctx =
+		container_of(ctx_res, struct vmw_user_context, res);
 
-struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
-{
-	return container_of(ctx, struct vmw_user_context, res)->man;
+	return uctx->dx_query_mob;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
new file mode 100644
index 000000000000..ce659a125f2b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -0,0 +1,662 @@
+/**************************************************************************
+ *
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Treat context OTables as resources to make use of the resource
+ * backing MOB eviction mechanism, which is used to read back the COTable
+ * whenever the backing MOB is evicted.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+
+/**
+ * struct vmw_cotable - Context Object Table resource
+ *
+ * @res: struct vmw_resource we are deriving from.
+ * @ctx: non-refcounted pointer to the owning context.
+ * @size_read_back: Size of data read back during eviction.
+ * @seen_entries: Seen entries in command stream for this cotable.
+ * @type: The cotable type.
+ * @scrubbed: Whether the cotable has been scrubbed.
+ * @resource_list: List of resources in the cotable.
+ */
+struct vmw_cotable {
+	struct vmw_resource res;
+	struct vmw_resource *ctx;
+	size_t size_read_back;
+	int seen_entries;
+	u32 type;
+	bool scrubbed;
+	struct list_head resource_list;
+};
+
+/**
+ * struct vmw_cotable_info - Static info about cotable types
+ *
+ * @min_initial_entries: Minimum number of initial entries at cotable
+ * allocation for this cotable type.
+ * @size: Size of each entry.
+ * @unbind_func: Optional callback used to scrub the resources tracked by
+ * a cotable of this type before it is unbound.
+ */
+struct vmw_cotable_info {
+	u32 min_initial_entries;
+	u32 size;
+	void (*unbind_func)(struct vmw_private *, struct list_head *,
+			    bool);
+};
+
+static const struct vmw_cotable_info co_info[] = {
+	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
+	{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
+	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
+	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
+	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
+	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
+	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+};
+
+/*
+ * Cotables with bindings that we remove must be scrubbed first;
+ * otherwise the device will swap in an invalid context when we remove
+ * the bindings before scrubbing such a cotable.
+ */
+const SVGACOTableType vmw_cotable_scrub_order[] = {
+	SVGA_COTABLE_RTVIEW,
+	SVGA_COTABLE_DSVIEW,
+	SVGA_COTABLE_SRVIEW,
+	SVGA_COTABLE_DXSHADER,
+	SVGA_COTABLE_ELEMENTLAYOUT,
+	SVGA_COTABLE_BLENDSTATE,
+	SVGA_COTABLE_DEPTHSTENCIL,
+	SVGA_COTABLE_RASTERIZERSTATE,
+	SVGA_COTABLE_SAMPLER,
+	SVGA_COTABLE_STREAMOUTPUT,
+	SVGA_COTABLE_DXQUERY,
+};
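+
+/*
+ * Illustrative sketch (not part of this patch) of how a context-wide
+ * scrub can consume the ordering above; the real walker is
+ * vmw_dx_context_scrub_cotables() in vmwgfx_context.c:
+ *
+ *	for (i = 0; i < ARRAY_SIZE(vmw_cotable_scrub_order); ++i) {
+ *		res = uctx->cotables[vmw_cotable_scrub_order[i]];
+ *		if (res)
+ *			WARN_ON(vmw_cotable_scrub(res, readback));
+ *	}
+ */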
+
+static int vmw_cotable_bind(struct vmw_resource *res,
+			    struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_unbind(struct vmw_resource *res,
+			      bool readback,
+			      struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_create(struct vmw_resource *res);
+static int vmw_cotable_destroy(struct vmw_resource *res);
+
+static const struct vmw_res_func vmw_cotable_func = {
+	.res_type = vmw_res_cotable,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "context guest backed object tables",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_cotable_create,
+	.destroy = vmw_cotable_destroy,
+	.bind = vmw_cotable_bind,
+	.unbind = vmw_cotable_unbind,
+};
+
+/**
+ * vmw_cotable - Convert a struct vmw_resource pointer to a struct
+ * vmw_cotable pointer
+ *
+ * @res: Pointer to the resource.
+ */
+static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_cotable, res);
+}
+
+/**
+ * vmw_cotable_destroy - Cotable resource destroy callback
+ *
+ * @res: Pointer to the cotable resource.
+ *
+ * There is no device cotable destroy command, so this function only
+ * makes sure that the resource id is set to invalid.
+ */
+static int vmw_cotable_destroy(struct vmw_resource *res)
+{
+	res->id = -1;
+	return 0;
+}
+
+/**
+ * vmw_cotable_unscrub - Undo a cotable scrub operation
+ *
+ * @res: Pointer to the cotable resource
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ * This is functionally identical to bind(), but with a different interface.
+ */
+static int vmw_cotable_unscrub(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = &res->backup->base;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCOTable body;
+	} *cmd;
+
+	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	lockdep_assert_held(&bo->resv->lock.base);
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving FIFO space for cotable "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
+	WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = vcotbl->ctx->id;
+	cmd->body.type = vcotbl->type;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validSizeInBytes = vcotbl->size_read_back;
+
+	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+	vcotbl->scrubbed = false;
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_bind - Cotable resource bind callback
+ *
+ * @res: Pointer to the cotable resource
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ */
+static int vmw_cotable_bind(struct vmw_resource *res,
+			    struct ttm_validate_buffer *val_buf)
+{
+	/*
+	 * The create() callback may have changed @res->backup without
+	 * the caller noticing, and with val_buf->bo still pointing to
+	 * the old backup buffer. Although hackish, and not used currently,
+	 * take the opportunity to correct the value here so that it's not
+	 * misused in the future.
+	 */
+	val_buf->bo = &res->backup->base;
+
+	return vmw_cotable_unscrub(res);
+}
+
+/**
+ * vmw_cotable_scrub - Scrub the cotable from the device.
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to initiate a readback of the cotable data to the
+ * backup buffer.
+ *
+ * In some situations (context swapouts) it might be desirable to make the
+ * device forget about the cotable without performing a full unbind. A full
+ * unbind requires reserved backup buffers and it might not be possible to
+ * reserve them due to locking order violation issues. The vmw_cotable_scrub
+ * function implements a partial unbind() without that requirement but with the
+ * following restrictions.
+ * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
+ *    be called.
+ * 2) Before the cotable backing buffer is used by the CPU, or during the
+ *    resource destruction, vmw_cotable_unbind() must be called.
+ */
+int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	size_t submit_size;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackCOTable body;
+	} *cmd0;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCOTable body;
+	} *cmd1;
+
+	if (vcotbl->scrubbed)
+		return 0;
+
+	if (co_info[vcotbl->type].unbind_func)
+		co_info[vcotbl->type].unbind_func(dev_priv,
+						  &vcotbl->resource_list,
+						  readback);
+	submit_size = sizeof(*cmd1);
+	if (readback)
+		submit_size += sizeof(*cmd0);
+
+	cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
+	if (!cmd1) {
+		DRM_ERROR("Failed reserving FIFO space for cotable "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	vcotbl->size_read_back = 0;
+	if (readback) {
+		cmd0 = (void *) cmd1;
+		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+		cmd0->header.size = sizeof(cmd0->body);
+		cmd0->body.cid = vcotbl->ctx->id;
+		cmd0->body.type = vcotbl->type;
+		cmd1 = (void *) &cmd0[1];
+		vcotbl->size_read_back = res->backup_size;
+	}
+	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+	cmd1->header.size = sizeof(cmd1->body);
+	cmd1->body.cid = vcotbl->ctx->id;
+	cmd1->body.type = vcotbl->type;
+	cmd1->body.mobid = SVGA3D_INVALID_ID;
+	cmd1->body.validSizeInBytes = 0;
+	vmw_fifo_commit_flush(dev_priv, submit_size);
+	vcotbl->scrubbed = true;
+
+	/* Trigger a create() on next validate. */
+	res->id = -1;
+
+	return 0;
+}
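+
+/*
+ * Illustrative lifecycle (not part of this patch) for the restrictions
+ * documented above, for example during a context swapout:
+ *
+ *	vmw_cotable_scrub(res, true);
+ *	...
+ *	vmw_cotable_unscrub(res);
+ *
+ * The scrub makes the device forget the MOB; per restriction 1) the
+ * unscrub must happen before the cotable is again used by the GPU, and
+ * per restriction 2) CPU access to the backup buffer, or resource
+ * destruction, instead requires a full unbind().
+ */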
+
+/**
+ * vmw_cotable_unbind - Cotable resource unbind callback
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to read back cotable data to the backup buffer.
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * Unbinds the cotable from the device and fences the backup buffer.
+ */
+static int vmw_cotable_unbind(struct vmw_resource *res,
+			      bool readback,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+	int ret;
+
+	if (list_empty(&res->mob_head))
+		return 0;
+
+	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	lockdep_assert_held(&bo->resv->lock.base);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (!vcotbl->scrubbed)
+		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
+	mutex_unlock(&dev_priv->binding_mutex);
+	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_fence_single_bo(bo, fence);
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return ret;
+}
+
+/**
+ * vmw_cotable_readback - Read back a cotable without unbinding.
+ *
+ * @res: The cotable resource.
+ *
+ * Reads back a cotable to its backing mob without scrubbing the MOB from
+ * the cotable. The MOB is fenced for subsequent CPU access.
+ */
+static int vmw_cotable_readback(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackCOTable body;
+	} *cmd;
+	struct vmw_fence_obj *fence;
+
+	if (!vcotbl->scrubbed) {
+		cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+					  SVGA3D_INVALID_ID);
+		if (!cmd) {
+			DRM_ERROR("Failed reserving FIFO space for cotable "
+				  "readback.\n");
+			return -ENOMEM;
+		}
+		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.cid = vcotbl->ctx->id;
+		cmd->body.type = vcotbl->type;
+		vcotbl->size_read_back = res->backup_size;
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_fence_single_bo(&res->backup->base, fence);
+	vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_resize - Resize a cotable.
+ *
+ * @res: The cotable resource.
+ * @new_size: The new size.
+ *
+ * Resizes a cotable and binds the new backup buffer.
+ * On failure the cotable is left intact.
+ * Important! This function may not fail once the MOB switch has been
+ * committed to hardware. That would put the device context in an
+ * invalid state which we can't currently recover from.
+ */
+static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_dma_buffer *buf, *old_buf = res->backup;
+	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
+	size_t old_size = res->backup_size;
+	size_t old_size_read_back = vcotbl->size_read_back;
+	size_t cur_size_read_back;
+	struct ttm_bo_kmap_obj old_map, new_map;
+	int ret;
+	size_t i;
+
+	ret = vmw_cotable_readback(res);
+	if (ret)
+		return ret;
+
+	cur_size_read_back = vcotbl->size_read_back;
+	vcotbl->size_read_back = old_size_read_back;
+
+	/*
+	 * While the device is processing, allocate and reserve a buffer object
+	 * for the new COTable. Initially pin the buffer object to make sure
+	 * we can use tryreserve without failure.
+	 */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+			      true, vmw_dmabuf_bo_free);
+	if (ret) {
+		DRM_ERROR("Failed initializing new cotable MOB.\n");
+		return ret;
+	}
+
+	bo = &buf->base;
+	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
+
+	ret = ttm_bo_wait(old_bo, false, false, false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed waiting for cotable unbind.\n");
+		goto out_wait;
+	}
+
+	/*
+	 * Do a page-by-page copy of the COTable. This eliminates slow vmap()s.
+	 * This should really be a TTM utility; a sketch of such a helper
+	 * follows this function.
+	 */
+	for (i = 0; i < old_bo->num_pages; ++i) {
+		bool dummy;
+
+		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed mapping old COTable on resize.\n");
+			goto out_wait;
+		}
+		ret = ttm_bo_kmap(bo, i, 1, &new_map);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed mapping new COTable on resize.\n");
+			goto out_map_new;
+		}
+		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
+		       ttm_kmap_obj_virtual(&old_map, &dummy),
+		       PAGE_SIZE);
+		ttm_bo_kunmap(&new_map);
+		ttm_bo_kunmap(&old_map);
+	}
+
+	/* Unpin new buffer, and switch backup buffers. */
+	ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed validating new COTable backup buffer.\n");
+		goto out_wait;
+	}
+
+	res->backup = buf;
+	res->backup_size = new_size;
+	vcotbl->size_read_back = cur_size_read_back;
+
+	/*
+	 * Now tell the device to switch. If this fails, then we need to
+	 * revert the full resize.
+	 */
+	ret = vmw_cotable_unscrub(res);
+	if (ret) {
+		DRM_ERROR("Failed switching COTable backup buffer.\n");
+		res->backup = old_buf;
+		res->backup_size = old_size;
+		vcotbl->size_read_back = old_size_read_back;
+		goto out_wait;
+	}
+
+	/* Let go of the old mob. */
+	list_del(&res->mob_head);
+	list_add_tail(&res->mob_head, &buf->res_list);
+	vmw_dmabuf_unreference(&old_buf);
+	res->id = vcotbl->type;
+
+	return 0;
+
+out_map_new:
+	ttm_bo_kunmap(&old_map);
+out_wait:
+	ttm_bo_unreserve(bo);
+	vmw_dmabuf_unreference(&buf);
+
+	return ret;
+}
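+
+/*
+ * Minimal sketch (not part of this patch) of the generic TTM page-copy
+ * helper hinted at in vmw_cotable_resize() above. The helper name
+ * ttm_bo_copy_pages() is an assumption; both buffer objects must be
+ * reserved and idle, and @num_pages must not exceed either object's
+ * page count:
+ *
+ *	static int ttm_bo_copy_pages(struct ttm_buffer_object *dst,
+ *				     struct ttm_buffer_object *src,
+ *				     unsigned long num_pages)
+ *	{
+ *		struct ttm_bo_kmap_obj src_map, dst_map;
+ *		unsigned long i;
+ *		bool dummy;
+ *		int ret;
+ *
+ *		for (i = 0; i < num_pages; ++i) {
+ *			ret = ttm_bo_kmap(src, i, 1, &src_map);
+ *			if (ret)
+ *				return ret;
+ *			ret = ttm_bo_kmap(dst, i, 1, &dst_map);
+ *			if (ret) {
+ *				ttm_bo_kunmap(&src_map);
+ *				return ret;
+ *			}
+ *			memcpy(ttm_kmap_obj_virtual(&dst_map, &dummy),
+ *			       ttm_kmap_obj_virtual(&src_map, &dummy),
+ *			       PAGE_SIZE);
+ *			ttm_bo_kunmap(&dst_map);
+ *			ttm_bo_kunmap(&src_map);
+ *		}
+ *
+ *		return 0;
+ *	}
+ */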
+
+/**
+ * vmw_cotable_create - Cotable resource create callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * There is no separate create command for cotables, so this callback, which
+ * is called before bind() in the validation sequence, is instead used for
+ * two things:
+ * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
+ *    buffer, that is, if @res->mob_head is non-empty.
+ * 2) Resize the cotable if needed.
+ */
+static int vmw_cotable_create(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	size_t new_size = res->backup_size;
+	size_t needed_size;
+	int ret;
+
+	/* Check whether we need to resize the cotable */
+	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
+	while (needed_size > new_size)
+		new_size *= 2;
+
+	if (likely(new_size <= res->backup_size)) {
+		if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+			ret = vmw_cotable_unscrub(res);
+			if (ret)
+				return ret;
+		}
+		res->id = vcotbl->type;
+		return 0;
+	}
+
+	return vmw_cotable_resize(res, new_size);
+}
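+
+/*
+ * Worked example (illustrative) for the growth policy above: with the
+ * initial PAGE_SIZE (4096-byte) backup and, say, 64-byte entries,
+ * seeing entry id 100 gives needed_size = 101 * 64 = 6464 bytes, so
+ * new_size doubles once to 8192 and vmw_cotable_resize() is called
+ * with that size.
+ */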
+
+/**
+ * vmw_hw_cotable_destroy - Cotable hw_destroy callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * The final (part of resource destruction) destroy callback.
+ */
+static void vmw_hw_cotable_destroy(struct vmw_resource *res)
+{
+	(void) vmw_cotable_destroy(res);
+}
+
+static size_t cotable_acc_size;
+
+/**
+ * vmw_cotable_free - Cotable resource destructor
+ *
+ * @res: Pointer to a cotable resource.
+ */
+static void vmw_cotable_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	kfree(res);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+}
+
+/**
+ * vmw_cotable_alloc - Create a cotable resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @ctx: Pointer to the context resource. The cotable resource will not
+ * take a refcount on the context.
+ * @type: The cotable type.
+ */
+struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+				       struct vmw_resource *ctx,
+				       u32 type)
+{
+	struct vmw_cotable *vcotbl;
+	int ret;
+	u32 num_entries;
+
+	if (unlikely(cotable_acc_size == 0))
+		cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   cotable_acc_size, false, true);
+	if (unlikely(ret))
+		return ERR_PTR(ret);
+
+	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
+	if (unlikely(vcotbl == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_alloc;
+	}
+
+	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
+				vmw_cotable_free, &vmw_cotable_func);
+	if (unlikely(ret != 0))
+		goto out_no_init;
+
+	INIT_LIST_HEAD(&vcotbl->resource_list);
+	vcotbl->res.id = type;
+	vcotbl->res.backup_size = PAGE_SIZE;
+	num_entries = PAGE_SIZE / co_info[type].size;
+	if (num_entries < co_info[type].min_initial_entries) {
+		vcotbl->res.backup_size = co_info[type].min_initial_entries *
+			co_info[type].size;
+		vcotbl->res.backup_size =
+			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+	}
+
+	vcotbl->scrubbed = true;
+	vcotbl->seen_entries = -1;
+	vcotbl->type = type;
+	vcotbl->ctx = ctx;
+
+	vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+
+	return &vcotbl->res;
+
+out_no_init:
+	kfree(vcotbl);
+out_no_alloc:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cotable_notify - Notify the cotable about an item creation
+ *
+ * @res: Pointer to a cotable resource.
+ * @id: Item id.
+ */
+int vmw_cotable_notify(struct vmw_resource *res, int id)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+
+	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
+		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
+			  (unsigned) vcotbl->type, id);
+		return -EINVAL;
+	}
+
+	if (vcotbl->seen_entries < id) {
+		/* Trigger a call to create() on next validate */
+		res->id = -1;
+		vcotbl->seen_entries = id;
+	}
+
+	return 0;
+}
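+
+/*
+ * Illustrative call pattern (not part of this patch): command-stream
+ * validation is expected to notify the cotable whenever it sees a new
+ * item id, for example for a render target view:
+ *
+ *	res = vmw_context_cotable(ctx, SVGA_COTABLE_RTVIEW);
+ *	if (!IS_ERR(res)) {
+ *		ret = vmw_cotable_notify(res, view_id);
+ *		vmw_resource_unreference(&res);
+ *	}
+ *
+ * A later validation of the context then triggers create(), which
+ * grows the table if view_id no longer fits.
+ */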
+
+/**
+ * vmw_cotable_add_resource - add a resource to the cotable's list of active
+ * resources.
+ *
+ * @res: pointer to a struct vmw_resource representing the cotable.
+ * @head: pointer to the struct list_head member of the resource, dedicated
+ * to the cotable active resource list.
+ */
+void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
+{
+	struct vmw_cotable *vcotbl =
+		container_of(res, struct vmw_cotable, res);
+
+	list_add_tail(head, &vcotbl->resource_list);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 914b375763dc..299925a1f6c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,25 +32,20 @@
 
 
 /**
- * vmw_dmabuf_to_placement - Validate a buffer to placement.
+ * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
- * @pin:  Pin buffer if true.
+ * @placement:  The placement to pin it in.
  * @interruptible:  Use interruptible wait.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
  * Returns
  *  -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
-			    struct vmw_dma_buffer *buf,
-			    struct ttm_placement *placement,
-			    bool interruptible)
+int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
+				struct vmw_dma_buffer *buf,
+				struct ttm_placement *placement,
+				bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
@@ -66,6 +61,8 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 		goto err;
 
 	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
 	ttm_bo_unreserve(bo);
 
@@ -75,12 +72,10 @@ err:
 }
 
 /**
- * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
+ * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
@@ -90,55 +85,34 @@ err:
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
-			      struct vmw_dma_buffer *buf,
-			      bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
-	struct ttm_placement *placement;
 	int ret;
 
 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
-	/**
-	 * Put BO in VRAM if there is space, otherwise as a GMR.
-	 * If there is no space in VRAM and GMR ids are all used up,
-	 * start evicting GMRs to make room. If the DMA buffer can't be
-	 * used as a GMR, this will return -ENOMEM.
-	 */
-
-	if (pin)
-		placement = &vmw_vram_gmr_ne_placement;
-	else
-		placement = &vmw_vram_gmr_placement;
-
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+			      false);
 	if (likely(ret == 0) || ret == -ERESTARTSYS)
-		goto err_unreserve;
-
+		goto out_unreserve;
 
-	/**
-	 * If that failed, try VRAM again, this time evicting
-	 * previous contents.
-	 */
-
-	if (pin)
-		placement = &vmw_vram_ne_placement;
-	else
-		placement = &vmw_vram_placement;
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+out_unreserve:
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
-err_unreserve:
 	ttm_bo_unreserve(bo);
 err:
 	ttm_write_unlock(&dev_priv->reservation_sem);
@@ -146,67 +120,50 @@ err:
 }
 
 /**
- * vmw_dmabuf_to_vram - Move a buffer to vram.
+ * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
- * @pin:  Pin buffer in vram if true.
  * @interruptible:  Use interruptible wait.
  *
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
-		       struct vmw_dma_buffer *buf,
-		       bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+			   struct vmw_dma_buffer *buf,
+			   bool interruptible)
 {
-	struct ttm_placement *placement;
-
-	if (pin)
-		placement = &vmw_vram_ne_placement;
-	else
-		placement = &vmw_vram_placement;
-
-	return vmw_dmabuf_to_placement(dev_priv, buf,
-				       placement,
-				       interruptible);
+	return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+					   interruptible);
 }
 
 /**
- * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
+ * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
- * @buf:  DMA buffer to move.
- * @pin:  Pin buffer in vram if true.
+ * @buf:  DMA buffer to pin.
  * @interruptible:  Use interruptible wait.
  *
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
-				struct vmw_dma_buffer *buf,
-				bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
+				    struct vmw_dma_buffer *buf,
+				    bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	struct ttm_placement placement;
 	struct ttm_place place;
 	int ret = 0;
 
-	if (pin)
-		place = vmw_vram_ne_placement.placement[0];
-	else
-		place = vmw_vram_placement.placement[0];
+	place = vmw_vram_placement.placement[0];
 	place.lpfn = bo->num_pages;
-
 	placement.num_placement = 1;
 	placement.placement = &place;
 	placement.num_busy_placement = 1;
@@ -216,13 +173,16 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	/* Is this buffer already in vram but not at the start of it? */
+	/*
+	 * Is this buffer already in vram but not at the start of it?
+	 * In that case, evict it first because TTM isn't good at handling
+	 * that situation.
+	 */
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
 	    bo->mem.start < bo->num_pages &&
 	    bo->mem.start > 0)
@@ -230,8 +190,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 
 	ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
-	/* For some reason we didn't up at the start of vram */
+	/* For some reason we didn't end up at the start of vram */
 	WARN_ON(ret == 0 && bo->offset != 0);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
 	ttm_bo_unreserve(bo);
 err_unlock:
@@ -240,13 +202,10 @@ err_unlock:
 	return ret;
 }
 
-
 /**
- * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer.
+ * vmw_dmabuf_unpin - Unpin the given buffer without moving it.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to unpin.
@@ -259,16 +218,25 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
 		     struct vmw_dma_buffer *buf,
 		     bool interruptible)
 {
-	/*
-	 * We could in theory early out if the buffer is
-	 * unpinned but we need to lock and reserve the buffer
-	 * anyways so we don't gain much by that.
-	 */
-	return vmw_dmabuf_to_placement(dev_priv, buf,
-				       &vmw_evictable_placement,
-				       interruptible);
-}
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
 
+	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	if (unlikely(ret != 0))
+		goto err;
+
+	vmw_bo_pin_reserved(buf, false);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
 
 /**
  * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
@@ -291,21 +259,31 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
 
 
 /**
- * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
  *
- * @bo: The buffer object. Must be reserved.
+ * @vbo: The buffer object. Must be reserved.
  * @pin: Whether to pin or unpin.
  *
  */
-void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 {
 	struct ttm_place pl;
 	struct ttm_placement placement;
+	struct ttm_buffer_object *bo = &vbo->base;
 	uint32_t old_mem_type = bo->mem.mem_type;
 	int ret;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
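+	/*
+	 * Pinning is refcounted: only the first pin and the matching last
+	 * unpin change the actual buffer placement below.
+	 */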
+	if (pin) {
+		if (vbo->pin_count++ > 0)
+			return;
+	} else {
+		WARN_ON(vbo->pin_count <= 0);
+		if (--vbo->pin_count > 0)
+			return;
+	}
+
 	pl.fpfn = 0;
 	pl.lpfn = 0;
 	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 620bb5cf617c..e13b20bd9908 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
@@ -127,6 +128,9 @@
 #define DRM_IOCTL_VMW_SYNCCPU					\
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
 		 struct drm_vmw_synccpu_arg)
+#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
+		struct drm_vmw_context_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -168,8 +172,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
@@ -206,6 +210,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
 		      vmw_user_dmabuf_synccpu_ioctl,
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
+		      vmw_extended_context_define_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -278,6 +285,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
 		DRM_INFO("  Command Buffers 2.\n");
 	if (capabilities & SVGA_CAP_GBOBJECTS)
 		DRM_INFO("  Guest Backed Resources.\n");
+	if (capabilities & SVGA_CAP_DX)
+		DRM_INFO("  DX Features.\n");
 }
 
 /**
@@ -296,30 +305,31 @@ static void vmw_print_capabilities(uint32_t capabilities)
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
 	int ret;
-	struct ttm_buffer_object *bo;
+	struct vmw_dma_buffer *vbo;
 	struct ttm_bo_kmap_obj map;
 	volatile SVGA3dQueryResult *result;
 	bool dummy;
 
 	/*
-	 * Create the bo as pinned, so that a tryreserve will
+	 * Create the vbo as pinned, so that a tryreserve will
 	 * immediately succeed. This is because we're the only
 	 * user of the bo currently.
 	 */
-	ret = ttm_bo_create(&dev_priv->bdev,
-			    PAGE_SIZE,
-			    ttm_bo_type_device,
-			    &vmw_sys_ne_placement,
-			    0, false, NULL,
-			    &bo);
+	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+	if (!vbo)
+		return -ENOMEM;
 
+	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+			      &vmw_sys_ne_placement, false,
+			      &vmw_dmabuf_bo_free);
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
 	BUG_ON(ret != 0);
+	vmw_bo_pin_reserved(vbo, true);
 
-	ret = ttm_bo_kmap(bo, 0, 1, &map);
+	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
 	if (likely(ret == 0)) {
 		result = ttm_kmap_obj_virtual(&map, &dummy);
 		result->totalSize = sizeof(*result);
@@ -327,18 +337,55 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 		result->result32 = 0xff;
 		ttm_bo_kunmap(&map);
 	}
-	vmw_bo_pin(bo, false);
-	ttm_bo_unreserve(bo);
+	vmw_bo_pin_reserved(vbo, false);
+	ttm_bo_unreserve(&vbo->base);
 
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Dummy query buffer map failed.\n");
-		ttm_bo_unref(&bo);
+		vmw_dmabuf_unreference(&vbo);
 	} else
-		dev_priv->dummy_query_bo = bo;
+		dev_priv->dummy_query_bo = vbo;
 
 	return ret;
 }
 
+/**
+ * vmw_request_device_late - Perform late device setup
+ *
+ * @dev_priv: Pointer to device private.
+ *
+ * This function performs setup of otables and enables large command
+ * buffer submission. It is split out into a separate function because it
+ * reverts vmw_release_device_early() and is also used by an error path
+ * in the hibernation code.
+ */
+static int vmw_request_device_late(struct vmw_private *dev_priv)
+{
+	int ret;
+
+	if (dev_priv->has_mob) {
+		ret = vmw_otables_setup(dev_priv);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Unable to initialize "
+				  "guest Memory OBjects.\n");
+			return ret;
+		}
+	}
+
+	if (dev_priv->cman) {
+		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
+					       256*4096, 2*4096);
+		if (ret) {
+			struct vmw_cmdbuf_man *man = dev_priv->cman;
+
+			dev_priv->cman = NULL;
+			vmw_cmdbuf_man_destroy(man);
+		}
+	}
+
+	return 0;
+}
+
 static int vmw_request_device(struct vmw_private *dev_priv)
 {
 	int ret;
@@ -349,14 +396,16 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 		return ret;
 	}
 	vmw_fence_fifo_up(dev_priv->fman);
-	if (dev_priv->has_mob) {
-		ret = vmw_otables_setup(dev_priv);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Unable to initialize "
-				  "guest Memory OBjects.\n");
-			goto out_no_mob;
-		}
+	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
+	if (IS_ERR(dev_priv->cman)) {
+		dev_priv->cman = NULL;
+		dev_priv->has_dx = false;
 	}
+
+	ret = vmw_request_device_late(dev_priv);
+	if (ret)
+		goto out_no_mob;
+
 	ret = vmw_dummy_query_bo_create(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_query_bo;
@@ -364,15 +413,29 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 	return 0;
 
 out_no_query_bo:
-	if (dev_priv->has_mob)
+	if (dev_priv->cman)
+		vmw_cmdbuf_remove_pool(dev_priv->cman);
+	if (dev_priv->has_mob) {
+		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 		vmw_otables_takedown(dev_priv);
+	}
+	if (dev_priv->cman)
+		vmw_cmdbuf_man_destroy(dev_priv->cman);
 out_no_mob:
 	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 	return ret;
 }
 
-static void vmw_release_device(struct vmw_private *dev_priv)
+/**
+ * vmw_release_device_early - Early part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the first part of command submission takedown, to be called before
+ * buffer management is taken down.
+ */
+static void vmw_release_device_early(struct vmw_private *dev_priv)
 {
 	/*
 	 * Previous destructions should've released
@@ -381,65 +444,31 @@ static void vmw_release_device(struct vmw_private *dev_priv)
 
 	BUG_ON(dev_priv->pinned_bo != NULL);
 
-	ttm_bo_unref(&dev_priv->dummy_query_bo);
-	if (dev_priv->has_mob)
-		vmw_otables_takedown(dev_priv);
-	vmw_fence_fifo_down(dev_priv->fman);
-	vmw_fifo_release(dev_priv, &dev_priv->fifo);
-}
-
+	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+	if (dev_priv->cman)
+		vmw_cmdbuf_remove_pool(dev_priv->cman);
 
-/**
- * Increase the 3d resource refcount.
- * If the count was prevously zero, initialize the fifo, switching to svga
- * mode. Note that the master holds a ref as well, and may request an
- * explicit switch to svga mode if fb is not running, using @unhide_svga.
- */
-int vmw_3d_resource_inc(struct vmw_private *dev_priv,
-			bool unhide_svga)
-{
-	int ret = 0;
-
-	mutex_lock(&dev_priv->release_mutex);
-	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			--dev_priv->num_3d_resources;
-	} else if (unhide_svga) {
-		vmw_write(dev_priv, SVGA_REG_ENABLE,
-			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
-			  ~SVGA_REG_ENABLE_HIDE);
+	if (dev_priv->has_mob) {
+		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+		vmw_otables_takedown(dev_priv);
 	}
-
-	mutex_unlock(&dev_priv->release_mutex);
-	return ret;
 }
 
 /**
- * Decrease the 3d resource refcount.
- * If the count reaches zero, disable the fifo, switching to vga mode.
- * Note that the master holds a refcount as well, and may request an
- * explicit switch to vga mode when it releases its refcount to account
- * for the situation of an X server vt switch to VGA with 3d resources
- * active.
+ * vmw_release_device_late - Late part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the last part of the command submission takedown, to be called when
+ * command submission is no longer needed. It may wait on pending fences.
  */
-void vmw_3d_resource_dec(struct vmw_private *dev_priv,
-			 bool hide_svga)
+static void vmw_release_device_late(struct vmw_private *dev_priv)
 {
-	int32_t n3d;
-
-	mutex_lock(&dev_priv->release_mutex);
-	if (unlikely(--dev_priv->num_3d_resources == 0))
-		vmw_release_device(dev_priv);
-	else if (hide_svga)
-		vmw_write(dev_priv, SVGA_REG_ENABLE,
-			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
-			  SVGA_REG_ENABLE_HIDE);
-
-	n3d = (int32_t) dev_priv->num_3d_resources;
-	mutex_unlock(&dev_priv->release_mutex);
+	vmw_fence_fifo_down(dev_priv->fman);
+	if (dev_priv->cman)
+		vmw_cmdbuf_man_destroy(dev_priv->cman);
 
-	BUG_ON(n3d < 0);
+	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
 
 /**
@@ -603,6 +632,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	spin_lock_init(&dev_priv->hw_lock);
 	spin_lock_init(&dev_priv->waiter_lock);
 	spin_lock_init(&dev_priv->cap_lock);
+	spin_lock_init(&dev_priv->svga_lock);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 		idr_init(&dev_priv->res_idr[i]);
@@ -673,22 +703,31 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
 		dev_priv->max_mob_size =
 			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
-	} else
+		dev_priv->stdu_max_width =
+			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
+		dev_priv->stdu_max_height =
+			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
+
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
+		dev_priv->texture_max_width = vmw_read(dev_priv,
+						       SVGA_REG_DEV_CAP);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
+		dev_priv->texture_max_height = vmw_read(dev_priv,
+							SVGA_REG_DEV_CAP);
+	} else {
+		dev_priv->texture_max_width = 8192;
+		dev_priv->texture_max_height = 8192;
 		dev_priv->prim_bb_mem = dev_priv->vram_size;
+	}
+
+	vmw_print_capabilities(dev_priv->capabilities);
 
 	ret = vmw_dma_masks(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_err0;
 
-	/*
-	 * Limit back buffer size to VRAM size.  Remove this once
-	 * screen targets are implemented.
-	 */
-	if (dev_priv->prim_bb_mem > dev_priv->vram_size)
-		dev_priv->prim_bb_mem = dev_priv->vram_size;
-
-	vmw_print_capabilities(dev_priv->capabilities);
-
 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		DRM_INFO("Max GMR ids is %u\n",
 			 (unsigned)dev_priv->max_gmr_ids);
@@ -714,17 +753,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
 
-	ret = ttm_bo_device_init(&dev_priv->bdev,
-				 dev_priv->bo_global_ref.ref.object,
-				 &vmw_bo_driver,
-				 dev->anon_inode->i_mapping,
-				 VMWGFX_FILE_PAGE_OFFSET,
-				 false);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
-		goto out_err1;
-	}
-
 	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
 					       dev_priv->mmio_size);
 
@@ -787,13 +815,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_no_fman;
 	}
 
+	ret = ttm_bo_device_init(&dev_priv->bdev,
+				 dev_priv->bo_global_ref.ref.object,
+				 &vmw_bo_driver,
+				 dev->anon_inode->i_mapping,
+				 VMWGFX_FILE_PAGE_OFFSET,
+				 false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+		goto out_no_bdev;
+	}
 
+	/*
+	 * Enable VRAM, but initially don't use it until SVGA is enabled and
+	 * unhidden.
+	 */
 	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
 			     (dev_priv->vram_size >> PAGE_SHIFT));
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
 		goto out_no_vram;
 	}
+	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 
 	dev_priv->has_gmr = true;
 	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
@@ -814,18 +857,28 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		}
 	}
 
-	vmw_kms_save_vga(dev_priv);
+	if (dev_priv->has_mob) {
+		spin_lock(&dev_priv->cap_lock);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		spin_unlock(&dev_priv->cap_lock);
+	}
+
 
-	/* Start kms and overlay systems, needs fifo. */
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_kms;
 	vmw_overlay_init(dev_priv);
 
+	ret = vmw_request_device(dev_priv);
+	if (ret)
+		goto out_no_fifo;
+
+	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+
 	if (dev_priv->enable_fb) {
-		ret = vmw_3d_resource_inc(dev_priv, true);
-		if (unlikely(ret != 0))
-			goto out_no_fifo;
+		vmw_fifo_resource_inc(dev_priv);
+		vmw_svga_enable(dev_priv);
 		vmw_fb_init(dev_priv);
 	}
 
@@ -838,13 +891,14 @@ out_no_fifo:
 	vmw_overlay_close(dev_priv);
 	vmw_kms_close(dev_priv);
 out_no_kms:
-	vmw_kms_restore_vga(dev_priv);
 	if (dev_priv->has_mob)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 out_no_vram:
+	(void)ttm_bo_device_release(&dev_priv->bdev);
+out_no_bdev:
 	vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -860,13 +914,13 @@ out_err4:
 	iounmap(dev_priv->mmio_virt);
 out_err3:
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	(void)ttm_bo_device_release(&dev_priv->bdev);
-out_err1:
 	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
 
+	if (dev_priv->ctx.staged_bindings)
+		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	kfree(dev_priv);
 	return ret;
 }
@@ -882,19 +936,24 @@ static int vmw_driver_unload(struct drm_device *dev)
 		drm_ht_remove(&dev_priv->ctx.res_ht);
 	vfree(dev_priv->ctx.cmd_bounce);
 	if (dev_priv->enable_fb) {
+		vmw_fb_off(dev_priv);
 		vmw_fb_close(dev_priv);
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, false);
+		vmw_fifo_resource_dec(dev_priv);
+		vmw_svga_disable(dev_priv);
 	}
+
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
 
-	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
 		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 
+	vmw_release_device_early(dev_priv);
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	(void) ttm_bo_device_release(&dev_priv->bdev);
+	vmw_release_device_late(dev_priv);
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
@@ -906,7 +965,8 @@ static int vmw_driver_unload(struct drm_device *dev)
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	(void)ttm_bo_device_release(&dev_priv->bdev);
+	if (dev_priv->ctx.staged_bindings)
+		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -992,10 +1052,15 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
 	}
 
 	/*
-	 * Check if we were previously master, but now dropped.
+	 * Check if we were previously master, but now dropped. In that
+	 * case, allow at least render node functionality.
 	 */
 	if (vmw_fp->locked_master) {
 		mutex_unlock(&dev->master_mutex);
+
+		if (flags & DRM_RENDER_ALLOW)
+			return NULL;
+
 		DRM_ERROR("Dropped master trying to access ioctl that "
 			  "requires authentication.\n");
 		return ERR_PTR(-EACCES);
@@ -1044,17 +1109,27 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 		const struct drm_ioctl_desc *ioctl =
 			&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
-		if (unlikely(ioctl->cmd != cmd)) {
-			DRM_ERROR("Invalid command format, ioctl %d\n",
-				  nr - DRM_COMMAND_BASE);
-			return -EINVAL;
+		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
+			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
+			if (unlikely(ret != 0))
+				return ret;
+
+			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
+				goto out_io_encoding;
+
+			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
+							_IOC_SIZE(cmd));
 		}
+
+		if (unlikely(ioctl->cmd != cmd))
+			goto out_io_encoding;
+
 		flags = ioctl->flags;
 	} else if (!drm_ioctl_flags(nr, &flags))
 		return -EINVAL;
 
 	vmaster = vmw_master_check(dev, file_priv, flags);
-	if (unlikely(IS_ERR(vmaster))) {
+	if (IS_ERR(vmaster)) {
 		ret = PTR_ERR(vmaster);
 
 		if (ret != -ERESTARTSYS)
@@ -1068,6 +1143,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 		ttm_read_unlock(&vmaster->lock);
 
 	return ret;
+
+out_io_encoding:
+	DRM_ERROR("Invalid command format, ioctl %d\n",
+		  nr - DRM_COMMAND_BASE);
+
+	return -EINVAL;
 }
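
/*
 * Editor's sketch, not part of the original patch: the execbuf special
 * case above relies on the ioctl command word itself encoding the
 * transfer direction and argument size, which is what lets
 * vmw_execbuf_ioctl() accept several argument-struct sizes. The helper
 * below is hypothetical and only illustrates the standard _IOC_*
 * decoding used by the check.
 */
static bool vmw_cmd_is_write_only(unsigned int cmd, size_t *size)
{
	*size = _IOC_SIZE(cmd);	/* payload size encoded in the command */
	return (cmd & (IOC_IN | IOC_OUT)) == IOC_IN;
}
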
 
 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1086,30 +1167,11 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
 
 static void vmw_lastclose(struct drm_device *dev)
 {
-	struct drm_crtc *crtc;
-	struct drm_mode_set set;
-	int ret;
-
-	set.x = 0;
-	set.y = 0;
-	set.fb = NULL;
-	set.mode = NULL;
-	set.connectors = NULL;
-	set.num_connectors = 0;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		set.crtc = crtc;
-		ret = drm_mode_set_config_internal(&set);
-		WARN_ON(ret != 0);
-	}
-
 }
 
 static void vmw_master_init(struct vmw_master *vmaster)
 {
 	ttm_lock_init(&vmaster->lock);
-	INIT_LIST_HEAD(&vmaster->fb_surf);
-	mutex_init(&vmaster->fb_surf_mutex);
 }
 
 static int vmw_master_create(struct drm_device *dev,
@@ -1137,7 +1199,6 @@ static void vmw_master_destroy(struct drm_device *dev,
 	kfree(vmaster);
 }
 
-
 static int vmw_master_set(struct drm_device *dev,
 			  struct drm_file *file_priv,
 			  bool from_open)
@@ -1148,27 +1209,13 @@ static int vmw_master_set(struct drm_device *dev,
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret = 0;
 
-	if (!dev_priv->enable_fb) {
-		ret = vmw_3d_resource_inc(dev_priv, true);
-		if (unlikely(ret != 0))
-			return ret;
-		vmw_kms_save_vga(dev_priv);
-		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-	}
-
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
 		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
 		if (unlikely(ret != 0))
-			goto out_no_active_lock;
+			return ret;
 
 		ttm_lock_set_kill(&active->lock, true, SIGTERM);
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Unable to clean VRAM on "
-				  "master drop.\n");
-		}
-
 		dev_priv->active_master = NULL;
 	}
 
@@ -1182,14 +1229,6 @@ static int vmw_master_set(struct drm_device *dev,
 	dev_priv->active_master = vmaster;
 
 	return 0;
-
-out_no_active_lock:
-	if (!dev_priv->enable_fb) {
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, true);
-		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-	}
-	return ret;
 }
 
 static void vmw_master_drop(struct drm_device *dev,
@@ -1214,16 +1253,9 @@ static void vmw_master_drop(struct drm_device *dev,
 	}
 
 	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-	vmw_execbuf_release_pinned_bo(dev_priv);
 
-	if (!dev_priv->enable_fb) {
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0))
-			DRM_ERROR("Unable to clean VRAM on master drop.\n");
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, true);
-		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-	}
+	if (!dev_priv->enable_fb)
+		vmw_svga_disable(dev_priv);
 
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
@@ -1233,6 +1265,76 @@ static void vmw_master_drop(struct drm_device *dev,
 		vmw_fb_on(dev_priv);
 }
 
+/**
+ * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in non-exclusive mode.
+ */
+static void __vmw_svga_enable(struct vmw_private *dev_priv)
+{
+	spin_lock(&dev_priv->svga_lock);
+	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+	}
+	spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ */
+void vmw_svga_enable(struct vmw_private *dev_priv)
+{
+	ttm_read_lock(&dev_priv->reservation_sem, false);
+	__vmw_svga_enable(dev_priv);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
+/**
+ * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in exclusive mode.
+ * Will not empty VRAM. VRAM must be emptied by the caller.
+ */
+static void __vmw_svga_disable(struct vmw_private *dev_priv)
+{
+	spin_lock(&dev_priv->svga_lock);
+	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+		vmw_write(dev_priv, SVGA_REG_ENABLE,
+			  SVGA_REG_ENABLE_HIDE |
+			  SVGA_REG_ENABLE_ENABLE);
+	}
+	spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
+ * running.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Will empty VRAM.
+ */
+void vmw_svga_disable(struct vmw_private *dev_priv)
+{
+	ttm_write_lock(&dev_priv->reservation_sem, false);
+	spin_lock(&dev_priv->svga_lock);
+	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+		spin_unlock(&dev_priv->svga_lock);
+		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+			DRM_ERROR("Failed evicting VRAM buffers.\n");
+		vmw_write(dev_priv, SVGA_REG_ENABLE,
+			  SVGA_REG_ENABLE_HIDE |
+			  SVGA_REG_ENABLE_ENABLE);
+	} else {
+		spin_unlock(&dev_priv->svga_lock);
+	}
+	ttm_write_unlock(&dev_priv->reservation_sem);
+}
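
/*
 * Editor's usage sketch, not part of the original patch: the pairing the
 * helpers above expect, mirroring the enable_fb path in
 * vmw_driver_load()/vmw_driver_unload(). Hold a fifo resource reference
 * for as long as SVGA is enabled, and release in the same order on
 * teardown.
 */
vmw_fifo_resource_inc(dev_priv);
vmw_svga_enable(dev_priv);	/* device shown, VRAM usable */
/* ... */
vmw_fifo_resource_dec(dev_priv);
vmw_svga_disable(dev_priv);	/* evicts VRAM, keeps the fifo up */
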
 
 static void vmw_remove(struct pci_dev *pdev)
 {
@@ -1250,23 +1352,26 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 
 	switch (val) {
 	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
+		if (dev_priv->enable_fb)
+			vmw_fb_off(dev_priv);
 		ttm_suspend_lock(&dev_priv->reservation_sem);
 
-		/**
+		/*
 		 * This empties VRAM and unbinds all GMR bindings.
 		 * Buffer contents are moved to swappable memory.
 		 */
 		vmw_execbuf_release_pinned_bo(dev_priv);
 		vmw_resource_evict_all(dev_priv);
+		vmw_release_device_early(dev_priv);
 		ttm_bo_swapout_all(&dev_priv->bdev);
-
+		vmw_fence_fifo_down(dev_priv->fman);
 		break;
 	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
 	case PM_POST_RESTORE:
+		vmw_fence_fifo_up(dev_priv->fman);
 		ttm_suspend_unlock(&dev_priv->reservation_sem);
-
+		if (dev_priv->enable_fb)
+			vmw_fb_on(dev_priv);
 		break;
 	case PM_RESTORE_PREPARE:
 		break;
@@ -1276,20 +1381,13 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 	return 0;
 }
 
-/**
- * These might not be needed with the virtual SVGA device.
- */
-
 static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	if (dev_priv->num_3d_resources != 0) {
-		DRM_INFO("Can't suspend or hibernate "
-			 "while 3D resources are active.\n");
+	if (dev_priv->refuse_hibernation)
 		return -EBUSY;
-	}
 
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
@@ -1321,56 +1419,62 @@ static int vmw_pm_resume(struct device *kdev)
 	return vmw_pci_resume(pdev);
 }
 
-static int vmw_pm_prepare(struct device *kdev)
+static int vmw_pm_freeze(struct device *kdev)
 {
 	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	/**
-	 * Release 3d reference held by fbdev and potentially
-	 * stop fifo.
-	 */
 	dev_priv->suspended = true;
 	if (dev_priv->enable_fb)
-			vmw_3d_resource_dec(dev_priv, true);
-
-	if (dev_priv->num_3d_resources != 0) {
-
-		DRM_INFO("Can't suspend or hibernate "
-			 "while 3D resources are active.\n");
+		vmw_fifo_resource_dec(dev_priv);
 
+	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
+		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
 		if (dev_priv->enable_fb)
-			vmw_3d_resource_inc(dev_priv, true);
+			vmw_fifo_resource_inc(dev_priv);
+		WARN_ON(vmw_request_device_late(dev_priv));
 		dev_priv->suspended = false;
 		return -EBUSY;
 	}
 
+	if (dev_priv->enable_fb)
+		__vmw_svga_disable(dev_priv);
+
+	vmw_release_device_late(dev_priv);
+
 	return 0;
 }
 
-static void vmw_pm_complete(struct device *kdev)
+static int vmw_pm_restore(struct device *kdev)
 {
 	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
+	int ret;
 
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 	(void) vmw_read(dev_priv, SVGA_REG_ID);
 
-	/**
-	 * Reclaim 3d reference held by fbdev and potentially
-	 * start fifo.
-	 */
 	if (dev_priv->enable_fb)
-			vmw_3d_resource_inc(dev_priv, false);
+		vmw_fifo_resource_inc(dev_priv);
+
+	ret = vmw_request_device(dev_priv);
+	if (ret)
+		return ret;
+
+	if (dev_priv->enable_fb)
+		__vmw_svga_enable(dev_priv);
 
 	dev_priv->suspended = false;
+
+	return 0;
 }
 
 static const struct dev_pm_ops vmw_pm_ops = {
-	.prepare = vmw_pm_prepare,
-	.complete = vmw_pm_complete,
+	.freeze = vmw_pm_freeze,
+	.thaw = vmw_pm_restore,
+	.restore = vmw_pm_restore,
 	.suspend = vmw_pm_suspend,
 	.resume = vmw_pm_resume,
 };
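
/*
 * Editor's sketch, not part of the original patch: with .prepare/.complete
 * replaced by .freeze/.thaw/.restore, the driver now opts into the
 * hibernation phases only (note the PM_SUSPEND_PREPARE/PM_POST_SUSPEND
 * notifier cases were dropped above). Such ops are wired to the device
 * through the standard struct pci_driver; the field values below are
 * assumed from the real driver and abbreviated.
 */
static struct pci_driver vmw_pci_driver_sketch = {
	.name = "vmwgfx",	/* assumed driver name */
	.probe = vmw_probe,	/* assumed; defined elsewhere in this file */
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops,
	},
};
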
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 0336d49e3d4c..6d02de6dc36c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -40,17 +40,17 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20140704"
+#define VMWGFX_DRIVER_DATE "20150810"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 9
+#define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
 #define VMWGFX_MAX_VALIDATIONS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
 
 /*
  * Perhaps we should have sysfs entries for these.
@@ -59,6 +59,8 @@
 #define VMWGFX_NUM_GB_SHADER 20000
 #define VMWGFX_NUM_GB_SURFACE 32768
 #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_DXCONTEXT 256
+#define VMWGFX_NUM_DXQUERY 512
 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
 			VMWGFX_NUM_GB_SHADER +\
 			VMWGFX_NUM_GB_SURFACE +\
@@ -85,6 +87,9 @@ struct vmw_fpriv {
 struct vmw_dma_buffer {
 	struct ttm_buffer_object base;
 	struct list_head res_list;
+	s32 pin_count;
+	/* Not ref-counted.  Protected by binding_mutex */
+	struct vmw_resource *dx_query_ctx;
 };
 
 /**
@@ -113,6 +118,7 @@ struct vmw_resource {
 	bool backup_dirty; /* Protected by backup buffer reserved */
 	struct vmw_dma_buffer *backup;
 	unsigned long backup_offset;
+	unsigned long pin_count; /* Protected by resource reserved */
 	const struct vmw_res_func *func;
 	struct list_head lru_head; /* Protected by the resource lock */
 	struct list_head mob_head; /* Protected by @backup reserved */
@@ -130,6 +136,9 @@ enum vmw_res_type {
 	vmw_res_surface,
 	vmw_res_stream,
 	vmw_res_shader,
+	vmw_res_dx_context,
+	vmw_res_cotable,
+	vmw_res_view,
 	vmw_res_max
 };
 
@@ -137,7 +146,8 @@ enum vmw_res_type {
  * Resources that are managed using command streams.
  */
 enum vmw_cmdbuf_res_type {
-	vmw_cmdbuf_res_compat_shader
+	vmw_cmdbuf_res_shader,
+	vmw_cmdbuf_res_view
 };
 
 struct vmw_cmdbuf_res_manager;
@@ -160,11 +170,13 @@ struct vmw_surface {
 	struct drm_vmw_size *sizes;
 	uint32_t num_sizes;
 	bool scanout;
+	uint32_t array_size;
 	/* TODO so far just an extra pointer */
 	struct vmw_cursor_snooper snooper;
 	struct vmw_surface_offset *offsets;
 	SVGA3dTextureFilter autogen_filter;
 	uint32_t multisample_count;
+	struct list_head view_list;
 };
 
 struct vmw_marker_queue {
@@ -176,14 +188,15 @@ struct vmw_marker_queue {
 
 struct vmw_fifo_state {
 	unsigned long reserved_size;
-	__le32 *dynamic_buffer;
-	__le32 *static_buffer;
+	u32 *dynamic_buffer;
+	u32 *static_buffer;
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
 	struct vmw_marker_queue marker_queue;
+	bool dx;
 };
 
 struct vmw_relocation {
@@ -264,70 +277,15 @@ struct vmw_piter {
 };
 
 /*
- * enum vmw_ctx_binding_type - abstract resource to context binding types
+ * enum vmw_display_unit_type - Describes the display unit
  */
-enum vmw_ctx_binding_type {
-	vmw_ctx_binding_shader,
-	vmw_ctx_binding_rt,
-	vmw_ctx_binding_tex,
-	vmw_ctx_binding_max
+enum vmw_display_unit_type {
+	vmw_du_invalid = 0,
+	vmw_du_legacy,
+	vmw_du_screen_object,
+	vmw_du_screen_target
 };
 
-/**
- * struct vmw_ctx_bindinfo - structure representing a single context binding
- *
- * @ctx: Pointer to the context structure. NULL means the binding is not
- * active.
- * @res: Non ref-counted pointer to the bound resource.
- * @bt: The binding type.
- * @i1: Union of information needed to unbind.
- */
-struct vmw_ctx_bindinfo {
-	struct vmw_resource *ctx;
-	struct vmw_resource *res;
-	enum vmw_ctx_binding_type bt;
-	bool scrubbed;
-	union {
-		SVGA3dShaderType shader_type;
-		SVGA3dRenderTargetType rt_type;
-		uint32 texture_stage;
-	} i1;
-};
-
-/**
- * struct vmw_ctx_binding - structure representing a single context binding
- *                        - suitable for tracking in a context
- *
- * @ctx_list: List head for context.
- * @res_list: List head for bound resource.
- * @bi: Binding info
- */
-struct vmw_ctx_binding {
-	struct list_head ctx_list;
-	struct list_head res_list;
-	struct vmw_ctx_bindinfo bi;
-};
-
-
-/**
- * struct vmw_ctx_binding_state - context binding state
- *
- * @list: linked list of individual bindings.
- * @render_targets: Render target bindings.
- * @texture_units: Texture units/samplers bindings.
- * @shaders: Shader bindings.
- *
- * Note that this structure also provides storage space for the individual
- * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
- * for individual bindings.
- *
- */
-struct vmw_ctx_binding_state {
-	struct list_head list;
-	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
-	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
-	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
-};
 
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
@@ -342,15 +300,21 @@ struct vmw_sw_context{
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
-	struct ttm_buffer_object *cur_query_bo;
+	struct list_head ctx_resource_list; /* For contexts and cotables */
+	struct vmw_dma_buffer *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
 	struct vmw_resource *last_query_ctx;
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
-	struct vmw_ctx_binding_state staged_bindings;
+	struct vmw_ctx_binding_state *staged_bindings;
+	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
+	struct vmw_resource_val_node *dx_ctx_node;
+	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_cmdbuf_res_manager *man;
 };
 
 struct vmw_legacy_display;
@@ -358,8 +322,6 @@ struct vmw_overlay;
 
 struct vmw_master {
 	struct ttm_lock lock;
-	struct mutex fb_surf_mutex;
-	struct list_head fb_surf;
 };
 
 struct vmw_vga_topology_state {
@@ -370,6 +332,26 @@ struct vmw_vga_topology_state {
 	uint32_t pos_y;
 };
 
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size:           Size of the table (page-aligned).
+ * @page_table:     Pointer to a struct vmw_mob holding the page table.
+ * @enabled:        Whether the table is currently enabled.
+ */
+struct vmw_otable {
+	unsigned long size;
+	struct vmw_mob *page_table;
+	bool enabled;
+};
+
+struct vmw_otable_batch {
+	unsigned num_otables;
+	struct vmw_otable *otables;
+	struct vmw_resource *context;
+	struct ttm_buffer_object *otable_bo;
+};
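
/*
 * Editor's sketch, not part of the original patch, assuming only the two
 * structures above: walk a batch and report its enabled object tables.
 */
static void vmw_otable_batch_report(const struct vmw_otable_batch *batch)
{
	unsigned i;

	for (i = 0; i < batch->num_otables; ++i) {
		const struct vmw_otable *otable = &batch->otables[i];

		if (otable->enabled)
			pr_info("otable %u: %lu bytes\n", i, otable->size);
	}
}
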
+
 struct vmw_private {
 	struct ttm_bo_device bdev;
 	struct ttm_bo_global_ref bo_global_ref;
@@ -387,9 +369,13 @@ struct vmw_private {
 	uint32_t mmio_size;
 	uint32_t fb_max_width;
 	uint32_t fb_max_height;
+	uint32_t texture_max_width;
+	uint32_t texture_max_height;
+	uint32_t stdu_max_width;
+	uint32_t stdu_max_height;
 	uint32_t initial_width;
 	uint32_t initial_height;
-	__le32 __iomem *mmio_virt;
+	u32 __iomem *mmio_virt;
 	int mmio_mtrr;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
@@ -401,6 +387,7 @@ struct vmw_private {
 	bool has_mob;
 	spinlock_t hw_lock;
 	spinlock_t cap_lock;
+	bool has_dx;
 
 	/*
 	 * VGA registers.
@@ -420,6 +407,7 @@ struct vmw_private {
 	 */
 
 	void *fb_info;
+	enum vmw_display_unit_type active_display_unit;
 	struct vmw_legacy_display *ldu_priv;
 	struct vmw_screen_object_display *sou_priv;
 	struct vmw_overlay *overlay_priv;
@@ -453,6 +441,8 @@ struct vmw_private {
 	spinlock_t waiter_lock;
 	int fence_queue_waiters; /* Protected by waiter_lock */
 	int goal_queue_waiters; /* Protected by waiter_lock */
+	int cmdbuf_waiters; /* Protected by irq_lock */
+	int error_waiters; /* Protected by irq_lock */
 	atomic_t fifo_queue_waiters;
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
@@ -484,6 +474,7 @@ struct vmw_private {
 
 	bool stealth;
 	bool enable_fb;
+	spinlock_t svga_lock;
 
 	/**
 	 * Master management.
@@ -493,9 +484,10 @@ struct vmw_private {
 	struct vmw_master fbdev_master;
 	struct notifier_block pm_nb;
 	bool suspended;
+	bool refuse_hibernation;
 
 	struct mutex release_mutex;
-	uint32_t num_3d_resources;
+	atomic_t num_fifo_resources;
 
 	/*
 	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
@@ -507,8 +499,8 @@ struct vmw_private {
 	 * are protected by the cmdbuf mutex.
 	 */
 
-	struct ttm_buffer_object *dummy_query_bo;
-	struct ttm_buffer_object *pinned_bo;
+	struct vmw_dma_buffer *dummy_query_bo;
+	struct vmw_dma_buffer *pinned_bo;
 	uint32_t query_cid;
 	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
@@ -531,8 +523,9 @@ struct vmw_private {
 	/*
 	 * Guest Backed stuff
 	 */
-	struct ttm_buffer_object *otable_bo;
-	struct vmw_otable *otables;
+	struct vmw_otable_batch otable_batch;
+
+	struct vmw_cmdbuf_man *cman;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -587,8 +580,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 	return val;
 }
 
-int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
-void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
+extern void vmw_svga_enable(struct vmw_private *dev_priv);
+extern void vmw_svga_disable(struct vmw_private *dev_priv);
+
 
 /**
  * GMR utilities - vmwgfx_gmr.c
@@ -610,7 +604,8 @@ extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
 extern struct vmw_resource *
 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
-extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+				bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
@@ -660,10 +655,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 				  uint32_t *inout_id,
 				  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
+				   bool switch_backup,
 				   struct vmw_dma_buffer *new_backup,
 				   unsigned long new_backup_offset);
 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 				     struct ttm_mem_reg *mem);
+extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem);
+extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 				struct vmw_fence_obj *fence);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
@@ -671,25 +670,25 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
  */
-extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
-				   struct vmw_dma_buffer *bo,
-				   struct ttm_placement *placement,
-				   bool interruptible);
-extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
-			      struct vmw_dma_buffer *buf,
-			      bool pin, bool interruptible);
-extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
-				     struct vmw_dma_buffer *buf,
-				     bool pin, bool interruptible);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
 				       struct vmw_dma_buffer *bo,
-				       bool pin, bool interruptible);
+				       struct ttm_placement *placement,
+				       bool interruptible);
+extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible);
+extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+					 struct vmw_dma_buffer *buf,
+					 bool interruptible);
+extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+					   struct vmw_dma_buffer *bo,
+					   bool interruptible);
 extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
 			    struct vmw_dma_buffer *bo,
 			    bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
 				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
+extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -717,7 +716,10 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
 extern void vmw_fifo_release(struct vmw_private *dev_priv,
 			     struct vmw_fifo_state *fifo);
 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void *
+vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 			       uint32_t *seqno);
 extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
@@ -726,6 +728,8 @@ extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
 extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 				     uint32_t cid);
+extern int vmw_fifo_flush(struct vmw_private *dev_priv,
+			  bool interruptible);
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -750,6 +754,7 @@ extern struct ttm_placement vmw_sys_ne_placement;
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_placement vmw_mob_placement;
+extern struct ttm_placement vmw_mob_ne_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
 extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
@@ -800,14 +805,15 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
  * Command submission - vmwgfx_execbuf.c
  */
 
-extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+			     struct drm_file *file_priv, size_t size);
 extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       struct vmw_private *dev_priv,
 			       void __user *user_commands,
 			       void *kernel_commands,
 			       uint32_t command_size,
 			       uint64_t throttle_us,
+			       uint32_t dx_context_handle,
 			       struct drm_vmw_fence_rep __user
 			       *user_fence_rep,
 			       struct vmw_fence_obj **out_fence);
@@ -826,6 +832,11 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 					*user_fence_rep,
 					struct vmw_fence_obj *fence,
 					uint32_t fence_handle);
+extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+				      struct ttm_buffer_object *bo,
+				      bool interruptible,
+				      bool validate_as_mob);
+
 
 /**
  * IRQs and waiting - vmwgfx_irq.c
@@ -833,8 +844,8 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 
 extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
-			     uint32_t seqno, bool interruptible,
-			     unsigned long timeout);
+			  uint32_t seqno, bool interruptible,
+			  unsigned long timeout);
 extern void vmw_irq_preinstall(struct drm_device *dev);
 extern int vmw_irq_postinstall(struct drm_device *dev);
 extern void vmw_irq_uninstall(struct drm_device *dev);
@@ -852,6 +863,10 @@ extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+				   int *waiter_count);
+extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+				      u32 flag, int *waiter_count);
 
 /**
  * Rudimentary fence-like objects currently used only for throttling -
@@ -861,9 +876,9 @@ extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
 extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
 extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
 extern int vmw_marker_push(struct vmw_marker_queue *queue,
-			  uint32_t seqno);
+			   uint32_t seqno);
 extern int vmw_marker_pull(struct vmw_marker_queue *queue,
-			  uint32_t signaled_seqno);
+			   uint32_t signaled_seqno);
 extern int vmw_wait_lag(struct vmw_private *dev_priv,
 			struct vmw_marker_queue *queue, uint32_t us);
 
@@ -908,12 +923,6 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 		    uint32_t sid, int32_t destX, int32_t destY,
 		    struct drm_vmw_rect *clips,
 		    uint32_t num_clips);
-int vmw_kms_readback(struct vmw_private *dev_priv,
-		     struct drm_file *file_priv,
-		     struct vmw_framebuffer *vfb,
-		     struct drm_vmw_fence_rep __user *user_fence_rep,
-		     struct drm_vmw_rect *clips,
-		     uint32_t num_clips);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 
@@ -927,6 +936,10 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 int vmw_dumb_destroy(struct drm_file *file_priv,
 		     struct drm_device *dev,
 		     uint32_t handle);
+extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
+extern void vmw_resource_unpin(struct vmw_resource *res);
+extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
+
 /**
  * Overlay control - vmwgfx_overlay.c
  */
@@ -982,27 +995,33 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
 
 extern const struct vmw_user_resource_conv *user_context_converter;
 
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
-
 extern int vmw_context_check(struct vmw_private *dev_priv,
 			     struct ttm_object_file *tfile,
 			     int id,
 			     struct vmw_resource **p_res);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file_priv);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
-extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-				   const struct vmw_ctx_bindinfo *ci);
-extern void
-vmw_context_binding_state_transfer(struct vmw_resource *res,
-				   struct vmw_ctx_binding_state *cbs);
-extern void vmw_context_binding_res_list_kill(struct list_head *head);
-extern void vmw_context_binding_res_list_scrub(struct list_head *head);
-extern int vmw_context_rebind_all(struct vmw_resource *ctx);
 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 extern struct vmw_cmdbuf_res_manager *
 vmw_context_res_man(struct vmw_resource *ctx);
+extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+						SVGACOTableType cotable_type);
+struct vmw_ctx_binding_state;
+extern struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx);
+extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+					  bool readback);
+extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+				     struct vmw_dma_buffer *mob);
+extern struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
+
+
 /*
  * Surface management - vmwgfx_surface.c
  */
@@ -1025,6 +1044,16 @@ extern int vmw_surface_check(struct vmw_private *dev_priv,
 			     uint32_t handle, int *id);
 extern int vmw_surface_validate(struct vmw_private *dev_priv,
 				struct vmw_surface *srf);
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out);
 
 /*
  * Shader management - vmwgfx_shader.c
@@ -1042,12 +1071,21 @@ extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
 				 SVGA3dShaderType shader_type,
 				 size_t size,
 				 struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
-				    u32 user_key, SVGA3dShaderType shader_type,
-				    struct list_head *list);
+extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+			     u32 user_key, SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+			     struct vmw_resource *ctx,
+			     u32 user_key,
+			     SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+					     struct list_head *list,
+					     bool readback);
+
 extern struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
-			 u32 user_key, SVGA3dShaderType shader_type);
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key, SVGA3dShaderType shader_type);
 
 /*
  * Command buffer managed resources - vmwgfx_cmdbuf_res.c
@@ -1071,7 +1109,48 @@ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
 extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 				 enum vmw_cmdbuf_res_type res_type,
 				 u32 user_key,
-				 struct list_head *list);
+				 struct list_head *list,
+				 struct vmw_resource **res);
+
+/*
+ * COTable management - vmwgfx_cotable.c
+ */
+extern const SVGACOTableType vmw_cotable_scrub_order[];
+extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+					      struct vmw_resource *ctx,
+					      u32 type);
+extern int vmw_cotable_notify(struct vmw_resource *res, int id);
+extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
+extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
+				     struct list_head *head);
+
+/*
+ * Command buffer management - vmwgfx_cmdbuf.c
+ */
+struct vmw_cmdbuf_man;
+struct vmw_cmdbuf_header;
+
+extern struct vmw_cmdbuf_man *
+vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+				    size_t size, size_t default_size);
+extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
+extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
+extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+			   unsigned long timeout);
+extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+				int ctx_id, bool interruptible,
+				struct vmw_cmdbuf_header *header);
+extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+			      struct vmw_cmdbuf_header *header,
+			      bool flush);
+extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
+extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+			      size_t size, bool interruptible,
+			      struct vmw_cmdbuf_header **p_header);
+extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
+extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+				bool interruptible);
 
 
 /**
@@ -1116,4 +1195,14 @@ static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
 {
 	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
 }
+
+static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
+{
+	atomic_inc(&dev_priv->num_fifo_resources);
+}
+
+static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
+{
+	atomic_dec(&dev_priv->num_fifo_resources);
+}
 #endif
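
/*
 * Editor's usage sketch, not part of the original patch: the dmabuf
 * helpers above were renamed so that pinning is explicit in the function
 * name instead of a bool argument. Typical pairing, error handling
 * abbreviated; the wrapper name is hypothetical.
 */
static int vmw_pin_in_vram_sketch(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf)
{
	int ret;

	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true /* interruptible */);
	if (ret)
		return ret;

	/* ... let the device access buf ... */

	return vmw_dmabuf_unpin(dev_priv, buf, true);
}
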
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 97ad3bcb99a7..b56565457c96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,8 @@
 #include "vmwgfx_reg.h"
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_HT_ORDER 12
 
@@ -59,8 +61,11 @@ struct vmw_resource_relocation {
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: Set when @switching_backup is true on the first buffer
+ * reference, so resource reservation does not need to allocate a backup
+ * buffer for the resource.
  */
 struct vmw_resource_val_node {
 	struct list_head head;
@@ -69,8 +74,9 @@ struct vmw_resource_val_node {
 	struct vmw_dma_buffer *new_backup;
 	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
-	bool first_usage;
-	bool no_buffer_needed;
+	u32 first_usage : 1;
+	u32 switching_backup : 1;
+	u32 no_buffer_needed : 1;
 };
 
 /**
@@ -92,22 +98,40 @@ struct vmw_cmd_entry {
 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
 				       (_gb_disable), (_gb_enable)}
 
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_dma_buffer **vmw_bo_p);
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+				   struct vmw_dma_buffer *vbo,
+				   bool validate_as_mob,
+				   uint32_t *p_val_node);
+
+
 /**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
  * command submission.
  *
- * @list_head: list of resources to unreserve.
+ * @sw_context: Pointer to the software context.
  * @backoff: Whether command submission failed.
  */
-static void vmw_resource_list_unreserve(struct list_head *list,
-					bool backoff)
+static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+				    bool backoff)
 {
 	struct vmw_resource_val_node *val;
+	struct list_head *list = &sw_context->resource_list;
+
+	if (sw_context->dx_query_mob && !backoff)
+		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+					  sw_context->dx_query_mob);
 
 	list_for_each_entry(val, list, head) {
 		struct vmw_resource *res = val->res;
-		struct vmw_dma_buffer *new_backup =
-			backoff ? NULL : val->new_backup;
+		bool switch_backup =
+			(backoff) ? false : val->switching_backup;
 
 		/*
 		 * Transfer staged context bindings to the
@@ -115,18 +139,71 @@ static void vmw_resource_list_unreserve(struct list_head *list,
 		 */
 		if (unlikely(val->staged_bindings)) {
 			if (!backoff) {
-				vmw_context_binding_state_transfer
-					(val->res, val->staged_bindings);
+				vmw_binding_state_commit
+					(vmw_context_binding_state(val->res),
+					 val->staged_bindings);
 			}
-			kfree(val->staged_bindings);
+
+			if (val->staged_bindings != sw_context->staged_bindings)
+				vmw_binding_state_free(val->staged_bindings);
+			else
+				sw_context->staged_bindings_inuse = false;
 			val->staged_bindings = NULL;
 		}
-		vmw_resource_unreserve(res, new_backup,
-			val->new_backup_offset);
+		vmw_resource_unreserve(res, switch_backup, val->new_backup,
+				       val->new_backup_offset);
 		vmw_dmabuf_unreference(&val->new_backup);
 	}
 }
 
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private.
+ * @sw_context: The validation context.
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   struct vmw_resource_val_node *node)
+{
+	int ret;
+
+	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (!sw_context->staged_bindings) {
+		sw_context->staged_bindings =
+			vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(sw_context->staged_bindings)) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			ret = PTR_ERR(sw_context->staged_bindings);
+			sw_context->staged_bindings = NULL;
+			goto out_err;
+		}
+	}
+
+	if (sw_context->staged_bindings_inuse) {
+		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(node->staged_bindings)) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			ret = PTR_ERR(node->staged_bindings);
+			node->staged_bindings = NULL;
+			goto out_err;
+		}
+	} else {
+		node->staged_bindings = sw_context->staged_bindings;
+		sw_context->staged_bindings_inuse = true;
+	}
+
+	return 0;
+out_err:
+	return ret;
+}
 
 /**
  * vmw_resource_val_add - Add a resource to the software context's
@@ -141,6 +218,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 				struct vmw_resource *res,
 				struct vmw_resource_val_node **p_node)
 {
+	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_resource_val_node *node;
 	struct drm_hash_item *hash;
 	int ret;
@@ -169,14 +247,90 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 		kfree(node);
 		return ret;
 	}
-	list_add_tail(&node->head, &sw_context->resource_list);
 	node->res = vmw_resource_reference(res);
 	node->first_usage = true;
-
 	if (unlikely(p_node != NULL))
 		*p_node = node;
 
-	return 0;
+	if (!dev_priv->has_mob) {
+		list_add_tail(&node->head, &sw_context->resource_list);
+		return 0;
+	}
+
+	switch (vmw_res_type(res)) {
+	case vmw_res_context:
+	case vmw_res_dx_context:
+		list_add(&node->head, &sw_context->ctx_resource_list);
+		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+		break;
+	case vmw_res_cotable:
+		list_add_tail(&node->head, &sw_context->ctx_resource_list);
+		break;
+	default:
+		list_add_tail(&node->head, &sw_context->resource_list);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view and the surface it points to, to
+ * the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 if success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+				struct vmw_resource *view)
+{
+	int ret;
+
+	/*
+	 * First add the resource the view is pointing to, otherwise
+	 * it may be swapped out when the view is validated.
+	 */
+	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+	if (ret)
+		return ret;
+
+	return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it, and the surface it
+ * points to, to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If there is no DX context set, the function
+ * will return -EINVAL. Otherwise returns 0 on success and a negative error
+ * code on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+			       enum vmw_view_type view_type, u32 id)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *view;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	view = vmw_view_lookup(sw_context->man, view_type, id);
+	if (IS_ERR(view))
+		return PTR_ERR(view);
+
+	ret = vmw_view_res_val_add(sw_context, view);
+	vmw_resource_unreference(&view);
+
+	return ret;
 }
 
 /**
@@ -195,24 +349,56 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 					struct vmw_resource *ctx)
 {
 	struct list_head *binding_list;
-	struct vmw_ctx_binding *entry;
+	struct vmw_ctx_bindinfo *entry;
 	int ret = 0;
 	struct vmw_resource *res;
+	u32 i;
 
+	/* Add all cotables to the validation list. */
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			res = vmw_context_cotable(ctx, i);
+			if (IS_ERR(res))
+				continue;
+
+			ret = vmw_resource_val_add(sw_context, res, NULL);
+			vmw_resource_unreference(&res);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+
+	/* Add all resources bound to the context to the validation list */
 	mutex_lock(&dev_priv->binding_mutex);
 	binding_list = vmw_context_binding_list(ctx);
 
 	list_for_each_entry(entry, binding_list, ctx_list) {
-		res = vmw_resource_reference_unless_doomed(entry->bi.res);
+		/* entry->res is not refcounted */
+		res = vmw_resource_reference_unless_doomed(entry->res);
 		if (unlikely(res == NULL))
 			continue;
 
-		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+		if (vmw_res_type(entry->res) == vmw_res_view)
+			ret = vmw_view_res_val_add(sw_context, entry->res);
+		else
+			ret = vmw_resource_val_add(sw_context, entry->res,
+						   NULL);
 		vmw_resource_unreference(&res);
 		if (unlikely(ret != 0))
 			break;
 	}
 
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		struct vmw_dma_buffer *dx_query_mob;
+
+		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+		if (dx_query_mob)
+			ret = vmw_bo_to_validate_list(sw_context,
+						      dx_query_mob,
+						      true, NULL);
+	}
+
 	mutex_unlock(&dev_priv->binding_mutex);
 	return ret;
 }
@@ -308,7 +494,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct ttm_buffer_object *bo,
+				   struct vmw_dma_buffer *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
@@ -318,7 +504,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 	struct drm_hash_item *hash;
 	int ret;
 
-	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
 				    &hash) == 0)) {
 		vval_buf = container_of(hash, struct vmw_validate_buffer,
 					hash);
@@ -336,7 +522,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 			return -EINVAL;
 		}
 		vval_buf = &sw_context->val_bufs[val_node];
-		vval_buf->hash.key = (unsigned long) bo;
+		vval_buf->hash.key = (unsigned long) vbo;
 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed to initialize a buffer validation "
@@ -345,7 +531,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 		}
 		++sw_context->cur_val_buf;
 		val_buf = &vval_buf->base;
-		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->bo = ttm_bo_reference(&vbo->base);
 		val_buf->shared = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		vval_buf->validate_as_mob = validate_as_mob;
@@ -370,27 +556,39 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
 	struct vmw_resource_val_node *val;
-	int ret;
+	int ret = 0;
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
 
-		ret = vmw_resource_reserve(res, val->no_buffer_needed);
+		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
 		if (unlikely(ret != 0))
 			return ret;
 
 		if (res->backup) {
-			struct ttm_buffer_object *bo = &res->backup->base;
+			struct vmw_dma_buffer *vbo = res->backup;
 
 			ret = vmw_bo_to_validate_list
-				(sw_context, bo,
+				(sw_context, vbo,
 				 vmw_resource_needs_backup(res), NULL);
 
 			if (unlikely(ret != 0))
 				return ret;
 		}
 	}
-	return 0;
+
+	if (sw_context->dx_query_mob) {
+		struct vmw_dma_buffer *expected_dx_query_mob;
+
+		expected_dx_query_mob =
+			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+		if (expected_dx_query_mob &&
+		    expected_dx_query_mob != sw_context->dx_query_mob) {
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
 }
 
 /**
@@ -409,6 +607,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
+		struct vmw_dma_buffer *backup = res->backup;
 
 		ret = vmw_resource_validate(res);
 		if (unlikely(ret != 0)) {
@@ -416,18 +615,29 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 				DRM_ERROR("Failed to validate resource.\n");
 			return ret;
 		}
+
+		/* Check if the resource switched backup buffer */
+		if (backup && res->backup && (backup != res->backup)) {
+			struct vmw_dma_buffer *vbo = res->backup;
+
+			ret = vmw_bo_to_validate_list
+				(sw_context, vbo,
+				 vmw_resource_needs_backup(res), NULL);
+			if (ret) {
+				ttm_bo_unreserve(&vbo->base);
+				return ret;
+			}
+		}
 	}
 	return 0;
 }
 
-
 /**
  * vmw_cmd_res_reloc_add - Add a resource to a software context's
  * relocation- and validation lists.
  *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
  * @id_loc: Pointer to where the id that needs translation is located.
  * @res: Valid pointer to a struct vmw_resource.
  * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
@@ -435,7 +645,6 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
  */
 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
-				 enum vmw_res_type res_type,
 				 uint32_t *id_loc,
 				 struct vmw_resource *res,
 				 struct vmw_resource_val_node **p_val)
@@ -454,29 +663,6 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (res_type == vmw_res_context && dev_priv->has_mob &&
-	    node->first_usage) {
-
-		/*
-		 * Put contexts first on the list to be able to exit
-		 * list traversal for contexts early.
-		 */
-		list_del(&node->head);
-		list_add(&node->head, &sw_context->resource_list);
-
-		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
-		if (unlikely(ret != 0))
-			return ret;
-		node->staged_bindings =
-			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
-		if (node->staged_bindings == NULL) {
-			DRM_ERROR("Failed to allocate context binding "
-				  "information.\n");
-			return -ENOMEM;
-		}
-		INIT_LIST_HEAD(&node->staged_bindings->list);
-	}
-
 	if (p_val)
 		*p_val = node;
 
@@ -554,7 +740,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 	rcache->res = res;
 	rcache->handle = *id_loc;
 
-	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
 				    res, &node);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
@@ -573,6 +759,46 @@ out_no_reloc:
 }
 
 /**
+ * vmw_rebind_all_dx_query - Rebind all DX queries associated with the context
+ *
+ * @ctx_res: Context the queries belong to.
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+	struct vmw_private *dev_priv = ctx_res->dev_priv;
+	struct vmw_dma_buffer *dx_query_mob;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindAllQuery body;
+	} *cmd;
+
+	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
+	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
+
+	if (cmd == NULL) {
+		DRM_ERROR("Failed to rebind queries.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = ctx_res->id;
+	cmd->body.mobid = dx_query_mob->base.mem.start;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+	return 0;
+}
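
/*
 * Editor's note, not part of the original patch: the function above uses
 * the driver's standard reserve/fill/commit sequence for emitting a
 * fixed-size SVGA3D command into the fifo. The same shape works for any
 * such command; sketch below, with the body type reused purely as a
 * placeholder and dev_priv/ctx_id as hypothetical locals.
 */
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;	/* placeholder body type */
	} *cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_id);

	if (cmd == NULL)
		return -ENOMEM;			/* fifo space exhausted */

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	/* ... fill cmd->body ... */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));	/* commit what was reserved */
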
+
+/**
  * vmw_rebind_contexts - Rebind all resources previously bound to
  * referenced contexts.
  *
@@ -589,12 +815,80 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 		if (unlikely(!val->staged_bindings))
 			break;
 
-		ret = vmw_context_rebind_all(val->res);
+		ret = vmw_binding_rebind_all
+			(vmw_context_binding_state(val->res));
 		if (unlikely(ret != 0)) {
 			if (ret != -ERESTARTSYS)
 				DRM_ERROR("Failed to rebind context.\n");
 			return ret;
 		}
+
+		ret = vmw_rebind_all_dx_query(val->res);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context
+ * binding state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to use for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+				 enum vmw_view_type view_type,
+				 enum vmw_ctx_binding_type binding_type,
+				 uint32 shader_slot,
+				 uint32 view_ids[], u32 num_views,
+				 u32 first_slot)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_cmdbuf_res_manager *man;
+	u32 i;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	man = sw_context->man;
+	for (i = 0; i < num_views; ++i) {
+		struct vmw_ctx_bindinfo_view binding;
+		struct vmw_resource *view = NULL;
+
+		if (view_ids[i] != SVGA3D_INVALID_ID) {
+			view = vmw_view_lookup(man, view_type, view_ids[i]);
+			if (IS_ERR(view)) {
+				DRM_ERROR("View not found.\n");
+				return PTR_ERR(view);
+			}
+
+			ret = vmw_view_res_val_add(sw_context, view);
+			if (ret) {
+				DRM_ERROR("Could not add view to "
+					  "validation list.\n");
+				vmw_resource_unreference(&view);
+				return ret;
+			}
+		}
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = view;
+		binding.bi.bt = binding_type;
+		binding.shader_slot = shader_slot;
+		binding.slot = first_slot + i;
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				shader_slot, binding.slot);
+		if (view)
+			vmw_resource_unreference(&view);
 	}
 
 	return 0;
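
/*
 * Editor's caller sketch, not part of the original patch: how a command
 * validator for a set-shader-resources style command would use the helper
 * above. The enumerators vmw_view_sr and vmw_ctx_binding_sr are assumed
 * from vmwgfx_so.h / vmwgfx_binding.h, which this patch includes; the
 * local variables are hypothetical.
 */
ret = vmw_view_bindings_add(sw_context, vmw_view_sr, vmw_ctx_binding_sr,
			    shader_slot, view_ids, num_views, first_slot);
if (ret)
	return ret;
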
@@ -638,6 +932,12 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
 
+	if (cmd->body.type >= SVGA3D_RT_MAX) {
+		DRM_ERROR("Illegal render target type %u.\n",
+			  (unsigned) cmd->body.type);
+		return -EINVAL;
+	}
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
 				&ctx_node);
@@ -651,13 +951,14 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 		return ret;
 
 	if (dev_priv->has_mob) {
-		struct vmw_ctx_bindinfo bi;
-
-		bi.ctx = ctx_node->res;
-		bi.res = res_node ? res_node->res : NULL;
-		bi.bt = vmw_ctx_binding_rt;
-		bi.i1.rt_type = cmd->body.type;
-		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+		struct vmw_ctx_bindinfo_view binding;
+
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = res_node ? res_node->res : NULL;
+		binding.bi.bt = vmw_ctx_binding_rt;
+		binding.slot = cmd->body.type;
+		vmw_binding_add(ctx_node->staged_bindings,
+				&binding.bi, 0, binding.slot);
 	}
 
 	return 0;
@@ -674,16 +975,62 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
 				&cmd->body.src.sid, NULL);
-	if (unlikely(ret != 0))
+	if (ret)
 		return ret;
+
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				 user_surface_converter,
 				 &cmd->body.dest.sid, NULL);
 }
 
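+/**
+ * vmw_cmd_buffer_copy_check - Validate an SVGA_3D_CMD_DX_BUFFER_COPY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */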
+static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBufferCopy body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.src, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dest, NULL);
+}
+
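+/**
+ * vmw_cmd_pred_copy_check - Validate an SVGA_3D_CMD_DX_PRED_COPY_REGION
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */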
+static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXPredCopyRegion body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.srcSid, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dstSid, NULL);
+}
+
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 				     struct vmw_sw_context *sw_context,
 				     SVGA3dCmdHeader *header)
@@ -752,7 +1099,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       struct ttm_buffer_object *new_query_bo,
+				       struct vmw_dma_buffer *new_query_bo,
 				       struct vmw_sw_context *sw_context)
 {
 	struct vmw_res_cache_entry *ctx_entry =
@@ -764,7 +1111,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-		if (unlikely(new_query_bo->num_pages > 4)) {
+		if (unlikely(new_query_bo->base.num_pages > 4)) {
 			DRM_ERROR("Query buffer too large.\n");
 			return -EINVAL;
 		}
@@ -833,12 +1180,12 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 
 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 		if (dev_priv->pinned_bo) {
-			vmw_bo_pin(dev_priv->pinned_bo, false);
-			ttm_bo_unref(&dev_priv->pinned_bo);
+			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
 		}
 
 		if (!sw_context->needs_post_query_barrier) {
-			vmw_bo_pin(sw_context->cur_query_bo, true);
+			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
 
 			/*
 			 * We pin also the dummy_query_bo buffer so that we
@@ -846,14 +1193,17 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 			 * dummy queries in context destroy paths.
 			 */
 
-			vmw_bo_pin(dev_priv->dummy_query_bo, true);
-			dev_priv->dummy_query_bo_pinned = true;
+			if (!dev_priv->dummy_query_bo_pinned) {
+				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
+						    true);
+				dev_priv->dummy_query_bo_pinned = true;
+			}
 
 			BUG_ON(sw_context->last_query_ctx == NULL);
 			dev_priv->query_cid = sw_context->last_query_ctx->id;
 			dev_priv->query_cid_valid = true;
 			dev_priv->pinned_bo =
-				ttm_bo_reference(sw_context->cur_query_bo);
+				vmw_dmabuf_reference(sw_context->cur_query_bo);
 		}
 	}
 }
@@ -882,7 +1232,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 struct vmw_dma_buffer **vmw_bo_p)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
@@ -893,7 +1242,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
-	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 		DRM_ERROR("Max number relocations per submission"
@@ -906,7 +1254,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	reloc->mob_loc = id;
 	reloc->location = NULL;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -944,7 +1292,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 				   struct vmw_dma_buffer **vmw_bo_p)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
@@ -955,7 +1302,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
-	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 		DRM_ERROR("Max number relocations per submission"
@@ -967,7 +1313,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -980,6 +1326,98 @@ out_no_reloc:
 	return ret;
 }
 
+/**
+ * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * This function adds the new query into the query COTABLE.
+ */
+static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct vmw_dx_define_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineQuery q;
+	} *cmd;
+
+	int ret;
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *cotable_res;
+
+	if (ctx_node == NULL) {
+		DRM_ERROR("DX Context not set for query.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+
+	if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
+	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+		return -EINVAL;
+
+	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+	vmw_resource_unreference(&cotable_res);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * The query bind operation will eventually associate the query ID
+ * with its backing MOB.  In this function, we take the user mode
+ * MOB ID and use vmw_translate_mob_ptr() to translate it to its
+ * kernel mode equivalent.
+ */
+static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_dx_bind_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindQuery q;
+	} *cmd;
+
+	struct vmw_dma_buffer *vmw_bo;
+	int ret;
+
+	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+
+	/*
+	 * Look up the buffer pointed to by q.mobid, put it on the relocation
+	 * list so its kernel mode MOB ID can be filled in later
+	 */
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+				    &vmw_bo);
+
+	if (ret != 0)
+		return ret;
+
+	sw_context->dx_query_mob = vmw_bo;
+	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+
+	return ret;
+}
+
 /**
  * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
  *
@@ -1074,7 +1512,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
@@ -1128,7 +1566,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
@@ -1363,6 +1801,12 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
 			continue;
 
+		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
+			DRM_ERROR("Illegal texture/sampler unit %u.\n",
+				  (unsigned) cur_state->stage);
+			return -EINVAL;
+		}
+
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 					user_surface_converter,
 					&cur_state->value, &res_node);
@@ -1370,14 +1814,14 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 			return ret;
 
 		if (dev_priv->has_mob) {
-			struct vmw_ctx_bindinfo bi;
-
-			bi.ctx = ctx_node->res;
-			bi.res = res_node ? res_node->res : NULL;
-			bi.bt = vmw_ctx_binding_tex;
-			bi.i1.texture_stage = cur_state->stage;
-			vmw_context_binding_add(ctx_node->staged_bindings,
-						&bi);
+			struct vmw_ctx_bindinfo_tex binding;
+
+			binding.bi.ctx = ctx_node->res;
+			binding.bi.res = res_node ? res_node->res : NULL;
+			binding.bi.bt = vmw_ctx_binding_tex;
+			binding.texture_stage = cur_state->stage;
+			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+					0, binding.texture_stage);
 		}
 	}
 
@@ -1407,6 +1851,47 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 	return ret;
 }
 
+/**
+ * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
+ * switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @val_node: The validation node representing the resource.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     struct vmw_resource_val_node *val_node,
+				     uint32_t *buf_id,
+				     unsigned long backup_offset)
+{
+	struct vmw_dma_buffer *dma_buf;
+	int ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+	if (ret)
+		return ret;
+
+	val_node->switching_backup = true;
+	if (val_node->first_usage)
+		val_node->no_buffer_needed = true;
+
+	vmw_dmabuf_unreference(&val_node->new_backup);
+	val_node->new_backup = dma_buf;
+	val_node->new_backup_offset = backup_offset;
+
+	return 0;
+}
+
 /**
  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
  *
@@ -1420,7 +1905,8 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
  * @backup_offset: Offset of backup into MOB.
  *
  * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving.
+ * in the resource metadata just prior to unreserving. It's basically a wrapper
+ * around vmw_cmd_res_switch_backup with a different interface.
  */
 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
@@ -1431,27 +1917,16 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
 				 uint32_t *buf_id,
 				 unsigned long backup_offset)
 {
-	int ret;
-	struct vmw_dma_buffer *dma_buf;
 	struct vmw_resource_val_node *val_node;
+	int ret;
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
 				converter, res_id, &val_node);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
-	if (unlikely(ret != 0))
+	if (ret)
 		return ret;
 
-	if (val_node->first_usage)
-		val_node->no_buffer_needed = true;
-
-	vmw_dmabuf_unreference(&val_node->new_backup);
-	val_node->new_backup = dma_buf;
-	val_node->new_backup_offset = backup_offset;
-
-	return 0;
+	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+					 buf_id, backup_offset);
 }
 
 /**
@@ -1703,10 +2178,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
 	if (unlikely(!dev_priv->has_mob))
 		return 0;
 
-	ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
-				       cmd->body.shid,
-				       cmd->body.type,
-				       &sw_context->staged_cmd_res);
+	ret = vmw_shader_remove(vmw_context_res_man(val->res),
+				cmd->body.shid,
+				cmd->body.type,
+				&sw_context->staged_cmd_res);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1734,13 +2209,19 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 		SVGA3dCmdSetShader body;
 	} *cmd;
 	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
-	struct vmw_ctx_bindinfo bi;
+	struct vmw_ctx_bindinfo_shader binding;
 	struct vmw_resource *res = NULL;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
 			   header);
 
+	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+		DRM_ERROR("Illegal shader type %u.\n",
+			  (unsigned) cmd->body.type);
+		return -EINVAL;
+	}
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
 				&ctx_node);
@@ -1751,14 +2232,12 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 		return 0;
 
 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
-		res = vmw_compat_shader_lookup
-			(vmw_context_res_man(ctx_node->res),
-			 cmd->body.shid,
-			 cmd->body.type);
+		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+					cmd->body.shid,
+					cmd->body.type);
 
 		if (!IS_ERR(res)) {
 			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
-						    vmw_res_shader,
 						    &cmd->body.shid, res,
 						    &res_node);
 			vmw_resource_unreference(&res);
@@ -1776,11 +2255,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 			return ret;
 	}
 
-	bi.ctx = ctx_node->res;
-	bi.res = res_node ? res_node->res : NULL;
-	bi.bt = vmw_ctx_binding_shader;
-	bi.i1.shader_type = cmd->body.type;
-	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = res_node ? res_node->res : NULL;
+	binding.bi.bt = vmw_ctx_binding_shader;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+			binding.shader_slot, 0);
+	return 0;
 }
 
 /**
@@ -1842,6 +2323,690 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
 				     cmd->body.offsetInBytes);
 }
 
+/**
+ * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSingleConstantBuffer body;
+	} *cmd;
+	struct vmw_resource_val_node *res_node = NULL;
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_cb binding;
+	int ret;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.sid, &res_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = res_node ? res_node->res : NULL;
+	binding.bi.bt = vmw_ctx_binding_cb;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+	binding.offset = cmd->body.offsetInBytes;
+	binding.size = cmd->body.sizeInBytes;
+	binding.slot = cmd->body.slot;
+
+	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
+			  (unsigned) cmd->body.type,
+			  (unsigned) binding.slot);
+		return -EINVAL;
+	}
+
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+			binding.shader_slot, binding.slot);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_shader_res - Validate an
+ * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShaderResources body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dShaderResourceViewId);
+
+	if ((u64) cmd->body.startView + (u64) num_sr_view >
+	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
+	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+		DRM_ERROR("Invalid shader binding.\n");
+		return -EINVAL;
+	}
+
+	return vmw_view_bindings_add(sw_context, vmw_view_sr,
+				     vmw_ctx_binding_sr,
+				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
+				     (void *) &cmd[1], num_sr_view,
+				     cmd->body.startView);
+}
+
+/**
+ * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShader body;
+	} *cmd;
+	struct vmw_resource *res = NULL;
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_shader binding;
+	int ret = 0;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+		DRM_ERROR("Illegal shader type %u.\n",
+			  (unsigned) cmd->body.type);
+		return -EINVAL;
+	}
+
+	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
+		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
+		if (IS_ERR(res)) {
+			DRM_ERROR("Could not find shader for binding.\n");
+			return PTR_ERR(res);
+		}
+
+		ret = vmw_resource_val_add(sw_context, res, NULL);
+		if (ret)
+			goto out_unref;
+	}
+
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = res;
+	binding.bi.bt = vmw_ctx_binding_dx_shader;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+			binding.shader_slot, 0);
+out_unref:
+	if (res)
+		vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_set_vertex_buffers - Validates an
+ * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
+					 struct vmw_sw_context *sw_context,
+					 SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_vb binding;
+	struct vmw_resource_val_node *res_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetVertexBuffers body;
+		SVGA3dVertexBuffer buf[];
+	} *cmd;
+	int i, ret, num;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	num = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dVertexBuffer);
+	if ((u64)num + (u64)cmd->body.startBuffer >
+	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
+		DRM_ERROR("Invalid number of vertex buffers.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; i++) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&cmd->buf[i].sid, &res_node);
+		if (unlikely(ret != 0))
+			return ret;
+
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.bt = vmw_ctx_binding_vb;
+		binding.bi.res = ((res_node) ? res_node->res : NULL);
+		binding.offset = cmd->buf[i].offset;
+		binding.stride = cmd->buf[i].stride;
+		binding.slot = i + cmd->body.startBuffer;
+
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				0, binding.slot);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_index_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_ib binding;
+	struct vmw_resource_val_node *res_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetIndexBuffer body;
+	} *cmd;
+	int ret;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.sid, &res_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = ((res_node) ? res_node->res : NULL);
+	binding.bi.bt = vmw_ctx_binding_ib;
+	binding.offset = cmd->body.offset;
+	binding.format = cmd->body.format;
+
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_rendertargets - Validate an
+ * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetRenderTargets body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dRenderTargetViewId);
+
+	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
+		DRM_ERROR("Invalid DX Rendertarget binding.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
+				    vmw_ctx_binding_ds, 0,
+				    &cmd->body.depthStencilViewId, 1, 0);
+	if (ret)
+		return ret;
+
+	return vmw_view_bindings_add(sw_context, vmw_view_rt,
+				     vmw_ctx_binding_dx_rt, 0,
+				     (void *)&cmd[1], num_rt_view, 0);
+}
+
+/**
+ * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
+					      struct vmw_sw_context *sw_context,
+					      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXClearRenderTargetView body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_view_id_val_add(sw_context, vmw_view_rt,
+				   cmd->body.renderTargetViewId);
+}
+
+/**
+ * vmw_cmd_dx_clear_depthstencil_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
+					      struct vmw_sw_context *sw_context,
+					      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXClearDepthStencilView body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_view_id_val_add(sw_context, vmw_view_ds,
+				   cmd->body.depthStencilViewId);
+}
+
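+/**
+ * vmw_cmd_dx_view_define - Validate an SVGA_3D_CMD_DX_DEFINE_*_VIEW
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Validates the backing surface, notifies the view COTABLE of the new
+ * view id and adds the view to the command buffer managed resource list.
+ */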
+static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource_val_node *srf_node;
+	struct vmw_resource *res;
+	enum vmw_view_type view_type;
+	int ret;
+	/*
+	 * This is based on the fact that all affected define commands have
+	 * the same initial command body layout.
+	 */
+	struct {
+		SVGA3dCmdHeader header;
+		uint32 defined_id;
+		uint32 sid;
+	} *cmd;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	view_type = vmw_view_cmd_to_type(header->id);
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->sid, &srf_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+	ret = vmw_cotable_notify(res, cmd->defined_id);
+	vmw_resource_unreference(&res);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_view_add(sw_context->man,
+			    ctx_node->res,
+			    srf_node->res,
+			    view_type,
+			    cmd->defined_id,
+			    header,
+			    header->size + sizeof(*header),
+			    &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_set_so_targets - Validate an
+ * SVGA_3D_CMD_DX_SET_SOTARGETS command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_so binding;
+	struct vmw_resource_val_node *res_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSOTargets body;
+		SVGA3dSoTarget targets[];
+	} *cmd;
+	int i, ret, num;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	num = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dSoTarget);
+
+	if (num > SVGA3D_DX_MAX_SOTARGETS) {
+		DRM_ERROR("Invalid DX SO binding.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; i++) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&cmd->targets[i].sid, &res_node);
+		if (unlikely(ret != 0))
+			return ret;
+
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = ((res_node) ? res_node->res : NULL);
+		binding.bi.bt = vmw_ctx_binding_so;
+		binding.offset = cmd->targets[i].offset;
+		binding.size = cmd->targets[i].sizeInBytes;
+		binding.slot = i;
+
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				0, binding.slot);
+	}
+
+	return 0;
+}
+
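+/**
+ * vmw_cmd_dx_so_define - Validate a state-object define command, for
+ * example SVGA_3D_CMD_DX_DEFINE_BLEND_STATE.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Notifies the corresponding COTABLE of the id being defined.
+ */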
+static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	/*
+	 * This is based on the fact that all affected define commands have
+	 * the same initial command body layout.
+	 */
+	struct {
+		SVGA3dCmdHeader header;
+		uint32 defined_id;
+	} *cmd;
+	enum vmw_so_type so_type;
+	int ret;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	so_type = vmw_so_cmd_to_type(header->id);
+	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cotable_notify(res, cmd->defined_id);
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_check_subresource - Validate an
+ * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		union {
+			SVGA3dCmdDXReadbackSubResource r_body;
+			SVGA3dCmdDXInvalidateSubResource i_body;
+			SVGA3dCmdDXUpdateSubResource u_body;
+			SVGA3dSurfaceId sid;
+		};
+	} *cmd;
+
+	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->sid, NULL);
+}
+
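+/**
+ * vmw_cmd_dx_cid_check - Validate a command that only needs a DX
+ * context to be set.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */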
+static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_view_remove - validate a view remove command and
+ * schedule the view resource for removal.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Check that the view exists, and if it was not created using this
+ * command batch, make sure it's validated (present in the device) so that
+ * the remove command will not confuse the device.
+ */
+static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct {
+		SVGA3dCmdHeader header;
+		union vmw_view_destroy body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
+	struct vmw_resource *view;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_remove(sw_context->man,
+			      cmd->body.view_id, view_type,
+			      &sw_context->staged_cmd_res,
+			      &view);
+	if (ret || !view)
+		return ret;
+
+	/*
+	 * Add view to the validate list iff it was not created using this
+	 * command batch.
+	 */
+	return vmw_view_res_val_add(sw_context, view);
+}
+
+/**
+ * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+				    struct vmw_sw_context *sw_context,
+				    SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineShader body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+	ret = vmw_cotable_notify(res, cmd->body.shaderId);
+	vmw_resource_unreference(&res);
+	if (ret)
+		return ret;
+
+	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+				 cmd->body.shaderId, cmd->body.type,
+				 &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyShader body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
+				&sw_context->staged_cmd_res);
+	if (ret)
+		DRM_ERROR("Could not find shader to remove.\n");
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *res_node;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (cmd->body.cid != SVGA3D_INVALID_ID) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+					user_context_converter,
+					&cmd->body.cid, &ctx_node);
+		if (ret)
+			return ret;
+	} else {
+		ctx_node = sw_context->dx_ctx_node;
+		if (!ctx_node) {
+			DRM_ERROR("DX Context not set.\n");
+			return -EINVAL;
+		}
+	}
+
+	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+				cmd->body.shid, 0);
+	if (IS_ERR(res)) {
+		DRM_ERROR("Could not find shader to bind.\n");
+		return PTR_ERR(res);
+	}
+
+	ret = vmw_resource_val_add(sw_context, res, &res_node);
+	if (ret) {
+		DRM_ERROR("Error creating resource validation node.\n");
+		goto out_unref;
+	}
+
+	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
+					&cmd->body.mobid,
+					cmd->body.offsetInBytes);
+out_unref:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -1849,7 +3014,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 	uint32_t size_remaining = *size;
 	uint32_t cmd_id;
 
-	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	cmd_id = ((uint32_t *)buf)[0];
 	switch (cmd_id) {
 	case SVGA_CMD_UPDATE:
 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
@@ -1980,7 +3145,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
 		    false, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
 		    false, false, true),
@@ -2051,7 +3216,147 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
-		    true, false, true)
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
+		    false, false, true),
+
+	/*
+	 * DX commands
+	 */
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
+		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
+		    &vmw_cmd_dx_set_shader_res, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
+		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
+		    &vmw_cmd_dx_set_index_buffer, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
+		    &vmw_cmd_dx_set_rendertargets, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
+		    &vmw_cmd_ok, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
+		    &vmw_cmd_dx_define_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
+		    &vmw_cmd_dx_destroy_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
+		    &vmw_cmd_dx_bind_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
+		    &vmw_cmd_dx_set_so_targets, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
+		    &vmw_cmd_buffer_copy_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
+		    &vmw_cmd_pred_copy_check, true, false, true),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -2065,14 +3370,14 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 	const struct vmw_cmd_entry *entry;
 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
 
-	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	cmd_id = ((uint32_t *)buf)[0];
 	/* Handle any non-3D commands */
 	if (unlikely(cmd_id < SVGA_CMD_MAX))
 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
 
 
-	cmd_id = le32_to_cpu(header->id);
-	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+	cmd_id = header->id;
+	*size = header->size + sizeof(SVGA3dCmdHeader);
 
 	cmd_id -= SVGA_3D_CMD_BASE;
 	if (unlikely(*size > size_remaining))
@@ -2184,7 +3489,8 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
  *
  * @list: The resource list.
  */
-static void vmw_resource_list_unreference(struct list_head *list)
+static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
+					  struct list_head *list)
 {
 	struct vmw_resource_val_node *val, *val_next;
 
@@ -2195,8 +3501,15 @@ static void vmw_resource_list_unreference(struct list_head *list)
 	list_for_each_entry_safe(val, val_next, list, head) {
 		list_del_init(&val->head);
 		vmw_resource_unreference(&val->res);
-		if (unlikely(val->staged_bindings))
-			kfree(val->staged_bindings);
+
+		if (val->staged_bindings) {
+			if (val->staged_bindings != sw_context->staged_bindings)
+				vmw_binding_state_free(val->staged_bindings);
+			else
+				sw_context->staged_bindings_inuse = false;
+			val->staged_bindings = NULL;
+		}
+
 		kfree(val);
 	}
 }
@@ -2222,24 +3535,21 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
 }
 
-static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-				      struct ttm_buffer_object *bo,
-				      bool validate_as_mob)
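+/**
+ * vmw_validate_single_buffer - Validate the placement of a single
+ * buffer object.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @bo: The buffer object to validate.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @validate_as_mob: Whether the buffer is to be validated as a MOB.
+ *
+ * Pinned buffers are left where they are. Other buffers are validated
+ * for MOB use, or placed in VRAM if there is space, otherwise as a GMR.
+ */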
+int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+			       struct ttm_buffer_object *bo,
+			       bool interruptible,
+			       bool validate_as_mob)
 {
+	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
+						  base);
 	int ret;
 
-
-	/*
-	 * Don't validate pinned buffers.
-	 */
-
-	if (bo == dev_priv->pinned_bo ||
-	    (bo == dev_priv->dummy_query_bo &&
-	     dev_priv->dummy_query_bo_pinned))
+	if (vbo->pin_count > 0)
 		return 0;
 
 	if (validate_as_mob)
-		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
+				       false);
 
 	/**
 	 * Put BO in VRAM if there is space, otherwise as a GMR.
@@ -2248,7 +3558,8 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * used as a GMR, this will return -ENOMEM.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+			      false);
 	if (likely(ret == 0 || ret == -ERESTARTSYS))
 		return ret;
 
@@ -2257,8 +3568,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * previous contents.
 	 */
 
-	DRM_INFO("Falling through to VRAM.\n");
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
 	return ret;
 }
 
@@ -2270,6 +3580,7 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
 
 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
 		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+						 true,
 						 entry->validate_as_mob);
 		if (unlikely(ret != 0))
 			return ret;
@@ -2417,7 +3728,164 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 	}
 }
 
+/**
+ * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
+ * the fifo.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @kernel_commands: Pointer to the unpatched command batch.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command batch
+ * pointed to by @kernel_commands will have been modified.
+ */
+static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
+				   void *kernel_commands,
+				   u32 command_size,
+				   struct vmw_sw_context *sw_context)
+{
+	void *cmd;
 
+	if (sw_context->dx_ctx_node)
+		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+					  sw_context->dx_ctx_node->res->id);
+	else
+		cmd = vmw_fifo_reserve(dev_priv, command_size);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving fifo space for commands.\n");
+		return -ENOMEM;
+	}
+
+	vmw_apply_relocations(sw_context);
+	memcpy(cmd, kernel_commands, command_size);
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_fifo_commit(dev_priv, command_size);
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
+ * the command buffer manager.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @header: Opaque handle to the command buffer allocation.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command buffer
+ * represented by @header will have been modified.
+ */
+static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
+				     struct vmw_cmdbuf_header *header,
+				     u32 command_size,
+				     struct vmw_sw_context *sw_context)
+{
+	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+		  SVGA3D_INVALID_ID);
+	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
+				       id, false, header);
+
+	vmw_apply_relocations(sw_context);
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
+ * submission using a command buffer.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @user_commands: User-space pointer to the commands to be submitted.
+ * @kernel_commands: Pointer to a kernel-space copy of the commands, or
+ * NULL if the commands are to be taken from @user_commands.
+ * @command_size: Size of the unpatched command batch.
+ * @header: Out parameter returning the opaque pointer to the command buffer.
+ *
+ * This function checks whether we can use the command buffer manager for
+ * submission and if so, creates a command buffer of suitable size and
+ * copies the user data into that buffer.
+ *
+ * On successful return, the function returns a pointer to the data in the
+ * command buffer and *@header is set to non-NULL.
+ * If command buffers could not be used, the function returns the value of
+ * @kernel_commands it was called with. That value may be NULL. In that case,
+ * *@header will be set to NULL.
+ * If an error is encountered, the function will return a pointer error value.
+ * If the function is interrupted by a signal while sleeping, it will return
+ * -ERESTARTSYS cast to a pointer error value.
+ */
+static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
+				void __user *user_commands,
+				void *kernel_commands,
+				u32 command_size,
+				struct vmw_cmdbuf_header **header)
+{
+	size_t cmdbuf_size;
+	int ret;
+
+	*header = NULL;
+	if (!dev_priv->cman || kernel_commands)
+		return kernel_commands;
+
+	if (command_size > SVGA_CB_MAX_SIZE) {
+		DRM_ERROR("Command buffer is too large.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* If possible, add a little space for fencing. */
+	cmdbuf_size = command_size + 512;
+	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
+	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
+					   true, header);
+	if (IS_ERR(kernel_commands))
+		return kernel_commands;
+
+	ret = copy_from_user(kernel_commands, user_commands,
+			     command_size);
+	if (ret) {
+		DRM_ERROR("Failed copying commands.\n");
+		vmw_cmdbuf_header_free(*header);
+		*header = NULL;
+		return ERR_PTR(-EFAULT);
+	}
+
+	return kernel_commands;
+}
+
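+/**
+ * vmw_execbuf_tie_context - Look up and validate the DX context for a
+ * command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context being used for this batch.
+ * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
+ *
+ * Adds the context to the validation list and makes it the active DX
+ * context of @sw_context.
+ */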
+static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   uint32_t handle)
+{
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource *res;
+	int ret;
+
+	if (handle == SVGA3D_INVALID_ID)
+		return 0;
+
+	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
+					      handle, user_context_converter,
+					      &res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
+			  (unsigned) handle);
+		return ret;
+	}
+
+	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	sw_context->dx_ctx_node = ctx_node;
+	sw_context->man = vmw_context_res_man(res);
+out_err:
+	vmw_resource_unreference(&res);
+	return ret;
+}
 
 int vmw_execbuf_process(struct drm_file *file_priv,
 			struct vmw_private *dev_priv,
@@ -2425,6 +3893,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			void *kernel_commands,
 			uint32_t command_size,
 			uint64_t throttle_us,
+			uint32_t dx_context_handle,
 			struct drm_vmw_fence_rep __user *user_fence_rep,
 			struct vmw_fence_obj **out_fence)
 {
@@ -2432,18 +3901,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	struct vmw_fence_obj *fence = NULL;
 	struct vmw_resource *error_resource;
 	struct list_head resource_list;
+	struct vmw_cmdbuf_header *header;
 	struct ww_acquire_ctx ticket;
 	uint32_t handle;
-	void *cmd;
 	int ret;
 
+	if (throttle_us) {
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+				   throttle_us);
+
+		if (ret)
+			return ret;
+	}
+
+	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
+					     kernel_commands, command_size,
+					     &header);
+	if (IS_ERR(kernel_commands))
+		return PTR_ERR(kernel_commands);
+
 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
-	if (unlikely(ret != 0))
-		return -ERESTARTSYS;
+	if (ret) {
+		ret = -ERESTARTSYS;
+		goto out_free_header;
+	}
 
+	sw_context->kernel = false;
 	if (kernel_commands == NULL) {
-		sw_context->kernel = false;
-
 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
 		if (unlikely(ret != 0))
 			goto out_unlock;
@@ -2458,19 +3942,26 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			goto out_unlock;
 		}
 		kernel_commands = sw_context->cmd_bounce;
-	} else
+	} else if (!header)
 		sw_context->kernel = true;
 
 	sw_context->fp = vmw_fpriv(file_priv);
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	INIT_LIST_HEAD(&sw_context->resource_list);
+	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
 	sw_context->cur_query_bo = dev_priv->pinned_bo;
 	sw_context->last_query_ctx = NULL;
 	sw_context->needs_post_query_barrier = false;
+	sw_context->dx_ctx_node = NULL;
+	sw_context->dx_query_mob = NULL;
+	sw_context->dx_query_ctx = NULL;
 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
 	INIT_LIST_HEAD(&sw_context->validate_nodes);
 	INIT_LIST_HEAD(&sw_context->res_relocations);
+	if (sw_context->staged_bindings)
+		vmw_binding_state_reset(sw_context->staged_bindings);
+
 	if (!sw_context->res_ht_initialized) {
 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
 		if (unlikely(ret != 0))
@@ -2478,10 +3969,24 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		sw_context->res_ht_initialized = true;
 	}
 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
-
 	INIT_LIST_HEAD(&resource_list);
+	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
+	if (unlikely(ret != 0)) {
+		list_splice_init(&sw_context->ctx_resource_list,
+				 &sw_context->resource_list);
+		goto out_err_nores;
+	}
+
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
+	/*
+	 * Merge the resource lists before checking the return status
+	 * from vmw_cmd_check_all so that all the open hashtabs will
+	 * be handled properly even if vmw_cmd_check_all fails.
+	 */
+	list_splice_init(&sw_context->ctx_resource_list,
+			 &sw_context->resource_list);
+
 	if (unlikely(ret != 0))
 		goto out_err_nores;
 
@@ -2502,14 +4007,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		goto out_err;
 
-	if (throttle_us) {
-		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
-				   throttle_us);
-
-		if (unlikely(ret != 0))
-			goto out_err;
-	}
-
 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
 	if (unlikely(ret != 0)) {
 		ret = -ERESTARTSYS;
@@ -2522,21 +4019,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			goto out_unlock_binding;
 	}
 
-	cmd = vmw_fifo_reserve(dev_priv, command_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving fifo space for commands.\n");
-		ret = -ENOMEM;
-		goto out_unlock_binding;
+	if (!header) {
+		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
+					      command_size, sw_context);
+	} else {
+		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
+						sw_context);
+		header = NULL;
 	}
-
-	vmw_apply_relocations(sw_context);
-	memcpy(cmd, kernel_commands, command_size);
-
-	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
-	vmw_resource_relocations_free(&sw_context->res_relocations);
-
-	vmw_fifo_commit(dev_priv, command_size);
 	mutex_unlock(&dev_priv->binding_mutex);
+	if (ret)
+		goto out_err;
 
 	vmw_query_bo_switch_commit(dev_priv, sw_context);
 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +4044,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (ret != 0)
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
-	vmw_resource_list_unreserve(&sw_context->resource_list, false);
+	vmw_resources_unreserve(sw_context, false);
 
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 				    (void *) fence);
@@ -2580,7 +4073,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
 	 */
-	vmw_resource_list_unreference(&resource_list);
+	vmw_resource_list_unreference(sw_context, &resource_list);
 
 	return 0;
 
@@ -2589,7 +4082,7 @@ out_unlock_binding:
 out_err:
 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
 out_err_nores:
-	vmw_resource_list_unreserve(&sw_context->resource_list, true);
+	vmw_resources_unreserve(sw_context, true);
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
 	vmw_clear_validations(sw_context);
@@ -2607,9 +4100,12 @@ out_unlock:
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
 	 */
-	vmw_resource_list_unreference(&resource_list);
+	vmw_resource_list_unreference(sw_context, &resource_list);
 	if (unlikely(error_resource != NULL))
 		vmw_resource_unreference(&error_resource);
+out_free_header:
+	if (header)
+		vmw_cmdbuf_header_free(header);
 
 	return ret;
 }
@@ -2628,9 +4124,11 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
 
 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
-	vmw_bo_pin(dev_priv->pinned_bo, false);
-	vmw_bo_pin(dev_priv->dummy_query_bo, false);
-	dev_priv->dummy_query_bo_pinned = false;
+	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+	if (dev_priv->dummy_query_bo_pinned) {
+		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+		dev_priv->dummy_query_bo_pinned = false;
+	}
 }
 
 
@@ -2672,11 +4170,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 
 	INIT_LIST_HEAD(&validate_list);
 
-	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
 	pinned_val.shared = false;
 	list_add_tail(&pinned_val.head, &validate_list);
 
-	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
 	query_val.shared = false;
 	list_add_tail(&query_val.head, &validate_list);
 
@@ -2697,10 +4195,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 		dev_priv->query_cid_valid = false;
 	}
 
-	vmw_bo_pin(dev_priv->pinned_bo, false);
-	vmw_bo_pin(dev_priv->dummy_query_bo, false);
-	dev_priv->dummy_query_bo_pinned = false;
-
+	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+	if (dev_priv->dummy_query_bo_pinned) {
+		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+		dev_priv->dummy_query_bo_pinned = false;
+	}
 	if (fence == NULL) {
 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
 						  NULL);
@@ -2712,7 +4211,9 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	ttm_bo_unref(&dev_priv->pinned_bo);
+	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	DRM_INFO("Dummy query bo pin count: %d\n",
+		 dev_priv->dummy_query_bo->pin_count);
 
 out_unlock:
 	return;
@@ -2722,7 +4223,7 @@ out_no_emit:
 out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	ttm_bo_unref(&dev_priv->pinned_bo);
+	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
 }
 
 /**
@@ -2751,36 +4252,68 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
 
-
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
+int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+		      struct drm_file *file_priv, size_t size)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+	struct drm_vmw_execbuf_arg arg;
 	int ret;
+	static const size_t copy_offset[] = {
+		offsetof(struct drm_vmw_execbuf_arg, context_handle),
+		sizeof(struct drm_vmw_execbuf_arg)};
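+	/*
+	 * copy_offset[v - 1] is the size of a version-v argument; copy the
+	 * version-1 prefix first, then any remainder for newer versions.
+	 */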
+
+	if (unlikely(size < copy_offset[0])) {
+		DRM_ERROR("Invalid command size, ioctl %d\n",
+			  DRM_VMW_EXECBUF);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
+		return -EFAULT;
 
 	/*
-	 * This will allow us to extend the ioctl argument while
+	 * The ioctl argument can be extended while
 	 * maintaining backwards compatibility:
 	 * We take different code paths depending on the value of
-	 * arg->version.
+	 * arg.version.
 	 */
 
-	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
+		     arg.version == 0)) {
 		DRM_ERROR("Incorrect execbuf version.\n");
-		DRM_ERROR("You're running outdated experimental "
-			  "vmwgfx user-space drivers.");
 		return -EINVAL;
 	}
 
+	if (arg.version > 1 &&
+	    copy_from_user(&arg.context_handle,
+			   (void __user *) (data + copy_offset[0]),
+			   copy_offset[arg.version - 1] -
+			   copy_offset[0]) != 0)
+		return -EFAULT;
+
+	switch (arg.version) {
+	case 1:
+		arg.context_handle = (uint32_t) -1;
+		break;
+	case 2:
+		if (arg.pad64 != 0) {
+			DRM_ERROR("Unused IOCTL data not set to zero.\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		break;
+	}
+
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
 	ret = vmw_execbuf_process(file_priv, dev_priv,
-				  (void __user *)(unsigned long)arg->commands,
-				  NULL, arg->command_size, arg->throttle_us,
-				  (void __user *)(unsigned long)arg->fence_rep,
+				  (void __user *)(unsigned long)arg.commands,
+				  NULL, arg.command_size, arg.throttle_us,
+				  arg.context_handle,
+				  (void __user *)(unsigned long)arg.fence_rep,
 				  NULL);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 0a474f391fad..3b1faf7862a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -1,7 +1,7 @@
 /**************************************************************************
  *
  * Copyright © 2007 David Airlie
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,7 @@
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
 
 #include <drm/ttm/ttm_placement.h>
 
@@ -40,21 +41,22 @@ struct vmw_fb_par {
 
 	void *vmalloc;
 
+	struct mutex bo_mutex;
 	struct vmw_dma_buffer *vmw_bo;
 	struct ttm_bo_kmap_obj map;
+	void *bo_ptr;
+	unsigned bo_size;
+	struct drm_framebuffer *set_fb;
+	struct drm_display_mode *set_mode;
+	u32 fb_x;
+	u32 fb_y;
+	bool bo_iowrite;
 
 	u32 pseudo_palette[17];
 
-	unsigned depth;
-	unsigned bpp;
-
 	unsigned max_width;
 	unsigned max_height;
 
-	void *bo_ptr;
-	unsigned bo_size;
-	bool bo_iowrite;
-
 	struct {
 		spinlock_t lock;
 		bool active;
@@ -63,6 +65,10 @@ struct vmw_fb_par {
 		unsigned x2;
 		unsigned y2;
 	} dirty;
+
+	struct drm_crtc *crtc;
+	struct drm_connector *con;
+	struct delayed_work local_work;
 };
 
 static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -77,7 +83,7 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 		return 1;
 	}
 
-	switch (par->depth) {
+	switch (par->set_fb->depth) {
 	case 24:
 	case 32:
 		pal[regno] = ((red & 0xff00) << 8) |
@@ -85,7 +91,8 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 			     ((blue  & 0xff00) >> 8);
 		break;
 	default:
-		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
+		DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
+			  par->set_fb->bits_per_pixel);
 		return 1;
 	}
 
@@ -134,12 +141,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
 		return -EINVAL;
 	}
 
-	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
-	    (var->xoffset != 0 || var->yoffset != 0)) {
-		DRM_ERROR("Can not handle panning without display topology\n");
-		return -EINVAL;
-	}
-
 	if ((var->xoffset + var->xres) > par->max_width ||
 	    (var->yoffset + var->yres) > par->max_height) {
 		DRM_ERROR("Requested geom can not fit in framebuffer\n");
@@ -156,46 +157,6 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
 	return 0;
 }
 
-static int vmw_fb_set_par(struct fb_info *info)
-{
-	struct vmw_fb_par *par = info->par;
-	struct vmw_private *vmw_priv = par->vmw_priv;
-	int ret;
-
-	info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
-
-	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
-				 info->fix.line_length,
-				 par->bpp, par->depth);
-	if (ret)
-		return ret;
-
-	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
-		/* TODO check if pitch and offset changes */
-		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-	}
-
-	/* This is really helpful since if this fails the user
-	 * can probably not see anything on the screen.
-	 */
-	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
-
-	return 0;
-}
-
-static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
-			      struct fb_info *info)
-{
-	return 0;
-}
-
 static int vmw_fb_blank(int blank, struct fb_info *info)
 {
 	return 0;
@@ -205,65 +166,89 @@ static int vmw_fb_blank(int blank, struct fb_info *info)
  * Dirty code
  */
 
-static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
+static void vmw_fb_dirty_flush(struct work_struct *work)
 {
+	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
+					      local_work.work);
 	struct vmw_private *vmw_priv = par->vmw_priv;
 	struct fb_info *info = vmw_priv->fb_info;
-	int stride = (info->fix.line_length / 4);
-	int *src = (int *)info->screen_base;
-	__le32 __iomem *vram_mem = par->bo_ptr;
-	unsigned long flags;
-	unsigned x, y, w, h;
-	int i, k;
-	struct {
-		uint32_t header;
-		SVGAFifoCmdUpdate body;
-	} *cmd;
+	unsigned long irq_flags;
+	s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
+	u32 cpp, max_x, max_y;
+	struct drm_clip_rect clip;
+	struct drm_framebuffer *cur_fb;
+	u8 *src_ptr, *dst_ptr;
 
 	if (vmw_priv->suspended)
 		return;
 
-	spin_lock_irqsave(&par->dirty.lock, flags);
-	if (!par->dirty.active) {
-		spin_unlock_irqrestore(&par->dirty.lock, flags);
-		return;
-	}
-	x = par->dirty.x1;
-	y = par->dirty.y1;
-	w = min(par->dirty.x2, info->var.xres) - x;
-	h = min(par->dirty.y2, info->var.yres) - y;
-	par->dirty.x1 = par->dirty.x2 = 0;
-	par->dirty.y1 = par->dirty.y2 = 0;
-	spin_unlock_irqrestore(&par->dirty.lock, flags);
+	mutex_lock(&par->bo_mutex);
+	cur_fb = par->set_fb;
+	if (!cur_fb)
+		goto out_unlock;
 
-	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
-		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
-			iowrite32(src[k], vram_mem + k);
+	spin_lock_irqsave(&par->dirty.lock, irq_flags);
+	if (!par->dirty.active) {
+		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+		goto out_unlock;
 	}
 
-#if 0
-	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
-#endif
+	/*
+	 * Handle panning when copying from vmalloc to framebuffer.
+	 * Clip dirty area to framebuffer.
+	 */
+	cpp = (cur_fb->bits_per_pixel + 7) / 8;
+	max_x = par->fb_x + cur_fb->width;
+	max_y = par->fb_y + cur_fb->height;
+
+	dst_x1 = par->dirty.x1 - par->fb_x;
+	dst_y1 = par->dirty.y1 - par->fb_y;
+	dst_x1 = max_t(s32, dst_x1, 0);
+	dst_y1 = max_t(s32, dst_y1, 0);
+
+	dst_x2 = par->dirty.x2 - par->fb_x;
+	dst_y2 = par->dirty.y2 - par->fb_y;
+	dst_x2 = min_t(s32, dst_x2, max_x);
+	dst_y2 = min_t(s32, dst_y2, max_y);
+	w = dst_x2 - dst_x1;
+	h = dst_y2 - dst_y1;
+	w = max_t(s32, 0, w);
+	h = max_t(s32, 0, h);
 
-	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Fifo reserve failed.\n");
-		return;
+	par->dirty.x1 = par->dirty.x2 = 0;
+	par->dirty.y1 = par->dirty.y2 = 0;
+	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+
+	if (w && h) {
+		dst_ptr = (u8 *)par->bo_ptr +
+			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
+		src_ptr = (u8 *)par->vmalloc +
+			((dst_y1 + par->fb_y) * info->fix.line_length +
+			 (dst_x1 + par->fb_x) * cpp);
+
+		while (h-- > 0) {
+			memcpy(dst_ptr, src_ptr, w*cpp);
+			dst_ptr += par->set_fb->pitches[0];
+			src_ptr += info->fix.line_length;
+		}
+
+		clip.x1 = dst_x1;
+		clip.x2 = dst_x2;
+		clip.y1 = dst_y1;
+		clip.y2 = dst_y2;
+
+		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
+						       &clip, 1));
+		vmw_fifo_flush(vmw_priv, false);
 	}
-
-	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
-	cmd->body.x = cpu_to_le32(x);
-	cmd->body.y = cpu_to_le32(y);
-	cmd->body.width = cpu_to_le32(w);
-	cmd->body.height = cpu_to_le32(h);
-	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
+out_unlock:
+	mutex_unlock(&par->bo_mutex);
 }
 
 static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
 			      unsigned x1, unsigned y1,
 			      unsigned width, unsigned height)
 {
-	struct fb_info *info = par->vmw_priv->fb_info;
 	unsigned long flags;
 	unsigned x2 = x1 + width;
 	unsigned y2 = y1 + height;
@@ -277,7 +262,8 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
 		/* if we are active start the dirty work
 		 * we share the work with the defio system */
 		if (par->dirty.active)
-			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
+			schedule_delayed_work(&par->local_work,
+					      VMW_DIRTY_DELAY);
 	} else {
 		if (x1 < par->dirty.x1)
 			par->dirty.x1 = x1;
@@ -291,6 +277,28 @@ static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
 }
 
+static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+
+	if ((var->xoffset + var->xres) > var->xres_virtual ||
+	    (var->yoffset + var->yres) > var->yres_virtual) {
+		DRM_ERROR("Requested panning cannot fit in framebuffer\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&par->bo_mutex);
+	par->fb_x = var->xoffset;
+	par->fb_y = var->yoffset;
+	if (par->set_fb)
+		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
+				  par->set_fb->height);
+	mutex_unlock(&par->bo_mutex);
+
+	return 0;
+}
+
 static void vmw_deferred_io(struct fb_info *info,
 			    struct list_head *pagelist)
 {
@@ -319,12 +327,17 @@ static void vmw_deferred_io(struct fb_info *info,
 		par->dirty.x2 = info->var.xres;
 		par->dirty.y2 = y2;
 		spin_unlock_irqrestore(&par->dirty.lock, flags);
-	}
 
-	vmw_fb_dirty_flush(par);
+		/*
+		 * Since we've already waited on this work once, try to
+		 * execute asap.
+		 */
+		cancel_delayed_work(&par->local_work);
+		schedule_delayed_work(&par->local_work, 0);
+	}
 };
 
-struct fb_deferred_io vmw_defio = {
+static struct fb_deferred_io vmw_defio = {
 	.delay		= VMW_DIRTY_DELAY,
 	.deferred_io	= vmw_deferred_io,
 };
@@ -358,33 +371,12 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
  * Bring up code
  */
 
-static struct fb_ops vmw_fb_ops = {
-	.owner = THIS_MODULE,
-	.fb_check_var = vmw_fb_check_var,
-	.fb_set_par = vmw_fb_set_par,
-	.fb_setcolreg = vmw_fb_setcolreg,
-	.fb_fillrect = vmw_fb_fillrect,
-	.fb_copyarea = vmw_fb_copyarea,
-	.fb_imageblit = vmw_fb_imageblit,
-	.fb_pan_display = vmw_fb_pan_display,
-	.fb_blank = vmw_fb_blank,
-};
-
 static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
 			    size_t size, struct vmw_dma_buffer **out)
 {
 	struct vmw_dma_buffer *vmw_bo;
-	struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
-	struct ttm_placement ne_placement;
 	int ret;
 
-	ne_placement.num_placement = 1;
-	ne_placement.placement = &ne_place;
-	ne_placement.num_busy_placement = 1;
-	ne_placement.busy_placement = &ne_place;
-
-	ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
 	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
 
 	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
@@ -394,31 +386,261 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
 	}
 
 	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
-			      &ne_placement,
+			      &vmw_sys_placement,
 			      false,
 			      &vmw_dmabuf_bo_free);
 	if (unlikely(ret != 0))
 		goto err_unlock; /* init frees the buffer on failure */
 
 	*out = vmw_bo;
-
-	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+	ttm_write_unlock(&vmw_priv->reservation_sem);
 
 	return 0;
 
 err_unlock:
-	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+	ttm_write_unlock(&vmw_priv->reservation_sem);
 	return ret;
 }
 
+static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
+				int *depth)
+{
+	switch (var->bits_per_pixel) {
+	case 32:
+		*depth = (var->transp.length > 0) ? 32 : 24;
+		break;
+	default:
+		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
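+/*
+ * Detach the fbdev framebuffer from KMS: drop the mode and the KMS
+ * framebuffer, and if @detach_bo is set also unmap the buffer object,
+ * either unreferencing it (@unref_bo) or merely unpinning it.
+ */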
+static int vmw_fb_kms_detach(struct vmw_fb_par *par,
+			     bool detach_bo,
+			     bool unref_bo)
+{
+	struct drm_framebuffer *cur_fb = par->set_fb;
+	int ret;
+
+	/* Detach the KMS framebuffer from crtcs */
+	if (par->set_mode) {
+		struct drm_mode_set set;
+
+		set.crtc = par->crtc;
+		set.x = 0;
+		set.y = 0;
+		set.mode = NULL;
+		set.fb = NULL;
+		set.num_connectors = 1;
+		set.connectors = &par->con;
+		ret = drm_mode_set_config_internal(&set);
+		if (ret) {
+			DRM_ERROR("Could not unset a mode.\n");
+			return ret;
+		}
+		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
+		par->set_mode = NULL;
+	}
+
+	if (cur_fb) {
+		drm_framebuffer_unreference(cur_fb);
+		par->set_fb = NULL;
+	}
+
+	if (par->vmw_bo && detach_bo) {
+		if (par->bo_ptr) {
+			ttm_bo_kunmap(&par->map);
+			par->bo_ptr = NULL;
+		}
+		if (unref_bo)
+			vmw_dmabuf_unreference(&par->vmw_bo);
+		else
+			vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
+	}
+
+	return 0;
+}
+
+static int vmw_fb_kms_framebuffer(struct fb_info *info)
+{
+	struct drm_mode_fb_cmd mode_cmd;
+	struct vmw_fb_par *par = info->par;
+	struct fb_var_screeninfo *var = &info->var;
+	struct drm_framebuffer *cur_fb;
+	struct vmw_framebuffer *vfb;
+	int ret = 0;
+	size_t new_bo_size;
+
+	ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
+	if (ret)
+		return ret;
+
+	mode_cmd.width = var->xres;
+	mode_cmd.height = var->yres;
+	mode_cmd.bpp = var->bits_per_pixel;
+	mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
+
+	cur_fb = par->set_fb;
+	if (cur_fb && cur_fb->width == mode_cmd.width &&
+	    cur_fb->height == mode_cmd.height &&
+	    cur_fb->bits_per_pixel == mode_cmd.bpp &&
+	    cur_fb->depth == mode_cmd.depth &&
+	    cur_fb->pitches[0] == mode_cmd.pitch)
+		return 0;
+
+	/* Need a new buffer object? Reallocate when the current one is
+	 * too small or more than twice the required size. */
+	new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
+	ret = vmw_fb_kms_detach(par,
+				par->bo_size < new_bo_size ||
+				par->bo_size > 2*new_bo_size,
+				true);
+	if (ret)
+		return ret;
+
+	if (!par->vmw_bo) {
+		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
+				       &par->vmw_bo);
+		if (ret) {
+			DRM_ERROR("Failed creating a buffer object for "
+				  "fbdev.\n");
+			return ret;
+		}
+		par->bo_size = new_bo_size;
+	}
+
+	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
+				      true, &mode_cmd);
+	if (IS_ERR(vfb))
+		return PTR_ERR(vfb);
+
+	par->set_fb = &vfb->base;
+
+	if (!par->bo_ptr) {
+		/*
+		 * Pin before mapping. Since we don't know in what placement
+		 * to pin, call into KMS to do it for us.
+		 */
+		ret = vfb->pin(vfb);
+		if (ret) {
+			DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+			return ret;
+		}
+
+		ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+				  par->vmw_bo->base.num_pages, &par->map);
+		if (ret) {
+			vfb->unpin(vfb);
+			DRM_ERROR("Could not map the fbdev framebuffer.\n");
+			return ret;
+		}
+
+		par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+	}
+
+	return 0;
+}
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+	struct vmw_private *vmw_priv = par->vmw_priv;
+	struct drm_mode_set set;
+	struct fb_var_screeninfo *var = &info->var;
+	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
+		DRM_MODE_TYPE_DRIVER,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+	};
+	struct drm_display_mode *old_mode;
+	struct drm_display_mode *mode;
+	int ret;
+
+	old_mode = par->set_mode;
+	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+	if (!mode) {
+		DRM_ERROR("Could not create new fb mode.\n");
+		return -ENOMEM;
+	}
+
+	mode->hdisplay = var->xres;
+	mode->vdisplay = var->yres;
+	vmw_guess_mode_timing(mode);
+
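+	/* Reuse the old mode object if nothing changed; otherwise make
+	 * sure the new mode fits in available VRAM. */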
+	if (old_mode && drm_mode_equal(old_mode, mode)) {
+		drm_mode_destroy(vmw_priv->dev, mode);
+		mode = old_mode;
+		old_mode = NULL;
+	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
+					       mode->hdisplay *
+					       (var->bits_per_pixel + 7) / 8,
+					       mode->vdisplay)) {
+		drm_mode_destroy(vmw_priv->dev, mode);
+		return -EINVAL;
+	}
+
+	mutex_lock(&par->bo_mutex);
+	drm_modeset_lock_all(vmw_priv->dev);
+	ret = vmw_fb_kms_framebuffer(info);
+	if (ret)
+		goto out_unlock;
+
+	par->fb_x = var->xoffset;
+	par->fb_y = var->yoffset;
+
+	set.crtc = par->crtc;
+	set.x = 0;
+	set.y = 0;
+	set.mode = mode;
+	set.fb = par->set_fb;
+	set.num_connectors = 1;
+	set.connectors = &par->con;
+
+	ret = drm_mode_set_config_internal(&set);
+	if (ret)
+		goto out_unlock;
+
+	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
+			  par->set_fb->width, par->set_fb->height);
+
+	/* If there already was stuff dirty we won't
+	 * schedule new work, so let's do it now */
+
+	schedule_delayed_work(&par->local_work, 0);
+
+out_unlock:
+	if (old_mode)
+		drm_mode_destroy(vmw_priv->dev, old_mode);
+	par->set_mode = mode;
+
+	drm_modeset_unlock_all(vmw_priv->dev);
+	mutex_unlock(&par->bo_mutex);
+
+	return ret;
+}
+
+
+static struct fb_ops vmw_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = vmw_fb_check_var,
+	.fb_set_par = vmw_fb_set_par,
+	.fb_setcolreg = vmw_fb_setcolreg,
+	.fb_fillrect = vmw_fb_fillrect,
+	.fb_copyarea = vmw_fb_copyarea,
+	.fb_imageblit = vmw_fb_imageblit,
+	.fb_pan_display = vmw_fb_pan_display,
+	.fb_blank = vmw_fb_blank,
+};
+
 int vmw_fb_init(struct vmw_private *vmw_priv)
 {
 	struct device *device = &vmw_priv->dev->pdev->dev;
 	struct vmw_fb_par *par;
 	struct fb_info *info;
-	unsigned initial_width, initial_height;
 	unsigned fb_width, fb_height;
 	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+	struct drm_display_mode *init_mode;
 	int ret;
 
 	fb_bpp = 32;
@@ -428,9 +650,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
 	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
 
-	initial_width = min(vmw_priv->initial_width, fb_width);
-	initial_height = min(vmw_priv->initial_height, fb_height);
-
 	fb_pitch = fb_width * fb_bpp / 8;
 	fb_size = fb_pitch * fb_height;
 	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -444,35 +663,35 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	 */
 	vmw_priv->fb_info = info;
 	par = info->par;
+	memset(par, 0, sizeof(*par));
+	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
 	par->vmw_priv = vmw_priv;
-	par->depth = fb_depth;
-	par->bpp = fb_bpp;
 	par->vmalloc = NULL;
 	par->max_width = fb_width;
 	par->max_height = fb_height;
 
+	drm_modeset_lock_all(vmw_priv->dev);
+	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
+				      par->max_height, &par->con,
+				      &par->crtc, &init_mode);
+	if (ret) {
+		drm_modeset_unlock_all(vmw_priv->dev);
+		goto err_kms;
+	}
+
+	info->var.xres = init_mode->hdisplay;
+	info->var.yres = init_mode->vdisplay;
+	drm_modeset_unlock_all(vmw_priv->dev);
+
 	/*
 	 * Create buffers and alloc memory
 	 */
-	par->vmalloc = vmalloc(fb_size);
+	par->vmalloc = vzalloc(fb_size);
 	if (unlikely(par->vmalloc == NULL)) {
 		ret = -ENOMEM;
 		goto err_free;
 	}
 
-	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
-	if (unlikely(ret != 0))
-		goto err_free;
-
-	ret = ttm_bo_kmap(&par->vmw_bo->base,
-			  0,
-			  par->vmw_bo->base.num_pages,
-			  &par->map);
-	if (unlikely(ret != 0))
-		goto err_unref;
-	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
-	par->bo_size = fb_size;
-
 	/*
 	 * Fixed and var
 	 */
@@ -490,7 +709,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	info->fix.smem_len = fb_size;
 
 	info->pseudo_palette = par->pseudo_palette;
-	info->screen_base = par->vmalloc;
+	info->screen_base = (char __iomem *)par->vmalloc;
 	info->screen_size = fb_size;
 
 	info->flags = FBINFO_DEFAULT;
@@ -508,18 +727,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 
 	info->var.xres_virtual = fb_width;
 	info->var.yres_virtual = fb_height;
-	info->var.bits_per_pixel = par->bpp;
+	info->var.bits_per_pixel = fb_bpp;
 	info->var.xoffset = 0;
 	info->var.yoffset = 0;
 	info->var.activate = FB_ACTIVATE_NOW;
 	info->var.height = -1;
 	info->var.width = -1;
 
-	info->var.xres = initial_width;
-	info->var.yres = initial_height;
-
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
 	info->apertures = alloc_apertures(1);
 	if (!info->apertures) {
 		ret = -ENOMEM;
@@ -535,6 +750,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	par->dirty.y1 = par->dirty.y2 = 0;
 	par->dirty.active = true;
 	spin_lock_init(&par->dirty.lock);
+	mutex_init(&par->bo_mutex);
 	info->fbdefio = &vmw_defio;
 	fb_deferred_io_init(info);
 
@@ -542,16 +758,16 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	if (unlikely(ret != 0))
 		goto err_defio;
 
+	vmw_fb_set_par(info);
+
 	return 0;
 
 err_defio:
 	fb_deferred_io_cleanup(info);
 err_aper:
-	ttm_bo_kunmap(&par->map);
-err_unref:
-	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
 err_free:
 	vfree(par->vmalloc);
+err_kms:
 	framebuffer_release(info);
 	vmw_priv->fb_info = NULL;
 
@@ -562,22 +778,19 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
 {
 	struct fb_info *info;
 	struct vmw_fb_par *par;
-	struct ttm_buffer_object *bo;
 
 	if (!vmw_priv->fb_info)
 		return 0;
 
 	info = vmw_priv->fb_info;
 	par = info->par;
-	bo = &par->vmw_bo->base;
-	par->vmw_bo = NULL;
 
 	/* ??? order */
 	fb_deferred_io_cleanup(info);
+	cancel_delayed_work_sync(&par->local_work);
 	unregister_framebuffer(info);
 
-	ttm_bo_kunmap(&par->map);
-	ttm_bo_unref(&bo);
+	(void) vmw_fb_kms_detach(par, true, true);
 
 	vfree(par->vmalloc);
 	framebuffer_release(info);
@@ -602,11 +815,11 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
 
 	flush_delayed_work(&info->deferred_work);
+	flush_delayed_work(&par->local_work);
 
-	par->bo_ptr = NULL;
-	ttm_bo_kunmap(&par->map);
-
-	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
+	mutex_lock(&par->bo_mutex);
+	(void) vmw_fb_kms_detach(par, true, false);
+	mutex_unlock(&par->bo_mutex);
 
 	return 0;
 }
@@ -616,8 +829,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
 	struct fb_info *info;
 	struct vmw_fb_par *par;
 	unsigned long flags;
-	bool dummy;
-	int ret;
 
 	if (!vmw_priv->fb_info)
 		return -EINVAL;
@@ -625,38 +836,10 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
 	info = vmw_priv->fb_info;
 	par = info->par;
 
-	/* we are already active */
-	if (par->bo_ptr != NULL)
-		return 0;
-
-	/* Make sure that all overlays are stoped when we take over */
-	vmw_overlay_stop_all(vmw_priv);
-
-	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("could not move buffer to start of VRAM\n");
-		goto err_no_buffer;
-	}
-
-	ret = ttm_bo_kmap(&par->vmw_bo->base,
-			  0,
-			  par->vmw_bo->base.num_pages,
-			  &par->map);
-	BUG_ON(ret != 0);
-	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
-
+	vmw_fb_set_par(info);
 	spin_lock_irqsave(&par->dirty.lock, flags);
 	par->dirty.active = true;
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
-
-err_no_buffer:
-	vmw_fb_set_par(info);
-
-	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
-
-	/* If there already was stuff dirty we wont
-	 * schedule a new work, so lets do it now */
-	schedule_delayed_work(&info->deferred_work, 0);
-
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 945f1e0dad92..567ddede51d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -142,7 +142,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	struct vmw_private *dev_priv = fman->dev_priv;
 
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
 		return false;
@@ -386,7 +386,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 				      u32 passed_seqno)
 {
 	u32 goal_seqno;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 	struct vmw_fence_obj *fence;
 
 	if (likely(!fman->seqno_valid))
@@ -430,7 +430,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 {
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	u32 goal_seqno;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 
 	if (fence_is_signaled_locked(&fence->base))
 		return false;
@@ -453,7 +453,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
 	struct list_head action_list;
 	bool needs_rerun;
 	uint32_t seqno, new_seqno;
-	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
 
 	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 rerun:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 26a4add39208..8be6c29f5eb5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39f2b03888e7..80c40c31d4f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,9 +29,14 @@
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_placement.h>
 
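+/*
+ * Layout of the temporary DX set-context command whose size is accounted
+ * for in vmw_local_fifo_commit().
+ */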
+struct vmw_temp_set_context {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDXTempSetContext body;
+};
+
 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
@@ -71,8 +76,8 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
 		return false;
 
-	/* Non-Screen Object path does not support surfaces */
-	if (!dev_priv->sou_priv)
+	/* Legacy Display Unit does not support surfaces */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
 		return false;
 
 	return true;
@@ -80,7 +85,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 
 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t caps;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,11 +100,11 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
-	uint32_t dummy;
 
+	fifo->dx = false;
 	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
 	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
 	if (unlikely(fifo->static_buffer == NULL))
@@ -112,10 +117,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	mutex_init(&fifo->fifo_mutex);
 	init_rwsem(&fifo->rwsem);
 
-	/*
-	 * Allow mapping the first page read-only to user-space.
-	 */
-
 	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
 	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
 	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
@@ -123,7 +124,10 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
 	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
 	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
-	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
+	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+		  SVGA_REG_ENABLE_HIDE);
+	vmw_write(dev_priv, SVGA_REG_TRACES, 0);
 
 	min = 4;
 	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
@@ -155,12 +159,13 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
 	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
 	vmw_marker_queue_init(&fifo->marker_queue);
-	return vmw_fifo_send_fence(dev_priv, &dummy);
+
+	return 0;
 }
 
 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	static DEFINE_SPINLOCK(ping_lock);
 	unsigned long irq_flags;
 
@@ -178,7 +183,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
 	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -208,7 +213,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -312,10 +317,11 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
  * Returns:
  *   Pointer to the fifo, or null on error (possible hardware hang).
  */
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
+				    uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
 	uint32_t next_cmd;
@@ -372,7 +378,8 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 				if (reserveable)
 					iowrite32(bytes, fifo_mem +
 						  SVGA_FIFO_RESERVED);
-				return fifo_mem + (next_cmd >> 2);
+				return (void __force *) (fifo_mem +
+							 (next_cmd >> 2));
 			} else {
 				need_bounce = true;
 			}
@@ -391,11 +398,36 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 out_err:
 	fifo_state->reserved_size = 0;
 	mutex_unlock(&fifo_state->fifo_mutex);
+
 	return NULL;
 }
 
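+/*
+ * Reserve @bytes of command space. With a command buffer manager present,
+ * the reservation is routed to a command buffer bound to @ctx_id;
+ * otherwise only SVGA3D_INVALID_ID is legal and the local fifo is used.
+ */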
+void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+			  int ctx_id)
+{
+	void *ret;
+
+	if (dev_priv->cman)
+		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
+					 ctx_id, false, NULL);
+	else if (ctx_id == SVGA3D_INVALID_ID)
+		ret = vmw_local_fifo_reserve(dev_priv, bytes);
+	else {
+		WARN(1, "Command buffer has not been allocated.\n");
+		ret = NULL;
+	}
+	if (IS_ERR_OR_NULL(ret)) {
+		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
+			  (unsigned) bytes);
+		dump_stack();
+		return NULL;
+	}
+
+	return ret;
+}
+
 static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
-			      __le32 __iomem *fifo_mem,
+			      u32 __iomem *fifo_mem,
 			      uint32_t next_cmd,
 			      uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -417,7 +449,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
 }
 
 static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
-			       __le32 __iomem *fifo_mem,
+			       u32 __iomem *fifo_mem,
 			       uint32_t next_cmd,
 			       uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -436,15 +468,19 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
 	}
 }
 
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
 	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
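+	/*
+	 * The reserved space may also hold a temporary DX set-context
+	 * command; include its size in the committed byte count.
+	 */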
+	if (fifo_state->dx)
+		bytes += sizeof(struct vmw_temp_set_context);
+
+	fifo_state->dx = false;
 	BUG_ON((bytes & 3) != 0);
 	BUG_ON(bytes > fifo_state->reserved_size);
 
@@ -482,13 +518,53 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	if (dev_priv->cman)
+		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
+	else
+		vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+
+/**
+ * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @bytes: Number of bytes to commit.
+ */
+void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	if (dev_priv->cman)
+		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
+	else
+		vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+/**
+ * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * starts.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @interruptible: Whether to wait interruptibly if the function needs to sleep.
+ */
+int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+{
+	might_sleep();
+
+	if (dev_priv->cman)
+		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
+	else
+		return 0;
+}
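+
+/*
+ * Typical use of the reserve/commit pair (illustrative sketch; actual
+ * callers are spread throughout the driver):
+ *
+ *	cmd = vmw_fifo_reserve(dev_priv, size);
+ *	if (!cmd)
+ *		return -ENOMEM;
+ *	... fill in size bytes of commands at cmd ...
+ *	vmw_fifo_commit(dev_priv, size);
+ */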
+
 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct svga_fifo_cmd_fence *cmd_fence;
-	void *fm;
+	u32 *fm;
 	int ret = 0;
-	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
@@ -514,12 +590,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 		return 0;
 	}
 
-	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
-	cmd_fence = (struct svga_fifo_cmd_fence *)
-	    ((unsigned long)fm + sizeof(__le32));
-
-	iowrite32(*seqno, &cmd_fence->fence);
-	vmw_fifo_commit(dev_priv, bytes);
+	*fm++ = SVGA_CMD_FENCE;
+	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+	cmd_fence->fence = *seqno;
+	vmw_fifo_commit_flush(dev_priv, bytes);
 	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
 	vmw_update_seqno(dev_priv, fifo_state);
 
@@ -545,7 +619,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
 	 * without writing to the query result structure.
 	 */
 
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForQuery body;
@@ -594,7 +668,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
 	 * without writing to the query result structure.
 	 */
 
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForGBQuery body;
@@ -647,3 +721,8 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 
 	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
 }
+
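+/*
+ * Compatibility wrapper: reserve fifo space with no DX context bound.
+ */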
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 61d8d803199f..66ffa1d4759c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 69c8ce23123c..0a970afed93b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
 #include "vmwgfx_drv.h"
 #include <drm/vmwgfx_drm.h>
 #include "vmwgfx_kms.h"
+#include "device_include/svga3d_caps.h"
 
 struct svga_3d_compat_cap {
 	SVGA3dCapsRecordHeader header;
@@ -63,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	case DRM_VMW_PARAM_FIFO_HW_VERSION:
 	{
-		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
 		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -105,6 +106,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 	case DRM_VMW_PARAM_MAX_MOB_SIZE:
 		param->value = dev_priv->max_mob_size;
 		break;
+	case DRM_VMW_PARAM_SCREEN_TARGET:
+		param->value =
+			(dev_priv->active_display_unit == vmw_du_screen_target);
+		break;
+	case DRM_VMW_PARAM_DX:
+		param->value = dev_priv->has_dx;
+		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
 			  param->param);
@@ -154,7 +162,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 		(struct drm_vmw_get_3d_cap_arg *) data;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	uint32_t size;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
 	void *bounce;
 	int ret;
@@ -235,7 +243,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	int ret;
 
 	num_clips = arg->num_clips;
-	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
 	if (unlikely(num_clips == 0))
 		return 0;
@@ -318,7 +326,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 	int ret;
 
 	num_clips = arg->num_clips;
-	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
 	if (unlikely(num_clips == 0))
 		return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 9fe9827ee499..9498a5e33c12 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -56,6 +56,9 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
 	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
 		wake_up_all(&dev_priv->fifo_queue);
 
+	if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+			     SVGA_IRQFLAG_ERROR))
+		vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
 
 	return IRQ_HANDLED;
 }
@@ -69,7 +72,7 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 void vmw_update_seqno(struct vmw_private *dev_priv,
 			 struct vmw_fifo_state *fifo_state)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
 	if (dev_priv->last_read_seqno != seqno) {
@@ -131,8 +134,16 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	 * Block command submission while waiting for idle.
 	 */
 
-	if (fifo_idle)
+	if (fifo_idle) {
 		down_read(&fifo_state->rwsem);
+		if (dev_priv->cman) {
+			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
+					      10*HZ);
+			if (ret)
+				goto out_err;
+		}
+	}
+
 	signal_seq = atomic_read(&dev_priv->marker_seq);
 	ret = 0;
 
@@ -167,10 +178,11 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	}
 	finish_wait(&dev_priv->fence_queue, &__wait);
 	if (ret == 0 && fifo_idle) {
-		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
 	}
 	wake_up_all(&dev_priv->fence_queue);
+out_err:
 	if (fifo_idle)
 		up_read(&fifo_state->rwsem);
 
@@ -315,3 +327,30 @@ void vmw_irq_uninstall(struct drm_device *dev)
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
+
+void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+			    u32 flag, int *waiter_count)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+	if ((*waiter_count)++ == 0) {
+		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+		dev_priv->irq_mask |= flag;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+}
+
+void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+			       u32 flag, int *waiter_count)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+	if (--(*waiter_count) == 0) {
+		dev_priv->irq_mask &= ~flag;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+}
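+
+/*
+ * Usage sketch (names illustrative): a sleeper that needs an extra IRQ
+ * flag enabled brackets its wait with these helpers:
+ *
+ *	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
+ *			       &waiter_count);
+ *	... wait until woken by vmw_irq_handler() ...
+ *	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
+ *				  &waiter_count);
+ */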
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 07cda8cbbddb..61fb7f3de311 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,45 +31,7 @@
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
-
-struct vmw_clip_rect {
-	int x1, x2, y1, y2;
-};
-
-/**
- * Clip @num_rects number of @rects against @clip storing the
- * results in @out_rects and the number of passed rects in @out_num.
- */
-static void vmw_clip_cliprects(struct drm_clip_rect *rects,
-			int num_rects,
-			struct vmw_clip_rect clip,
-			SVGASignedRect *out_rects,
-			int *out_num)
-{
-	int i, k;
-
-	for (i = 0, k = 0; i < num_rects; i++) {
-		int x1 = max_t(int, clip.x1, rects[i].x1);
-		int y1 = max_t(int, clip.y1, rects[i].y1);
-		int x2 = min_t(int, clip.x2, rects[i].x2);
-		int y2 = min_t(int, clip.y2, rects[i].y2);
-
-		if (x1 >= x2)
-			continue;
-		if (y1 >= y2)
-			continue;
-
-		out_rects[k].left   = x1;
-		out_rects[k].top    = y1;
-		out_rects[k].right  = x2;
-		out_rects[k].bottom = y2;
-		k++;
-	}
-
-	*out_num = k;
-}
-
-void vmw_display_unit_cleanup(struct vmw_display_unit *du)
+void vmw_du_cleanup(struct vmw_display_unit *du)
 {
 	if (du->cursor_surface)
 		vmw_surface_unreference(&du->cursor_surface);
@@ -109,12 +71,12 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
 
 	memcpy(&cmd[1], image, image_size);
 
-	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
-	cmd->cursor.id = cpu_to_le32(0);
-	cmd->cursor.width = cpu_to_le32(width);
-	cmd->cursor.height = cpu_to_le32(height);
-	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
-	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+	cmd->cursor.id = 0;
+	cmd->cursor.width = width;
+	cmd->cursor.height = height;
+	cmd->cursor.hotspotX = hotspotX;
+	cmd->cursor.hotspotY = hotspotY;
 
 	vmw_fifo_commit(dev_priv, cmd_size);
 
@@ -161,7 +123,7 @@ err_unreserve:
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
 				bool show, int x, int y)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t count;
 
 	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
@@ -367,15 +329,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 
 	srf->snooper.age++;
 
-	/* we can't call this function from this function since execbuf has
-	 * reserved fifo space.
-	 *
-	 * if (srf->snooper.crtc)
-	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
-	 *					 srf->snooper.image, 64, 64,
-	 *					 du->hotspot_x, du->hotspot_y);
-	 */
-
 	ttm_bo_kunmap(&map);
 err_unreserve:
 	ttm_bo_unreserve(bo);
@@ -412,183 +365,19 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
  * Surface framebuffer code
  */
 
-#define vmw_framebuffer_to_vfbs(x) \
-	container_of(x, struct vmw_framebuffer_surface, base.base)
-
-struct vmw_framebuffer_surface {
-	struct vmw_framebuffer base;
-	struct vmw_surface *surface;
-	struct vmw_dma_buffer *buffer;
-	struct list_head head;
-	struct drm_master *master;
-};
-
 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
-	struct vmw_master *vmaster = vmw_master(vfbs->master);
-
 
-	mutex_lock(&vmaster->fb_surf_mutex);
-	list_del(&vfbs->head);
-	mutex_unlock(&vmaster->fb_surf_mutex);
-
-	drm_master_put(&vfbs->master);
 	drm_framebuffer_cleanup(framebuffer);
 	vmw_surface_unreference(&vfbs->surface);
-	ttm_base_object_unref(&vfbs->base.user_obj);
+	if (vfbs->base.user_obj)
+		ttm_base_object_unref(&vfbs->base.user_obj);
 
 	kfree(vfbs);
 }
 
-static int do_surface_dirty_sou(struct vmw_private *dev_priv,
-				struct drm_file *file_priv,
-				struct vmw_framebuffer *framebuffer,
-				unsigned flags, unsigned color,
-				struct drm_clip_rect *clips,
-				unsigned num_clips, int inc,
-				struct vmw_fence_obj **out_fence)
-{
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_clip_rect *clips_ptr;
-	struct drm_clip_rect *tmp;
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-	int i, num_units;
-	int ret = 0; /* silence warning */
-	int left, right, top, bottom;
-
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdBlitSurfaceToScreen body;
-	} *cmd;
-	SVGASignedRect *blits;
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
-			    head) {
-		if (crtc->primary->fb != &framebuffer->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
-	}
-
-	BUG_ON(!clips || !num_clips);
-
-	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
-	if (unlikely(tmp == NULL)) {
-		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
-		return -ENOMEM;
-	}
-
-	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
-	cmd = kzalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Temporary fifo memory alloc failed.\n");
-		ret = -ENOMEM;
-		goto out_free_tmp;
-	}
-
-	/* setup blits pointer */
-	blits = (SVGASignedRect *)&cmd[1];
-
-	/* initial clip region */
-	left = clips->x1;
-	right = clips->x2;
-	top = clips->y1;
-	bottom = clips->y2;
-
-	/* skip the first clip rect */
-	for (i = 1, clips_ptr = clips + inc;
-	     i < num_clips; i++, clips_ptr += inc) {
-		left = min_t(int, left, (int)clips_ptr->x1);
-		right = max_t(int, right, (int)clips_ptr->x2);
-		top = min_t(int, top, (int)clips_ptr->y1);
-		bottom = max_t(int, bottom, (int)clips_ptr->y2);
-	}
-
-	/* only need to do this once */
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
-	cmd->body.srcRect.left = left;
-	cmd->body.srcRect.right = right;
-	cmd->body.srcRect.top = top;
-	cmd->body.srcRect.bottom = bottom;
-
-	clips_ptr = clips;
-	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
-		tmp[i].x1 = clips_ptr->x1 - left;
-		tmp[i].x2 = clips_ptr->x2 - left;
-		tmp[i].y1 = clips_ptr->y1 - top;
-		tmp[i].y2 = clips_ptr->y2 - top;
-	}
-
-	/* do per unit writing, reuse fifo for each */
-	for (i = 0; i < num_units; i++) {
-		struct vmw_display_unit *unit = units[i];
-		struct vmw_clip_rect clip;
-		int num;
-
-		clip.x1 = left - unit->crtc.x;
-		clip.y1 = top - unit->crtc.y;
-		clip.x2 = right - unit->crtc.x;
-		clip.y2 = bottom - unit->crtc.y;
-
-		/* skip any crtcs that misses the clip region */
-		if (clip.x1 >= unit->crtc.mode.hdisplay ||
-		    clip.y1 >= unit->crtc.mode.vdisplay ||
-		    clip.x2 <= 0 || clip.y2 <= 0)
-			continue;
-
-		/*
-		 * In order for the clip rects to be correctly scaled
-		 * the src and dest rects needs to be the same size.
-		 */
-		cmd->body.destRect.left = clip.x1;
-		cmd->body.destRect.right = clip.x2;
-		cmd->body.destRect.top = clip.y1;
-		cmd->body.destRect.bottom = clip.y2;
-
-		/* create a clip rect of the crtc in dest coords */
-		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
-		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
-		clip.x1 = 0 - clip.x1;
-		clip.y1 = 0 - clip.y1;
-
-		/* need to reset sid as it is changed by execbuf */
-		cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-		cmd->body.destScreenId = unit->unit;
-
-		/* clip and write blits to cmd stream */
-		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
-		/* if no cliprects hit skip this */
-		if (num == 0)
-			continue;
-
-		/* only return the last fence */
-		if (out_fence && *out_fence)
-			vmw_fence_obj_unreference(out_fence);
-
-		/* recalculate package length */
-		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
-		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-					  fifo_size, 0, NULL, out_fence);
-
-		if (unlikely(ret != 0))
-			break;
-	}
-
-
-	kfree(cmd);
-out_free_tmp:
-	kfree(tmp);
-
-	return ret;
-}
-
 static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 				  struct drm_file *file_priv,
 				  unsigned flags, unsigned color,
@@ -601,11 +390,8 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 	struct drm_clip_rect norect;
 	int ret, inc = 1;
 
-	if (unlikely(vfbs->master != file_priv->master))
-		return -EINVAL;
-
-	/* Require ScreenObject support for 3D */
-	if (!dev_priv->sou_priv)
+	/* Legacy Display Unit does not support 3D */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
 		return -EINVAL;
 
 	drm_modeset_lock_all(dev_priv->dev);
@@ -627,10 +413,16 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 		inc = 2; /* skip source rects */
 	}
 
-	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
-				   flags, color,
-				   clips, num_clips, inc, NULL);
+	if (dev_priv->active_display_unit == vmw_du_screen_object)
+		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
+						   clips, NULL, NULL, 0, 0,
+						   num_clips, inc, NULL);
+	else
+		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
+						 clips, NULL, NULL, 0, 0,
+						 num_clips, inc, NULL);
 
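+	/* Push out any buffered dirty commands to the device. */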
+	vmw_fifo_flush(dev_priv, false);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	drm_modeset_unlock_all(dev_priv->dev);
@@ -638,27 +430,66 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 	return 0;
 }
 
+/**
+ * vmw_kms_readback - Perform a readback from the screen system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips)
+{
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_object:
+		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
+					    user_fence_rep, vclips, num_clips);
+	case vmw_du_screen_target:
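+		/*
+		 * Screen targets implement readback as a DMA from the
+		 * surface to the buffer (to_surface == false below).
+		 */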
+		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
+					user_fence_rep, NULL, vclips, num_clips,
+					1, false, true);
+	default:
+		WARN_ONCE(true,
+			  "Readback called with invalid display system.\n");
+	}
+
+	return -ENOSYS;
+}
+
 static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 	.destroy = vmw_framebuffer_surface_destroy,
 	.dirty = vmw_framebuffer_surface_dirty,
 };
 
 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
-					   struct drm_file *file_priv,
 					   struct vmw_surface *surface,
 					   struct vmw_framebuffer **out,
 					   const struct drm_mode_fb_cmd
-					   *mode_cmd)
+					   *mode_cmd,
+					   bool is_dmabuf_proxy)
 
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct vmw_framebuffer_surface *vfbs;
 	enum SVGA3dSurfaceFormat format;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	/* 3D is only supported on HWv8 hosts which supports screen objects */
-	if (!dev_priv->sou_priv)
+	/* 3D is only supported on HWv8 and newer hosts */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
 		return -ENOSYS;
 
 	/*
@@ -692,15 +523,16 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	case 15:
 		format = SVGA3D_A1R5G5B5;
 		break;
-	case 8:
-		format = SVGA3D_LUMINANCE8;
-		break;
 	default:
 		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
 		return -EINVAL;
 	}
 
-	if (unlikely(format != surface->format)) {
+	/*
+	 * For DX, surface format validation is done when surface->scanout
+	 * is set.
+	 */
+	if (!dev_priv->has_dx && format != surface->format) {
 		DRM_ERROR("Invalid surface format for requested mode.\n");
 		return -EINVAL;
 	}
@@ -711,38 +543,27 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 		goto out_err1;
 	}
 
-	if (!vmw_surface_reference(surface)) {
-		DRM_ERROR("failed to reference surface %p\n", surface);
-		ret = -EINVAL;
-		goto out_err2;
-	}
-
 	/* XXX get the first 3 from the surface info */
 	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
 	vfbs->base.base.pitches[0] = mode_cmd->pitch;
 	vfbs->base.base.depth = mode_cmd->depth;
 	vfbs->base.base.width = mode_cmd->width;
 	vfbs->base.base.height = mode_cmd->height;
-	vfbs->surface = surface;
+	vfbs->surface = vmw_surface_reference(surface);
 	vfbs->base.user_handle = mode_cmd->handle;
-	vfbs->master = drm_master_get(file_priv->master);
-
-	mutex_lock(&vmaster->fb_surf_mutex);
-	list_add_tail(&vfbs->head, &vmaster->fb_surf);
-	mutex_unlock(&vmaster->fb_surf_mutex);
+	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
 
 	*out = &vfbs->base;
 
 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
 				   &vmw_framebuffer_surface_funcs);
 	if (ret)
-		goto out_err3;
+		goto out_err2;
 
 	return 0;
 
-out_err3:
-	vmw_surface_unreference(&surface);
 out_err2:
+	vmw_surface_unreference(&surface);
 	kfree(vfbs);
 out_err1:
 	return ret;
@@ -752,14 +573,6 @@ out_err1:
  * Dmabuf framebuffer code
  */
 
-#define vmw_framebuffer_to_vfbd(x) \
-	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
-
-struct vmw_framebuffer_dmabuf {
-	struct vmw_framebuffer base;
-	struct vmw_dma_buffer *buffer;
-};
-
 static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct vmw_framebuffer_dmabuf *vfbd =
@@ -767,185 +580,12 @@ static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 
 	drm_framebuffer_cleanup(framebuffer);
 	vmw_dmabuf_unreference(&vfbd->buffer);
-	ttm_base_object_unref(&vfbd->base.user_obj);
+	if (vfbd->base.user_obj)
+		ttm_base_object_unref(&vfbd->base.user_obj);
 
 	kfree(vfbd);
 }
 
-static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
-			       struct vmw_framebuffer *framebuffer,
-			       unsigned flags, unsigned color,
-			       struct drm_clip_rect *clips,
-			       unsigned num_clips, int increment)
-{
-	size_t fifo_size;
-	int i;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdUpdate body;
-	} *cmd;
-
-	fifo_size = sizeof(*cmd) * num_clips;
-	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Fifo reserve failed.\n");
-		return -ENOMEM;
-	}
-
-	memset(cmd, 0, fifo_size);
-	for (i = 0; i < num_clips; i++, clips += increment) {
-		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
-		cmd[i].body.x = cpu_to_le32(clips->x1);
-		cmd[i].body.y = cpu_to_le32(clips->y1);
-		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
-		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
-	}
-
-	vmw_fifo_commit(dev_priv, fifo_size);
-	return 0;
-}
-
-static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
-				  struct vmw_private *dev_priv,
-				  struct vmw_framebuffer *framebuffer)
-{
-	int depth = framebuffer->base.depth;
-	size_t fifo_size;
-	int ret;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdDefineGMRFB body;
-	} *cmd;
-
-	/* Emulate RGBA support, contrary to svga_reg.h this is not
-	 * supported by hosts. This is only a problem if we are reading
-	 * this value later and expecting what we uploaded back.
-	 */
-	if (depth == 32)
-		depth = 24;
-
-	fifo_size = sizeof(*cmd);
-	cmd = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
-		return -ENOMEM;
-	}
-
-	memset(cmd, 0, fifo_size);
-	cmd->header = SVGA_CMD_DEFINE_GMRFB;
-	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
-	cmd->body.format.colorDepth = depth;
-	cmd->body.format.reserved = 0;
-	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
-	cmd->body.ptr.gmrId = framebuffer->user_handle;
-	cmd->body.ptr.offset = 0;
-
-	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-				  fifo_size, 0, NULL, NULL);
-
-	kfree(cmd);
-
-	return ret;
-}
-
-static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
-			       struct vmw_private *dev_priv,
-			       struct vmw_framebuffer *framebuffer,
-			       unsigned flags, unsigned color,
-			       struct drm_clip_rect *clips,
-			       unsigned num_clips, int increment,
-			       struct vmw_fence_obj **out_fence)
-{
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_clip_rect *clips_ptr;
-	int i, k, num_units, ret;
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdBlitGMRFBToScreen body;
-	} *blits;
-
-	ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
-	if (unlikely(ret != 0))
-		return ret; /* define_gmrfb prints warnings */
-
-	fifo_size = sizeof(*blits) * num_clips;
-	blits = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(blits == NULL)) {
-		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
-		return -ENOMEM;
-	}
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb != &framebuffer->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
-	}
-
-	for (k = 0; k < num_units; k++) {
-		struct vmw_display_unit *unit = units[k];
-		int hit_num = 0;
-
-		clips_ptr = clips;
-		for (i = 0; i < num_clips; i++, clips_ptr += increment) {
-			int clip_x1 = clips_ptr->x1 - unit->crtc.x;
-			int clip_y1 = clips_ptr->y1 - unit->crtc.y;
-			int clip_x2 = clips_ptr->x2 - unit->crtc.x;
-			int clip_y2 = clips_ptr->y2 - unit->crtc.y;
-			int move_x, move_y;
-
-			/* skip any crtcs that misses the clip region */
-			if (clip_x1 >= unit->crtc.mode.hdisplay ||
-			    clip_y1 >= unit->crtc.mode.vdisplay ||
-			    clip_x2 <= 0 || clip_y2 <= 0)
-				continue;
-
-			/* clip size to crtc size */
-			clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
-			clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
-
-			/* translate both src and dest to bring clip into screen */
-			move_x = min_t(int, clip_x1, 0);
-			move_y = min_t(int, clip_y1, 0);
-
-			/* actual translate done here */
-			blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
-			blits[hit_num].body.destScreenId = unit->unit;
-			blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
-			blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
-			blits[hit_num].body.destRect.left = clip_x1 - move_x;
-			blits[hit_num].body.destRect.top = clip_y1 - move_y;
-			blits[hit_num].body.destRect.right = clip_x2;
-			blits[hit_num].body.destRect.bottom = clip_y2;
-			hit_num++;
-		}
-
-		/* no clips hit the crtc */
-		if (hit_num == 0)
-			continue;
-
-		/* only return the last fence */
-		if (out_fence && *out_fence)
-			vmw_fence_obj_unreference(out_fence);
-
-		fifo_size = sizeof(*blits) * hit_num;
-		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
-					  fifo_size, 0, NULL, out_fence);
-
-		if (unlikely(ret != 0))
-			break;
-	}
-
-	kfree(blits);
-
-	return ret;
-}
-
 static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 				 struct drm_file *file_priv,
 				 unsigned flags, unsigned color,
@@ -977,16 +617,29 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 		increment = 2;
 	}
 
-	if (dev_priv->ldu_priv) {
-		ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
-					  flags, color,
-					  clips, num_clips, increment);
-	} else {
-		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
-					  flags, color,
-					  clips, num_clips, increment, NULL);
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_target:
+		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
+				       clips, NULL, num_clips, increment,
+				       true, true);
+		break;
+	case vmw_du_screen_object:
+		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
+						  clips, num_clips, increment,
+						  true,
+						  NULL);
+		break;
+	case vmw_du_legacy:
+		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
+						  clips, num_clips, increment);
+		break;
+	default:
+		ret = -EINVAL;
+		WARN_ONCE(true, "Dirty called with invalid display system.\n");
+		break;
 	}
 
+	vmw_fifo_flush(dev_priv, false);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	drm_modeset_unlock_all(dev_priv->dev);
@@ -1002,41 +655,133 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
 /**
  * Pin the dmabuffer to the start of vram.
  */
-static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
+static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-	struct vmw_framebuffer_dmabuf *vfbd =
-		vmw_framebuffer_to_vfbd(&vfb->base);
+	struct vmw_dma_buffer *buf;
 	int ret;
 
-	/* This code should not be used with screen objects */
-	BUG_ON(dev_priv->sou_priv);
-
-	vmw_overlay_pause_all(dev_priv);
+	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 
-	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
-
-	vmw_overlay_resume_all(dev_priv);
+	if (!buf)
+		return 0;
 
-	WARN_ON(ret != 0);
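+	/*
+	 * Pin placement depends on the display unit: the legacy display
+	 * scans out from the start of VRAM, while screen objects and
+	 * screen targets can use VRAM or GMR, or a MOB for surface-backed
+	 * framebuffers.
+	 */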
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_legacy:
+		vmw_overlay_pause_all(dev_priv);
+		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+		vmw_overlay_resume_all(dev_priv);
+		break;
+	case vmw_du_screen_object:
+	case vmw_du_screen_target:
+		if (vfb->dmabuf)
+			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
+							     false);
+
+		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
+						   &vmw_mob_placement, false);
+	default:
+		return -EINVAL;
+	}
 
-	return 0;
+	return ret;
 }
 
-static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
+static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-	struct vmw_framebuffer_dmabuf *vfbd =
-		vmw_framebuffer_to_vfbd(&vfb->base);
+	struct vmw_dma_buffer *buf;
 
-	if (!vfbd->buffer) {
-		WARN_ON(!vfbd->buffer);
+	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
+
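+	/* A framebuffer without backing storage has nothing to unpin. */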
+	if (WARN_ON(!buf))
 		return 0;
+
+	return vmw_dmabuf_unpin(dev_priv, buf, false);
+}
+
+/**
+ * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ *
+ * @dev: DRM device
+ * @mode_cmd: parameters for the new surface
+ * @dmabuf_mob: MOB backing the DMA buf
+ * @srf_out: newly created surface
+ *
+ * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * same buffer.  This way we can do a surface copy rather than a surface DMA.
+ * This is a more efficient approach.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_create_dmabuf_proxy(struct drm_device *dev,
+				   const struct drm_mode_fb_cmd *mode_cmd,
+				   struct vmw_dma_buffer *dmabuf_mob,
+				   struct vmw_surface **srf_out)
+{
+	uint32_t format;
+	struct drm_vmw_size content_base_size;
+	struct vmw_resource *res;
+	int ret;
+
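+	/* Map the framebuffer depth to a matching surface format. */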
+	switch (mode_cmd->depth) {
+	case 32:
+	case 24:
+		format = SVGA3D_X8R8G8B8;
+		break;
+
+	case 16:
+	case 15:
+		format = SVGA3D_R5G6B5;
+		break;
+
+	case 8:
+		format = SVGA3D_P8;
+		break;
+
+	default:
+		DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
+		return -EINVAL;
 	}
 
-	return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
+	content_base_size.width  = mode_cmd->width;
+	content_base_size.height = mode_cmd->height;
+	content_base_size.depth  = 1;
+
+	ret = vmw_surface_gb_priv_define(dev,
+			0, /* kernel visible only */
+			0, /* flags */
+			format,
+			true, /* can be a scanout buffer */
+			1, /* num of mip levels */
+			0,
+			0,
+			content_base_size,
+			srf_out);
+	if (ret) {
+		DRM_ERROR("Failed to allocate proxy content buffer\n");
+		return ret;
+	}
+
+	res = &(*srf_out)->res;
+
+	/* Reserve and switch the backing mob. */
+	mutex_lock(&res->dev_priv->cmdbuf_mutex);
+	(void) vmw_resource_reserve(res, false, true);
+	vmw_dmabuf_unreference(&res->backup);
+	res->backup = vmw_dmabuf_reference(dmabuf_mob);
+	res->backup_offset = 0;
+	vmw_resource_unreserve(res, false, NULL, 0);
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+
+	return 0;
 }
 
 static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 					  struct vmw_dma_buffer *dmabuf,
 					  struct vmw_framebuffer **out,
@@ -1057,7 +802,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 	}
 
 	/* Limited framebuffer color depth support for screen objects */
-	if (dev_priv->sou_priv) {
+	if (dev_priv->active_display_unit == vmw_du_screen_object) {
 		switch (mode_cmd->depth) {
 		case 32:
 		case 24:
@@ -1089,41 +834,96 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 		goto out_err1;
 	}
 
-	if (!vmw_dmabuf_reference(dmabuf)) {
-		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
-		ret = -EINVAL;
-		goto out_err2;
-	}
-
 	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
 	vfbd->base.base.pitches[0] = mode_cmd->pitch;
 	vfbd->base.base.depth = mode_cmd->depth;
 	vfbd->base.base.width = mode_cmd->width;
 	vfbd->base.base.height = mode_cmd->height;
-	if (!dev_priv->sou_priv) {
-		vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
-		vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
-	}
 	vfbd->base.dmabuf = true;
-	vfbd->buffer = dmabuf;
+	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
 	vfbd->base.user_handle = mode_cmd->handle;
 	*out = &vfbd->base;
 
 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
 				   &vmw_framebuffer_dmabuf_funcs);
 	if (ret)
-		goto out_err3;
+		goto out_err2;
 
 	return 0;
 
-out_err3:
-	vmw_dmabuf_unreference(&dmabuf);
 out_err2:
+	vmw_dmabuf_unreference(&dmabuf);
 	kfree(vfbd);
 out_err1:
 	return ret;
 }
 
+/**
+ * vmw_kms_new_framebuffer - Create a new framebuffer.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @surface: Pointer to a surface to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @only_2d: No presents will occur to this dma buffer based framebuffer. This
+ * helps the code to do some important optimizations.
+ * @mode_cmd: Frame-buffer metadata.
+ */
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+			struct vmw_dma_buffer *dmabuf,
+			struct vmw_surface *surface,
+			bool only_2d,
+			const struct drm_mode_fb_cmd *mode_cmd)
+{
+	struct vmw_framebuffer *vfb = NULL;
+	bool is_dmabuf_proxy = false;
+	int ret;
+
+	/*
+	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
+	 * therefore, wrap the DMA buf in a surface so we can use the
+	 * SurfaceCopy command.
+	 */
+	if (dmabuf && only_2d &&
+	    dev_priv->active_display_unit == vmw_du_screen_target) {
+		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
+					      dmabuf, &surface);
+		if (ret)
+			return ERR_PTR(ret);
+
+		is_dmabuf_proxy = true;
+	}
+
+	/* Create the new framebuffer depending on what we have */
+	if (surface) {
+		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+						      mode_cmd,
+						      is_dmabuf_proxy);
+
+		/*
+		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
+		 * needed
+		 */
+		if (is_dmabuf_proxy)
+			vmw_surface_unreference(&surface);
+	} else if (dmabuf) {
+		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
+						     mode_cmd);
+	} else {
+		BUG();
+	}
+
+	if (ret)
+		return ERR_PTR(ret);
+
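+	/* Both framebuffer types share the generic pin/unpin hooks. */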
+	vfb->pin = vmw_framebuffer_pin;
+	vfb->unpin = vmw_framebuffer_unpin;
+
+	return vfb;
+}
+
 /*
  * Generic Kernel modesetting functions
  */
@@ -1157,7 +957,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	if (!vmw_kms_validate_mode_vram(dev_priv,
 					mode_cmd.pitch,
 					mode_cmd.height)) {
-		DRM_ERROR("VRAM size is too small for requested mode.\n");
+		DRM_ERROR("Requested mode exceed bounding box limit.\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1187,15 +987,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	if (ret)
 		goto err_out;
 
-	/* Create the new framebuffer depending one what we got back */
-	if (bo)
-		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-						     &mode_cmd);
-	else if (surface)
-		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
-						      surface, &vfb, &mode_cmd);
-	else
-		BUG();
+	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
+				      !(dev_priv->capabilities & SVGA_CAP_3D),
+				      &mode_cmd);
+	if (IS_ERR(vfb)) {
+		ret = PTR_ERR(vfb);
+		goto err_out;
+	}
 
 err_out:
 	/* vmw_user_lookup_handle takes one ref so does new_fb */
@@ -1218,6 +1016,21 @@ static const struct drm_mode_config_funcs vmw_kms_funcs = {
 	.fb_create = vmw_kms_fb_create,
 };
 
+static int vmw_kms_generic_present(struct vmw_private *dev_priv,
+				   struct drm_file *file_priv,
+				   struct vmw_framebuffer *vfb,
+				   struct vmw_surface *surface,
+				   uint32_t sid,
+				   int32_t destX, int32_t destY,
+				   struct drm_vmw_rect *clips,
+				   uint32_t num_clips)
+{
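+	/*
+	 * A present on screen objects is just a surface dirty blit with an
+	 * explicit destination offset.
+	 */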
+	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
+					    &surface->res, destX, destY,
+					    num_clips, 1, NULL);
+}
+
 int vmw_kms_present(struct vmw_private *dev_priv,
 		    struct drm_file *file_priv,
 		    struct vmw_framebuffer *vfb,
@@ -1227,238 +1040,31 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 		    struct drm_vmw_rect *clips,
 		    uint32_t num_clips)
 {
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_clip_rect *tmp;
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-	int i, k, num_units;
-	int ret = 0; /* silence warning */
-	int left, right, top, bottom;
-
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdBlitSurfaceToScreen body;
-	} *cmd;
-	SVGASignedRect *blits;
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb != &vfb->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
-	}
-
-	BUG_ON(surface == NULL);
-	BUG_ON(!clips || !num_clips);
-
-	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
-	if (unlikely(tmp == NULL)) {
-		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
-		return -ENOMEM;
-	}
-
-	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
-	cmd = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-		ret = -ENOMEM;
-		goto out_free_tmp;
-	}
-
-	left = clips->x;
-	right = clips->x + clips->w;
-	top = clips->y;
-	bottom = clips->y + clips->h;
-
-	for (i = 1; i < num_clips; i++) {
-		left = min_t(int, left, (int)clips[i].x);
-		right = max_t(int, right, (int)clips[i].x + clips[i].w);
-		top = min_t(int, top, (int)clips[i].y);
-		bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
-	}
-
-	/* only need to do this once */
-	memset(cmd, 0, fifo_size);
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-
-	blits = (SVGASignedRect *)&cmd[1];
-
-	cmd->body.srcRect.left = left;
-	cmd->body.srcRect.right = right;
-	cmd->body.srcRect.top = top;
-	cmd->body.srcRect.bottom = bottom;
-
-	for (i = 0; i < num_clips; i++) {
-		tmp[i].x1 = clips[i].x - left;
-		tmp[i].x2 = clips[i].x + clips[i].w - left;
-		tmp[i].y1 = clips[i].y - top;
-		tmp[i].y2 = clips[i].y + clips[i].h - top;
-	}
-
-	for (k = 0; k < num_units; k++) {
-		struct vmw_display_unit *unit = units[k];
-		struct vmw_clip_rect clip;
-		int num;
-
-		clip.x1 = left + destX - unit->crtc.x;
-		clip.y1 = top + destY - unit->crtc.y;
-		clip.x2 = right + destX - unit->crtc.x;
-		clip.y2 = bottom + destY - unit->crtc.y;
-
-		/* skip any crtcs that misses the clip region */
-		if (clip.x1 >= unit->crtc.mode.hdisplay ||
-		    clip.y1 >= unit->crtc.mode.vdisplay ||
-		    clip.x2 <= 0 || clip.y2 <= 0)
-			continue;
-
-		/*
-		 * In order for the clip rects to be correctly scaled
-		 * the src and dest rects needs to be the same size.
-		 */
-		cmd->body.destRect.left = clip.x1;
-		cmd->body.destRect.right = clip.x2;
-		cmd->body.destRect.top = clip.y1;
-		cmd->body.destRect.bottom = clip.y2;
-
-		/* create a clip rect of the crtc in dest coords */
-		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
-		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
-		clip.x1 = 0 - clip.x1;
-		clip.y1 = 0 - clip.y1;
-
-		/* need to reset sid as it is changed by execbuf */
-		cmd->body.srcImage.sid = sid;
-		cmd->body.destScreenId = unit->unit;
-
-		/* clip and write blits to cmd stream */
-		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
-		/* if no cliprects hit skip this */
-		if (num == 0)
-			continue;
-
-		/* recalculate package length */
-		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
-		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-					  fifo_size, 0, NULL, NULL);
-
-		if (unlikely(ret != 0))
-			break;
-	}
-
-	kfree(cmd);
-out_free_tmp:
-	kfree(tmp);
-
-	return ret;
-}
-
-int vmw_kms_readback(struct vmw_private *dev_priv,
-		     struct drm_file *file_priv,
-		     struct vmw_framebuffer *vfb,
-		     struct drm_vmw_fence_rep __user *user_fence_rep,
-		     struct drm_vmw_rect *clips,
-		     uint32_t num_clips)
-{
-	struct vmw_framebuffer_dmabuf *vfbd =
-		vmw_framebuffer_to_vfbd(&vfb->base);
-	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-	int i, k, ret, num_units, blits_pos;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdDefineGMRFB body;
-	} *cmd;
-	struct {
-		uint32_t header;
-		SVGAFifoCmdBlitScreenToGMRFB body;
-	} *blits;
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb != &vfb->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
-	}
-
-	BUG_ON(dmabuf == NULL);
-	BUG_ON(!clips || !num_clips);
-
-	/* take a safe guess at fifo size */
-	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
-	cmd = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-		return -ENOMEM;
-	}
-
-	memset(cmd, 0, fifo_size);
-	cmd->header = SVGA_CMD_DEFINE_GMRFB;
-	cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
-	cmd->body.format.colorDepth = vfb->base.depth;
-	cmd->body.format.reserved = 0;
-	cmd->body.bytesPerLine = vfb->base.pitches[0];
-	cmd->body.ptr.gmrId = vfb->user_handle;
-	cmd->body.ptr.offset = 0;
-
-	blits = (void *)&cmd[1];
-	blits_pos = 0;
-	for (i = 0; i < num_units; i++) {
-		struct drm_vmw_rect *c = clips;
-		for (k = 0; k < num_clips; k++, c++) {
-			/* transform clip coords to crtc origin based coords */
-			int clip_x1 = c->x - units[i]->crtc.x;
-			int clip_x2 = c->x - units[i]->crtc.x + c->w;
-			int clip_y1 = c->y - units[i]->crtc.y;
-			int clip_y2 = c->y - units[i]->crtc.y + c->h;
-			int dest_x = c->x;
-			int dest_y = c->y;
-
-			/* compensate for clipping, we negate
-			 * a negative number and add that.
-			 */
-			if (clip_x1 < 0)
-				dest_x += -clip_x1;
-			if (clip_y1 < 0)
-				dest_y += -clip_y1;
-
-			/* clip */
-			clip_x1 = max(clip_x1, 0);
-			clip_y1 = max(clip_y1, 0);
-			clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
-			clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
-
-			/* and cull any rects that misses the crtc */
-			if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
-			    clip_y1 >= units[i]->crtc.mode.vdisplay ||
-			    clip_x2 <= 0 || clip_y2 <= 0)
-				continue;
-
-			blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
-			blits[blits_pos].body.srcScreenId = units[i]->unit;
-			blits[blits_pos].body.destOrigin.x = dest_x;
-			blits[blits_pos].body.destOrigin.y = dest_y;
+	int ret;
 
-			blits[blits_pos].body.srcRect.left = clip_x1;
-			blits[blits_pos].body.srcRect.top = clip_y1;
-			blits[blits_pos].body.srcRect.right = clip_x2;
-			blits[blits_pos].body.srcRect.bottom = clip_y2;
-			blits_pos++;
-		}
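+	/* Dispatch the present to the active display unit implementation. */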
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_target:
+		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
+						 &surface->res, destX, destY,
+						 num_clips, 1, NULL);
+		break;
+	case vmw_du_screen_object:
+		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
+					      sid, destX, destY, clips,
+					      num_clips);
+		break;
+	default:
+		WARN_ONCE(true,
+			  "Present called with invalid display system.\n");
+		ret = -ENOSYS;
+		break;
 	}
-	/* reset size here and use calculated exact size from loops */
-	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
-
-	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
-				  0, user_fence_rep, NULL);
+	if (ret)
+		return ret;
 
-	kfree(cmd);
+	vmw_fifo_flush(dev_priv, false);
 
-	return ret;
+	return 0;
 }
 
 int vmw_kms_init(struct vmw_private *dev_priv)
@@ -1470,30 +1076,37 @@ int vmw_kms_init(struct vmw_private *dev_priv)
 	dev->mode_config.funcs = &vmw_kms_funcs;
 	dev->mode_config.min_width = 1;
 	dev->mode_config.min_height = 1;
-	/* assumed largest fb size */
-	dev->mode_config.max_width = 8192;
-	dev->mode_config.max_height = 8192;
+	dev->mode_config.max_width = dev_priv->texture_max_width;
+	dev->mode_config.max_height = dev_priv->texture_max_height;
 
-	ret = vmw_kms_init_screen_object_display(dev_priv);
-	if (ret) /* Fallback */
-		(void)vmw_kms_init_legacy_display_system(dev_priv);
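+	/* Prefer screen targets, fall back to screen objects, then legacy. */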
+	ret = vmw_kms_stdu_init_display(dev_priv);
+	if (ret) {
+		ret = vmw_kms_sou_init_display(dev_priv);
+		if (ret) /* Fallback */
+			ret = vmw_kms_ldu_init_display(dev_priv);
+	}
 
-	return 0;
+	return ret;
 }
 
 int vmw_kms_close(struct vmw_private *dev_priv)
 {
+	int ret;
+
 	/*
 	 * Docs says we should take the lock before calling this function
 	 * but since it destroys encoders and our destructor calls
 	 * drm_encoder_cleanup which takes the lock we deadlock.
 	 */
 	drm_mode_config_cleanup(dev_priv->dev);
-	if (dev_priv->sou_priv)
-		vmw_kms_close_screen_object_display(dev_priv);
+	if (dev_priv->active_display_unit == vmw_du_screen_object)
+		ret = vmw_kms_sou_close_display(dev_priv);
+	else if (dev_priv->active_display_unit == vmw_du_screen_target)
+		ret = vmw_kms_stdu_close_display(dev_priv);
 	else
-		vmw_kms_close_legacy_display_system(dev_priv);
-	return 0;
+		ret = vmw_kms_ldu_close_display(dev_priv);
+
+	return ret;
 }
 
 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
@@ -1569,7 +1182,7 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
 		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
 	else if (vmw_fifo_have_pitchlock(vmw_priv))
 		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
-						       SVGA_FIFO_PITCHLOCK);
+						   SVGA_FIFO_PITCHLOCK);
 
 	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
 		return 0;
@@ -1641,7 +1254,9 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
 				uint32_t pitch,
 				uint32_t height)
 {
-	return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
+	return ((u64) pitch * (u64) height) < (u64)
+		((dev_priv->active_display_unit == vmw_du_screen_target) ?
+		 dev_priv->prim_bb_mem : dev_priv->vram_size);
 }
 
 
@@ -1715,75 +1330,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
 	return 0;
 }
 
-int vmw_du_page_flip(struct drm_crtc *crtc,
-		     struct drm_framebuffer *fb,
-		     struct drm_pending_vblank_event *event,
-		     uint32_t page_flip_flags)
-{
-	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	struct drm_framebuffer *old_fb = crtc->primary->fb;
-	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
-	struct drm_file *file_priv ;
-	struct vmw_fence_obj *fence = NULL;
-	struct drm_clip_rect clips;
-	int ret;
-
-	if (event == NULL)
-		return -EINVAL;
-
-	/* require ScreenObject support for page flipping */
-	if (!dev_priv->sou_priv)
-		return -ENOSYS;
-
-	file_priv = event->base.file_priv;
-	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
-		return -EINVAL;
-
-	crtc->primary->fb = fb;
-
-	/* do a full screen dirty update */
-	clips.x1 = clips.y1 = 0;
-	clips.x2 = fb->width;
-	clips.y2 = fb->height;
-
-	if (vfb->dmabuf)
-		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
-					  0, 0, &clips, 1, 1, &fence);
-	else
-		ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
-					   0, 0, &clips, 1, 1, &fence);
-
-
-	if (ret != 0)
-		goto out_no_fence;
-	if (!fence) {
-		ret = -EINVAL;
-		goto out_no_fence;
-	}
-
-	ret = vmw_event_fence_action_queue(file_priv, fence,
-					   &event->base,
-					   &event->event.tv_sec,
-					   &event->event.tv_usec,
-					   true);
-
-	/*
-	 * No need to hold on to this now. The only cleanup
-	 * we need to do if we fail is unref the fence.
-	 */
-	vmw_fence_obj_unreference(&fence);
-
-	if (vmw_crtc_to_du(crtc)->is_implicit)
-		vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
-
-	return ret;
-
-out_no_fence:
-	crtc->primary->fb = old_fb;
-	return ret;
-}
-
-
 void vmw_du_crtc_save(struct drm_crtc *crtc)
 {
 }
@@ -1808,8 +1354,9 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
 	}
 }
 
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
 {
+	return 0;
 }
 
 void vmw_du_connector_save(struct drm_connector *connector)
@@ -1919,7 +1466,7 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
  * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
  * members filled in.
  */
-static void vmw_guess_mode_timing(struct drm_display_mode *mode)
+void vmw_guess_mode_timing(struct drm_display_mode *mode)
 {
 	mode->hsync_start = mode->hdisplay + 50;
 	mode->hsync_end = mode->hsync_start + 50;
@@ -1954,36 +1501,39 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
 	 * If using screen objects, then assume 32-bpp because that's what the
 	 * SVGA device is assuming
 	 */
-	if (dev_priv->sou_priv)
+	if (dev_priv->active_display_unit == vmw_du_screen_object)
 		assumed_bpp = 4;
 
+	if (dev_priv->active_display_unit == vmw_du_screen_target) {
+		max_width  = min(max_width,  dev_priv->stdu_max_width);
+		max_height = min(max_height, dev_priv->stdu_max_height);
+	}
+
 	/* Add preferred mode */
-	{
-		mode = drm_mode_duplicate(dev, &prefmode);
-		if (!mode)
-			return 0;
-		mode->hdisplay = du->pref_width;
-		mode->vdisplay = du->pref_height;
-		vmw_guess_mode_timing(mode);
-
-		if (vmw_kms_validate_mode_vram(dev_priv,
-						mode->hdisplay * assumed_bpp,
-						mode->vdisplay)) {
-			drm_mode_probed_add(connector, mode);
-		} else {
-			drm_mode_destroy(dev, mode);
-			mode = NULL;
-		}
+	mode = drm_mode_duplicate(dev, &prefmode);
+	if (!mode)
+		return 0;
+	mode->hdisplay = du->pref_width;
+	mode->vdisplay = du->pref_height;
+	vmw_guess_mode_timing(mode);
 
-		if (du->pref_mode) {
-			list_del_init(&du->pref_mode->head);
-			drm_mode_destroy(dev, du->pref_mode);
-		}
+	if (vmw_kms_validate_mode_vram(dev_priv,
+					mode->hdisplay * assumed_bpp,
+					mode->vdisplay)) {
+		drm_mode_probed_add(connector, mode);
+	} else {
+		drm_mode_destroy(dev, mode);
+		mode = NULL;
+	}
 
-		/* mode might be null here, this is intended */
-		du->pref_mode = mode;
+	if (du->pref_mode) {
+		list_del_init(&du->pref_mode->head);
+		drm_mode_destroy(dev, du->pref_mode);
 	}
 
+	/* mode might be NULL here; this is intended */
+	du->pref_mode = mode;
+
 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
 		bmode = &vmw_kms_connector_builtin[i];
 		if (bmode->hdisplay > max_width ||
@@ -2003,11 +1553,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
 		drm_mode_probed_add(connector, mode);
 	}
 
-	/* Move the prefered mode first, help apps pick the right mode. */
-	if (du->pref_mode)
-		list_move(&du->pref_mode->head, &connector->probed_modes);
-
 	drm_mode_connector_list_update(connector, true);
+	/* Move the preferred mode first to help apps pick the right mode. */
+	drm_mode_sort(&connector->modes);
 
 	return 1;
 }
@@ -2031,7 +1579,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 	unsigned rects_size;
 	int ret;
 	int i;
+	u64 total_pixels = 0;
 	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_vmw_rect bounding_box = {0};
 
 	if (!arg->num_outputs) {
 		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
@@ -2062,6 +1612,40 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 			ret = -EINVAL;
 			goto out_free;
 		}
+
+		/*
+		 * bounding_box.w and bounding_box.h are used as
+		 * lower-right coordinates
+		 */
+		if (rects[i].x + rects[i].w > bounding_box.w)
+			bounding_box.w = rects[i].x + rects[i].w;
+
+		if (rects[i].y + rects[i].h > bounding_box.h)
+			bounding_box.h = rects[i].y + rects[i].h;
+
+		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
+	}
+
+	if (dev_priv->active_display_unit == vmw_du_screen_target) {
+		/*
+		 * For Screen Targets, the limits for a topology are:
+		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
+		 *      2. Total pixels (assuming 32bpp) must be < prim_bb_mem
+		 */
+		u64 bb_mem    = (u64) bounding_box.w * bounding_box.h * 4;
+		u64 pixel_mem = total_pixels * 4;
+
+		if (bb_mem > dev_priv->prim_bb_mem) {
+			DRM_ERROR("Topology is beyond supported limits.\n");
+			ret = -EINVAL;
+			goto out_free;
+		}
+
+		if (pixel_mem > dev_priv->prim_bb_mem) {
+			DRM_ERROR("Combined output size too large\n");
+			ret = -EINVAL;
+			goto out_free;
+		}
 	}
 
 	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
@@ -2070,3 +1654,419 @@ out_free:
 	kfree(rects);
 	return ret;
 }
+
+/**
+ * vmw_kms_helper_dirty - Helper to build commands and perform actions based
+ * on a set of cliprects and a set of display units.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @framebuffer: Pointer to the framebuffer on which to perform the actions.
+ * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
+ * Cliprects are given in framebuffer coordinates.
+ * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
+ * be NULL. Cliprects are given in source coordinates.
+ * @dest_x: X coordinate offset for the crtc / destination clip rects.
+ * @dest_y: Y coordinate offset for the crtc / destination clip rects.
+ * @num_clips: Number of cliprects in the @clips or @vclips array.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
+ */
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+			 struct vmw_framebuffer *framebuffer,
+			 const struct drm_clip_rect *clips,
+			 const struct drm_vmw_rect *vclips,
+			 s32 dest_x, s32 dest_y,
+			 int num_clips,
+			 int increment,
+			 struct vmw_kms_dirty *dirty)
+{
+	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_crtc *crtc;
+	u32 num_units = 0;
+	u32 i, k;
+
+	dirty->dev_priv = dev_priv;
+
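+	/* Collect the display units currently scanning out this framebuffer. */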
+	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+		if (crtc->primary->fb != &framebuffer->base)
+			continue;
+		units[num_units++] = vmw_crtc_to_du(crtc);
+	}
+
+	for (k = 0; k < num_units; k++) {
+		struct vmw_display_unit *unit = units[k];
+		s32 crtc_x = unit->crtc.x;
+		s32 crtc_y = unit->crtc.y;
+		s32 crtc_width = unit->crtc.mode.hdisplay;
+		s32 crtc_height = unit->crtc.mode.vdisplay;
+		const struct drm_clip_rect *clips_ptr = clips;
+		const struct drm_vmw_rect *vclips_ptr = vclips;
+
+		dirty->unit = unit;
+		if (dirty->fifo_reserve_size > 0) {
+			dirty->cmd = vmw_fifo_reserve(dev_priv,
+						      dirty->fifo_reserve_size);
+			if (!dirty->cmd) {
+				DRM_ERROR("Couldn't reserve fifo space "
+					  "for dirty blits.\n");
+				return -ENOMEM;
+			}
+			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
+		}
+		dirty->num_hits = 0;
+		for (i = 0; i < num_clips; i++, clips_ptr += increment,
+		       vclips_ptr += increment) {
+			s32 clip_left;
+			s32 clip_top;
+
+			/*
+			 * Select clip array type. Note that integer type
+			 * in @clips is unsigned short, whereas in @vclips
+			 * it's 32-bit.
+			 */
+			if (clips) {
+				dirty->fb_x = (s32) clips_ptr->x1;
+				dirty->fb_y = (s32) clips_ptr->y1;
+				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
+					crtc_x;
+				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
+					crtc_y;
+			} else {
+				dirty->fb_x = vclips_ptr->x;
+				dirty->fb_y = vclips_ptr->y;
+				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
+					dest_x - crtc_x;
+				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
+					dest_y - crtc_y;
+			}
+
+			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
+			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
+
+			/* Skip this clip if it's outside the crtc region */
+			if (dirty->unit_x1 >= crtc_width ||
+			    dirty->unit_y1 >= crtc_height ||
+			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
+				continue;
+
+			/* Clip right and bottom to crtc limits */
+			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
+					       crtc_width);
+			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
+					       crtc_height);
+
+			/* Clip left and top to crtc limits */
+			clip_left = min_t(s32, dirty->unit_x1, 0);
+			clip_top = min_t(s32, dirty->unit_y1, 0);
+			dirty->unit_x1 -= clip_left;
+			dirty->unit_y1 -= clip_top;
+			dirty->fb_x -= clip_left;
+			dirty->fb_y -= clip_top;
+
+			dirty->clip(dirty);
+		}
+
+		dirty->fifo_commit(dirty);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
+ * command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @buf: The buffer object
+ * @interruptible: Whether to perform waits as interruptible.
+ * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
+ * the buffer will be validated as a GMR. Already pinned buffers will not be
+ * validated.
+ *
+ * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible,
+				  bool validate_as_mob)
+{
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+
+	ttm_bo_reserve(bo, false, false, interruptible, NULL);
+	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
+					 validate_as_mob);
+	if (ret)
+		ttm_bo_unreserve(bo);
+
+	return ret;
+}
+
+/**
+ * vmw_kms_helper_buffer_revert - Undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ *
+ * @buf: Pointer to the buffer object.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ */
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+{
+	if (buf)
+		ttm_bo_unreserve(&buf->base);
+}
+
+/**
+ * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
+ * kms command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @file_priv: Pointer to a struct drm_file representing the caller's
+ * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
+ * if non-NULL, @user_fence_rep must be non-NULL.
+ * @buf: The buffer object.
+ * @out_fence:  Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ * @user_fence_rep: Optional pointer to a user-space provided struct
+ * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
+ * function copies fence data to user-space in a fail-safe manner.
+ */
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+				  struct drm_file *file_priv,
+				  struct vmw_dma_buffer *buf,
+				  struct vmw_fence_obj **out_fence,
+				  struct drm_vmw_fence_rep __user *
+				  user_fence_rep)
+{
+	struct vmw_fence_obj *fence;
+	uint32_t handle;
+	int ret;
+
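+	/*
+	 * Create a fence for the submitted commands, attach it to the
+	 * buffer and optionally hand it to the caller and / or user-space.
+	 */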
+	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+					 file_priv ? &handle : NULL);
+	if (buf)
+		vmw_fence_single_bo(&buf->base, fence);
+	if (file_priv)
+		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+					    ret, user_fence_rep, fence,
+					    handle);
+	if (out_fence)
+		*out_fence = fence;
+	else
+		vmw_fence_obj_unreference(&fence);
+
+	vmw_kms_helper_buffer_revert(buf);
+}
+
+/**
+ * vmw_kms_helper_resource_revert - Undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ */
+void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+{
+	vmw_kms_helper_buffer_revert(res->backup);
+	vmw_resource_unreserve(res, false, NULL, 0);
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
+
+/**
+ * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
+ * command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @interruptible: Whether to perform waits as interruptible.
+ *
+ * Reserves and validates also the backup buffer if a guest-backed resource.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+				    bool interruptible)
+{
+	int ret = 0;
+
+	if (interruptible)
+		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
+	else
+		mutex_lock(&res->dev_priv->cmdbuf_mutex);
+
+	if (unlikely(ret != 0))
+		return -ERESTARTSYS;
+
+	ret = vmw_resource_reserve(res, interruptible, false);
+	if (ret)
+		goto out_unlock;
+
+	if (res->backup) {
+		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
+						    interruptible,
+						    res->dev_priv->has_mob);
+		if (ret)
+			goto out_unreserve;
+	}
+	ret = vmw_resource_validate(res);
+	if (ret)
+		goto out_revert;
+	return 0;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(res->backup);
+out_unreserve:
+	vmw_resource_unreserve(res, false, NULL, 0);
+out_unlock:
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+	return ret;
+}
+
+/**
+ * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
+ * kms command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ */
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+			     struct vmw_fence_obj **out_fence)
+{
+	if (res->backup || out_fence)
+		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+					     out_fence, NULL);
+
+	vmw_resource_unreserve(res, false, NULL, 0);
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
+
+/**
+ * vmw_kms_update_proxy - Helper function to update a proxy surface from
+ * its backing MOB.
+ *
+ * @res: Pointer to the surface resource
+ * @clips: Clip rects in framebuffer (surface) space.
+ * @num_clips: Number of clips in @clips.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ *
+ * This function makes sure the proxy surface is updated from its backing MOB
+ * using the region given by @clips. The surface resource @res and its backing
+ * MOB need to be reserved and validated on call.
+ */
+int vmw_kms_update_proxy(struct vmw_resource *res,
+			 const struct drm_clip_rect *clips,
+			 unsigned num_clips,
+			 int increment)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBImage body;
+	} *cmd;
+	SVGA3dBox *box;
+	size_t copy_size = 0;
+	int i;
+
+	if (!clips)
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+	if (!cmd) {
+		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
+			  "update.\n");
+		return -ENOMEM;
+	}
+
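+	/* Emit one UPDATE_GB_IMAGE command per clip rect. */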
+	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
+		box = &cmd->body.box;
+
+		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.image.sid = res->id;
+		cmd->body.image.face = 0;
+		cmd->body.image.mipmap = 0;
+
+		if (clips->x1 > size->width || clips->x2 > size->width ||
+		    clips->y1 > size->height || clips->y2 > size->height) {
+			DRM_ERROR("Invalid clips outsize of framebuffer.\n");
+			return -EINVAL;
+		}
+
+		box->x = clips->x1;
+		box->y = clips->y1;
+		box->z = 0;
+		box->w = clips->x2 - clips->x1;
+		box->h = clips->y2 - clips->y1;
+		box->d = 1;
+
+		copy_size += sizeof(*cmd);
+	}
+
+	vmw_fifo_commit(dev_priv, copy_size);
+
+	return 0;
+}
+
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+			    unsigned unit,
+			    u32 max_width,
+			    u32 max_height,
+			    struct drm_connector **p_con,
+			    struct drm_crtc **p_crtc,
+			    struct drm_display_mode **p_mode)
+{
+	struct drm_connector *con;
+	struct vmw_display_unit *du;
+	struct drm_display_mode *mode;
+	int i = 0;
+
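+	/* Find the connector corresponding to the requested unit index. */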
+	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+			    head) {
+		if (i == unit)
+			break;
+
+		++i;
+	}
+
+	if (i != unit) {
+		DRM_ERROR("Could not find initial display unit.\n");
+		return -EINVAL;
+	}
+
+	if (list_empty(&con->modes))
+		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
+
+	if (list_empty(&con->modes)) {
+		DRM_ERROR("Could not find initial display mode.\n");
+		return -EINVAL;
+	}
+
+	du = vmw_connector_to_du(con);
+	*p_con = con;
+	*p_crtc = &du->crtc;
+
+	list_for_each_entry(mode, &con->modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED)
+			break;
+	}
+
+	if (mode->type & DRM_MODE_TYPE_PREFERRED)
+		*p_mode = mode;
+	else {
+		WARN_ONCE(true, "Could not find initial preferred mode.\n");
+		*p_mode = list_first_entry(&con->modes,
+					   struct drm_display_mode,
+					   head);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8d038c36bd57..782df7ca9794 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,11 +32,60 @@
 #include <drm/drm_crtc_helper.h>
 #include "vmwgfx_drv.h"
 
+/**
+ * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
+ * function.
+ *
+ * @fifo_commit: Callback that is called once for each display unit after
+ * all clip rects. This function must commit the fifo space reserved by the
+ * helper. Set up by the caller.
+ * @clip: Callback that is called for each cliprect on each display unit.
+ * Set up by the caller.
+ * @fifo_reserve_size: Fifo size that the helper should try to allocate for
+ * each display unit. Set up by the caller.
+ * @dev_priv: Pointer to the device private. Set up by the helper.
+ * @unit: The current display unit. Set up by the helper before a call to @clip.
+ * @cmd: The allocated fifo space. Set up by the helper before the first @clip
+ * call.
+ * @num_hits: Number of clip rect commands for this display unit.
+ * Cleared by the helper before the first @clip call. Updated by the @clip
+ * callback.
+ * @fb_x: Clip rect left side in framebuffer coordinates.
+ * @fb_y: Clip rect top side in framebuffer coordinates.
+ * @unit_x1: Clip rect left side in crtc coordinates.
+ * @unit_y1: Clip rect top side in crtc coordinates.
+ * @unit_x2: Clip rect right side in crtc coordinates.
+ * @unit_y2: Clip rect bottom side in crtc coordinates.
+ *
+ * The clip rect coordinates are updated by the helper for each @clip call.
+ * Note that this structure may be embedded in a larger one if more info
+ * needs to be passed between the helper caller and the helper callbacks.
+ */
+struct vmw_kms_dirty {
+	void (*fifo_commit)(struct vmw_kms_dirty *);
+	void (*clip)(struct vmw_kms_dirty *);
+	size_t fifo_reserve_size;
+	struct vmw_private *dev_priv;
+	struct vmw_display_unit *unit;
+	void *cmd;
+	u32 num_hits;
+	s32 fb_x;
+	s32 fb_y;
+	s32 unit_x1;
+	s32 unit_y1;
+	s32 unit_x2;
+	s32 unit_y2;
+};
+
 #define VMWGFX_NUM_DISPLAY_UNITS 8
 
 
 #define vmw_framebuffer_to_vfb(x) \
 	container_of(x, struct vmw_framebuffer, base)
+#define vmw_framebuffer_to_vfbs(x) \
+	container_of(x, struct vmw_framebuffer_surface, base.base)
+#define vmw_framebuffer_to_vfbd(x) \
+	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
 
 /**
  * Base class for framebuffers
@@ -53,9 +102,27 @@ struct vmw_framebuffer {
 	uint32_t user_handle;
 };
 
+/*
+ * Clip rectangle
+ */
+struct vmw_clip_rect {
+	int x1, x2, y1, y2;
+};
+
+struct vmw_framebuffer_surface {
+	struct vmw_framebuffer base;
+	struct vmw_surface *surface;
+	struct vmw_dma_buffer *buffer;
+	struct list_head head;
+	bool is_dmabuf_proxy;  /* true if this is proxy surface for DMA buf */
+};
+
+
+struct vmw_framebuffer_dmabuf {
+	struct vmw_framebuffer base;
+	struct vmw_dma_buffer *buffer;
+};
 
-#define vmw_crtc_to_du(x) \
-	container_of(x, struct vmw_display_unit, crtc)
 
 /*
  * Basic cursor manipulation
@@ -120,11 +187,7 @@ struct vmw_display_unit {
 /*
  * Shared display unit functions - vmwgfx_kms.c
  */
-void vmw_display_unit_cleanup(struct vmw_display_unit *du);
-int vmw_du_page_flip(struct drm_crtc *crtc,
-		     struct drm_framebuffer *fb,
-		     struct drm_pending_vblank_event *event,
-		     uint32_t page_flip_flags);
+void vmw_du_cleanup(struct vmw_display_unit *du);
 void vmw_du_crtc_save(struct drm_crtc *crtc);
 void vmw_du_crtc_restore(struct drm_crtc *crtc);
 void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
@@ -133,7 +196,7 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
 int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 			   uint32_t handle, uint32_t width, uint32_t height);
 int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
 void vmw_du_connector_save(struct drm_connector *connector);
 void vmw_du_connector_restore(struct drm_connector *connector);
 enum drm_connector_status
@@ -143,25 +206,118 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
 int vmw_du_connector_set_property(struct drm_connector *connector,
 				  struct drm_property *property,
 				  uint64_t val);
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+			 struct vmw_framebuffer *framebuffer,
+			 const struct drm_clip_rect *clips,
+			 const struct drm_vmw_rect *vclips,
+			 s32 dest_x, s32 dest_y,
+			 int num_clips,
+			 int increment,
+			 struct vmw_kms_dirty *dirty);
 
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible,
+				  bool validate_as_mob);
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+				  struct drm_file *file_priv,
+				  struct vmw_dma_buffer *buf,
+				  struct vmw_fence_obj **out_fence,
+				  struct drm_vmw_fence_rep __user *
+				  user_fence_rep);
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+				    bool interruptible);
+void vmw_kms_helper_resource_revert(struct vmw_resource *res);
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+				    struct vmw_fence_obj **out_fence);
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips);
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+			struct vmw_dma_buffer *dmabuf,
+			struct vmw_surface *surface,
+			bool only_2d,
+			const struct drm_mode_fb_cmd *mode_cmd);
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+			    unsigned unit,
+			    u32 max_width,
+			    u32 max_height,
+			    struct drm_connector **p_con,
+			    struct drm_crtc **p_crtc,
+			    struct drm_display_mode **p_mode);
+void vmw_guess_mode_timing(struct drm_display_mode *mode);
 
 /*
  * Legacy display unit functions - vmwgfx_ldu.c
  */
-int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
-int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				unsigned flags, unsigned color,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment);
+int vmw_kms_update_proxy(struct vmw_resource *res,
+			 const struct drm_clip_rect *clips,
+			 unsigned num_clips,
+			 int increment);
 
 /*
  * Screen Objects display functions - vmwgfx_scrn.c
  */
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
-			      struct drm_vmw_rect *rects);
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
-				     struct drm_crtc *crtc);
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
-					      struct drm_crtc *crtc);
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+				 struct vmw_framebuffer *framebuffer,
+				 struct drm_clip_rect *clips,
+				 struct drm_vmw_rect *vclips,
+				 struct vmw_resource *srf,
+				 s32 dest_x,
+				 s32 dest_y,
+				 unsigned num_clips, int inc,
+				 struct vmw_fence_obj **out_fence);
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment,
+				bool interruptible,
+				struct vmw_fence_obj **out_fence);
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+			 struct drm_file *file_priv,
+			 struct vmw_framebuffer *vfb,
+			 struct drm_vmw_fence_rep __user *user_fence_rep,
+			 struct drm_vmw_rect *vclips,
+			 uint32_t num_clips);
+
+/*
+ * Screen Target Display Unit functions - vmwgfx_stdu.c
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       struct drm_clip_rect *clips,
+			       struct drm_vmw_rect *vclips,
+			       struct vmw_resource *srf,
+			       s32 dest_x,
+			       s32 dest_y,
+			       unsigned num_clips, int inc,
+			       struct vmw_fence_obj **out_fence);
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_clip_rect *clips,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips,
+		     int increment,
+		     bool to_surface,
+		     bool interruptible);
 
 
 #endif
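
Taken together, the helper declarations above form a prepare/dirty/finish bracket.
A minimal hedged sketch of the intended call pattern (not part of the patch),
assuming a dma-buffer backed framebuffer and a pre-built vmw_kms_dirty closure;
the function signatures come from the declarations above and error handling is
abbreviated:

	static int example_dirty_flush(struct vmw_private *dev_priv,
				       struct vmw_framebuffer *vfb,
				       struct vmw_dma_buffer *buf,
				       struct drm_clip_rect *clips,
				       unsigned num_clips,
				       struct vmw_kms_dirty *dirty)
	{
		int ret;

		/* Reserve and validate the backing buffer for submission. */
		ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
		if (ret)
			return ret;

		/* Walk the clips; dirty->clip encodes each rect and
		 * dirty->fifo_commit submits the batch. */
		ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL,
					   0, 0, num_clips, 1, dirty);

		/* Fence the buffer and drop the reservation, whether or
		 * not the dirty pass succeeded. */
		vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, NULL, NULL);
		return ret;
	}
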
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5c289f748ab4..bb63e4d795fa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -57,7 +57,7 @@ struct vmw_legacy_display_unit {
 static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
 {
 	list_del_init(&ldu->active);
-	vmw_display_unit_cleanup(&ldu->base);
+	vmw_du_cleanup(&ldu->base);
 	kfree(ldu);
 }
 
@@ -279,7 +279,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 		return -EINVAL;
 	}
 
-	vmw_fb_off(dev_priv);
+	vmw_svga_enable(dev_priv);
 
 	crtc->primary->fb = fb;
 	encoder->crtc = crtc;
@@ -385,7 +385,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 	return 0;
 }
 
-int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	int i, ret;
@@ -422,6 +422,10 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 	else
 		vmw_ldu_init(dev_priv, 0);
 
+	dev_priv->active_display_unit = vmw_du_legacy;
+
+	DRM_INFO("Legacy Display Unit initialized\n");
+
 	return 0;
 
 err_vblank_cleanup:
@@ -432,7 +436,7 @@ err_free:
 	return ret;
 }
 
-int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 
@@ -447,3 +451,38 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
 
 	return 0;
 }
+
+
+int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				unsigned flags, unsigned color,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment)
+{
+	size_t fifo_size;
+	int i;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdUpdate body;
+	} *cmd;
+
+	fifo_size = sizeof(*cmd) * num_clips;
+	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+	for (i = 0; i < num_clips; i++, clips += increment) {
+		cmd[i].header = SVGA_CMD_UPDATE;
+		cmd[i].body.x = clips->x1;
+		cmd[i].body.y = clips->y1;
+		cmd[i].body.width = clips->x2 - clips->x1;
+		cmd[i].body.height = clips->y2 - clips->y1;
+	}
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+	return 0;
+}
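
A hedged caller sketch for the function above (not part of the patch): marking
the whole framebuffer dirty on the legacy path takes one clip rect and an
increment of 1; flags and color are accepted for interface symmetry but are
ignored by the body above.

	static int example_ldu_full_update(struct vmw_private *dev_priv,
					   struct vmw_framebuffer *vfb)
	{
		struct drm_clip_rect clip = {
			.x1 = 0,
			.y1 = 0,
			.x2 = vfb->base.width,
			.y2 = vfb->base.height,
		};

		/* One clip covering the framebuffer; increment == 1. */
		return vmw_kms_ldu_do_dmabuf_dirty(dev_priv, vfb, 0, 0,
						   &clip, 1, 1);
	}
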
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 04a64b8cd3cd..23db16008e39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,7 +31,7 @@
  * If we set up the screen target otable, screen objects stop working.
  */
 
-#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
 
 #ifdef CONFIG_64BIT
 #define VMW_PPN_SIZE 8
@@ -67,9 +67,23 @@ struct vmw_mob {
  * @size:           Size of the table (page-aligned).
  * @page_table:     Pointer to a struct vmw_mob holding the page table.
  */
-struct vmw_otable {
-	unsigned long size;
-	struct vmw_mob *page_table;
+static const struct vmw_otable pre_dx_tables[] = {
+	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+};
+
+static const struct vmw_otable dx_tables[] = {
+	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
 };
 
 static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
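
The open-coded struct vmw_otable definition is dropped from this file in favor
of the static tables above; its kernel-doc comment survives. Judging from the
three-member initializers, the relocated definition gains an enable flag. A
hedged reconstruction (the real definition lives in a driver header, likely
vmwgfx_drv.h):

	struct vmw_otable {
		unsigned long size;         /* Table size; page-aligned during setup */
		struct vmw_mob *page_table; /* Backing page table, NULL until set up */
		bool enabled;               /* Batch setup/takedown skip disabled tables */
	};
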
@@ -92,6 +106,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
  */
 static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 				 SVGAOTableType type,
+				 struct ttm_buffer_object *otable_bo,
 				 unsigned long offset,
 				 struct vmw_otable *otable)
 {
@@ -106,7 +121,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 
 	BUG_ON(otable->page_table != NULL);
 
-	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+	vsgt = vmw_bo_sg_table(otable_bo);
 	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
 	WARN_ON(!vmw_piter_next(&iter));
 
@@ -142,7 +157,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.type = type;
-	cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = otable->size;
 	cmd->body.validSizeInBytes = 0;
 	cmd->body.ptDepth = mob->pt_level;
@@ -191,18 +206,19 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for OTable "
 			  "takedown.\n");
-	} else {
-		memset(cmd, 0, sizeof(*cmd));
-		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
-		cmd->header.size = sizeof(cmd->body);
-		cmd->body.type = type;
-		cmd->body.baseAddress = 0;
-		cmd->body.sizeInBytes = 0;
-		cmd->body.validSizeInBytes = 0;
-		cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
-		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+		return;
 	}
 
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = type;
+	cmd->body.baseAddress = 0;
+	cmd->body.sizeInBytes = 0;
+	cmd->body.validSizeInBytes = 0;
+	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
 	if (bo) {
 		int ret;
 
@@ -217,47 +233,21 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
 	otable->page_table = NULL;
 }
 
-/*
- * vmw_otables_setup - Set up guest backed memory object tables
- *
- * @dev_priv:       Pointer to a device private structure
- *
- * Takes care of the device guest backed surface
- * initialization, by setting up the guest backed memory object tables.
- * Returns 0 on success and various error codes on failure. A succesful return
- * means the object tables can be taken down using the vmw_otables_takedown
- * function.
- */
-int vmw_otables_setup(struct vmw_private *dev_priv)
+
+static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
+				  struct vmw_otable_batch *batch)
 {
 	unsigned long offset;
 	unsigned long bo_size;
-	struct vmw_otable *otables;
+	struct vmw_otable *otables = batch->otables;
 	SVGAOTableType i;
 	int ret;
 
-	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
-			  GFP_KERNEL);
-	if (unlikely(otables == NULL)) {
-		DRM_ERROR("Failed to allocate space for otable "
-			  "metadata.\n");
-		return -ENOMEM;
-	}
-
-	otables[SVGA_OTABLE_MOB].size =
-		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SURFACE].size =
-		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
-	otables[SVGA_OTABLE_CONTEXT].size =
-		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SHADER].size =
-		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SCREEN_TARGET].size =
-		VMWGFX_NUM_GB_SCREEN_TARGET *
-		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
-
 	bo_size = 0;
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (!otables[i].enabled)
+			continue;
+
 		otables[i].size =
 			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
 		bo_size += otables[i].size;
@@ -267,63 +257,105 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 			    ttm_bo_type_device,
 			    &vmw_sys_ne_placement,
 			    0, false, NULL,
-			    &dev_priv->otable_bo);
+			    &batch->otable_bo);
 
 	if (unlikely(ret != 0))
 		goto out_no_bo;
 
-	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
 	BUG_ON(ret != 0);
-	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
-	ret = vmw_bo_map_dma(dev_priv->otable_bo);
+	ret = vmw_bo_map_dma(batch->otable_bo);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
 
-	ttm_bo_unreserve(dev_priv->otable_bo);
+	ttm_bo_unreserve(batch->otable_bo);
 
 	offset = 0;
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
-		ret = vmw_setup_otable_base(dev_priv, i, offset,
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (!batch->otables[i].enabled)
+			continue;
+
+		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
+					    offset,
 					    &otables[i]);
 		if (unlikely(ret != 0))
 			goto out_no_setup;
 		offset += otables[i].size;
 	}
 
-	dev_priv->otables = otables;
 	return 0;
 
 out_unreserve:
-	ttm_bo_unreserve(dev_priv->otable_bo);
+	ttm_bo_unreserve(batch->otable_bo);
 out_no_setup:
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
-		vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (batch->otables[i].enabled)
+			vmw_takedown_otable_base(dev_priv, i,
+						 &batch->otables[i]);
+	}
 
-	ttm_bo_unref(&dev_priv->otable_bo);
+	ttm_bo_unref(&batch->otable_bo);
 out_no_bo:
-	kfree(otables);
 	return ret;
 }
 
-
 /*
- * vmw_otables_takedown - Take down guest backed memory object tables
+ * vmw_otables_setup - Set up guest backed memory object tables
  *
  * @dev_priv:       Pointer to a device private structure
  *
- * Take down the Guest Memory Object tables.
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
  */
-void vmw_otables_takedown(struct vmw_private *dev_priv)
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
+	int ret;
+
+	if (dev_priv->has_dx) {
+		*otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
+		if (*otables == NULL)
+			return -ENOMEM;
+
+		memcpy(*otables, dx_tables, sizeof(dx_tables));
+		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
+	} else {
+		*otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
+		if (*otables == NULL)
+			return -ENOMEM;
+
+		memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
+		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
+	}
+
+	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
+	if (unlikely(ret != 0))
+		goto out_setup;
+
+	return 0;
+
+out_setup:
+	kfree(*otables);
+	return ret;
+}
+
+static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
+			       struct vmw_otable_batch *batch)
 {
 	SVGAOTableType i;
-	struct ttm_buffer_object *bo = dev_priv->otable_bo;
+	struct ttm_buffer_object *bo = batch->otable_bo;
 	int ret;
 
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
-		vmw_takedown_otable_base(dev_priv, i,
-					 &dev_priv->otables[i]);
+	for (i = 0; i < batch->num_otables; ++i)
+		if (batch->otables[i].enabled)
+			vmw_takedown_otable_base(dev_priv, i,
+						 &batch->otables[i]);
 
 	ret = ttm_bo_reserve(bo, false, true, false, NULL);
 	BUG_ON(ret != 0);
@@ -331,11 +363,21 @@ void vmw_otables_takedown(struct vmw_private *dev_priv)
 	vmw_fence_single_bo(bo, NULL);
 	ttm_bo_unreserve(bo);
 
-	ttm_bo_unref(&dev_priv->otable_bo);
-	kfree(dev_priv->otables);
-	dev_priv->otables = NULL;
+	ttm_bo_unref(&batch->otable_bo);
 }
 
+/*
+ * vmw_otables_takedown - Take down guest backed memory object tables
+ *
+ * @dev_priv:       Pointer to a device private structure
+ *
+ * Take down the Guest Memory Object tables.
+ */
+void vmw_otables_takedown(struct vmw_private *dev_priv)
+{
+	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
+	kfree(dev_priv->otable_batch.otables);
+}
 
 /*
  * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
@@ -409,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 		goto out_unreserve;
 
 	ttm_bo_unreserve(mob->pt_bo);
-	
+
 	return 0;
 
 out_unreserve:
@@ -429,15 +471,15 @@ out_unreserve:
  * *@addr according to the page table entry size.
  */
 #if (VMW_PPN_SIZE == 8)
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
 {
-	*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+	*((u64 *) *addr) = val >> PAGE_SHIFT;
 	*addr += 2;
 }
 #else
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
 {
-	*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+	*(*addr)++ = val >> PAGE_SHIFT;
 }
 #endif
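
A hedged worked example of the page-number encoding above, assuming
PAGE_SHIFT == 12: a DMA address of 0x12345000 becomes PPN 0x12345. On 64-bit
builds (VMW_PPN_SIZE == 8) the value occupies two consecutive u32 slots; on
32-bit it occupies one.

	static void example_ppn_encoding(void)
	{
		u32 pt_entries[4] = {0};
		u32 *cursor = pt_entries;

		/* PAGE_SHIFT == 12: DMA address 0x12345000 -> PPN 0x12345. */
		vmw_mob_assign_ppn(&cursor, 0x12345000);
		/* 64-bit: pt_entries[0..1] hold 0x12345 as a u64; cursor += 2.
		 * 32-bit: pt_entries[0] == 0x12345; cursor += 1. */
	}
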
 
@@ -459,7 +501,7 @@ static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
 	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
 	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
 	unsigned long pt_page;
-	__le32 *addr, *save_addr;
+	u32 *addr, *save_addr;
 	unsigned long i;
 	struct page *page;
 
@@ -574,7 +616,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
 		vmw_fence_single_bo(bo, NULL);
 		ttm_bo_unreserve(bo);
 	}
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 /*
@@ -627,7 +669,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
 	}
 
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -640,7 +682,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.mobid = mob_id;
 	cmd->body.ptDepth = mob->pt_level;
-	cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -648,7 +690,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 	return 0;
 
 out_no_cmd_space:
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 	if (pt_set_up)
 		ttm_bo_unref(&mob->pt_bo);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 87e39f68e9d0..76069f093ccf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,8 +31,8 @@
 
 #include <drm/ttm/ttm_placement.h>
 
-#include "svga_overlay.h"
-#include "svga_escape.h"
+#include "device_include/svga_overlay.h"
+#include "device_include/svga_escape.h"
 
 #define VMW_MAX_NUM_STREAMS 1
 #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
@@ -100,7 +100,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
 {
 	struct vmw_escape_video_flush *flush;
 	size_t fifo_size;
-	bool have_so = dev_priv->sou_priv ? true : false;
+	bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
 	int i, num_items;
 	SVGAGuestPtr ptr;
 
@@ -231,10 +231,10 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
 	if (!pin)
 		return vmw_dmabuf_unpin(dev_priv, buf, inter);
 
-	if (!dev_priv->sou_priv)
-		return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
+	if (dev_priv->active_display_unit == vmw_du_legacy)
+		return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
 
-	return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
+	return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
 }
 
 /**
@@ -453,7 +453,7 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
 
 static bool vmw_overlay_available(const struct vmw_private *dev_priv)
 {
-	return (dev_priv->overlay_priv != NULL && 
+	return (dev_priv->overlay_priv != NULL &&
 		((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
 		 VMW_OVERLAY_CAP_MASK));
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index 9d0dd3a342eb..dce798053a96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,19 +39,17 @@
 #define VMWGFX_IRQSTATUS_PORT 0x8
 
 struct svga_guest_mem_descriptor {
-	__le32 ppn;
-	__le32 num_pages;
+	u32 ppn;
+	u32 num_pages;
 };
 
 struct svga_fifo_cmd_fence {
-	__le32 fence;
+	u32 fence;
 };
 
 #define SVGA_SYNC_GENERIC         1
 #define SVGA_SYNC_FIFOFULL        2
 
-#include "svga_types.h"
-
-#include "svga3d_reg.h"
+#include "device_include/svga3d_reg.h"
 
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 210ef15b1d09..c1912f852b42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,6 +31,7 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
@@ -121,6 +122,7 @@ static void vmw_resource_release(struct kref *kref)
 	int id;
 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
+	write_lock(&dev_priv->resource_lock);
 	res->avail = false;
 	list_del_init(&res->lru_head);
 	write_unlock(&dev_priv->resource_lock);
@@ -143,10 +145,10 @@ static void vmw_resource_release(struct kref *kref)
 	}
 
 	if (likely(res->hw_destroy != NULL)) {
-		res->hw_destroy(res);
 		mutex_lock(&dev_priv->binding_mutex);
-		vmw_context_binding_res_list_kill(&res->binding_head);
+		vmw_binding_res_list_kill(&res->binding_head);
 		mutex_unlock(&dev_priv->binding_mutex);
+		res->hw_destroy(res);
 	}
 
 	id = res->id;
@@ -156,20 +158,17 @@ static void vmw_resource_release(struct kref *kref)
 		kfree(res);
 
 	write_lock(&dev_priv->resource_lock);
-
 	if (id != -1)
 		idr_remove(idr, id);
+	write_unlock(&dev_priv->resource_lock);
 }
 
 void vmw_resource_unreference(struct vmw_resource **p_res)
 {
 	struct vmw_resource *res = *p_res;
-	struct vmw_private *dev_priv = res->dev_priv;
 
 	*p_res = NULL;
-	write_lock(&dev_priv->resource_lock);
 	kref_put(&res->kref, vmw_resource_release);
-	write_unlock(&dev_priv->resource_lock);
 }
 
 
@@ -260,17 +259,16 @@ void vmw_resource_activate(struct vmw_resource *res,
 	write_unlock(&dev_priv->resource_lock);
 }
 
-struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
-					 struct idr *idr, int id)
+static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+						struct idr *idr, int id)
 {
 	struct vmw_resource *res;
 
 	read_lock(&dev_priv->resource_lock);
 	res = idr_find(idr, id);
-	if (res && res->avail)
-		kref_get(&res->kref);
-	else
+	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
 		res = NULL;
+
 	read_unlock(&dev_priv->resource_lock);
 
 	if (unlikely(res == NULL))
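
The switch to kref_get_unless_zero() pairs with vmw_resource_release() now
taking resource_lock internally: the final kref_put() no longer holds the lock
across teardown, so a lookup can run concurrently with a refcount that has
already hit zero. A hedged sketch of the anti-pattern this change closes:

	static struct vmw_resource *racy_lookup(struct vmw_private *dev_priv,
						struct idr *idr, int id)
	{
		struct vmw_resource *res;

		read_lock(&dev_priv->resource_lock);
		res = idr_find(idr, id);
		/* BAD: the last kref_put() may be tearing the object down
		 * right now; plain kref_get() would resurrect it.
		 * kref_get_unless_zero() fails instead, which is why the
		 * lookup above uses it. */
		if (res && res->avail)
			kref_get(&res->kref);
		read_unlock(&dev_priv->resource_lock);
		return res;
	}
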
@@ -900,20 +898,21 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 				   vmw_user_stream_size,
 				   false, true);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS)
 			DRM_ERROR("Out of graphics memory for stream"
 				  " creation.\n");
-		goto out_unlock;
-	}
 
+		goto out_ret;
+	}
 
 	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
 	if (unlikely(stream == NULL)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_user_stream_size);
 		ret = -ENOMEM;
-		goto out_unlock;
+		goto out_ret;
 	}
 
 	res = &stream->stream.res;
@@ -926,7 +925,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 
 	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		goto out_ret;
 
 	tmp = vmw_resource_reference(res);
 	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -940,8 +939,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 	arg->stream_id = res->id;
 out_err:
 	vmw_resource_unreference(&res);
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
+out_ret:
 	return ret;
 }
 
@@ -1152,14 +1150,16 @@ out_bind_failed:
  * command submission.
  *
  * @res:               Pointer to the struct vmw_resource to unreserve.
+ * @switch_backup:     Backup buffer has been switched.
  * @new_backup:        Pointer to new backup buffer if command submission
- *                     switched.
- * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *                     switched. May be NULL.
+ * @new_backup_offset: New backup offset if @switch_backup is true.
  *
  * Currently unreserving a resource means putting it back on the device's
  * resource lru list, so that it can be evicted if necessary.
  */
 void vmw_resource_unreserve(struct vmw_resource *res,
+			    bool switch_backup,
 			    struct vmw_dma_buffer *new_backup,
 			    unsigned long new_backup_offset)
 {
@@ -1168,22 +1168,25 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (!list_empty(&res->lru_head))
 		return;
 
-	if (new_backup && new_backup != res->backup) {
-
+	if (switch_backup && new_backup != res->backup) {
 		if (res->backup) {
 			lockdep_assert_held(&res->backup->base.resv->lock.base);
 			list_del_init(&res->mob_head);
 			vmw_dmabuf_unreference(&res->backup);
 		}
 
-		res->backup = vmw_dmabuf_reference(new_backup);
-		lockdep_assert_held(&new_backup->base.resv->lock.base);
-		list_add_tail(&res->mob_head, &new_backup->res_list);
+		if (new_backup) {
+			res->backup = vmw_dmabuf_reference(new_backup);
+			lockdep_assert_held(&new_backup->base.resv->lock.base);
+			list_add_tail(&res->mob_head, &new_backup->res_list);
+		} else {
+			res->backup = NULL;
+		}
 	}
-	if (new_backup)
+	if (switch_backup)
 		res->backup_offset = new_backup_offset;
 
-	if (!res->func->may_evict || res->id == -1)
+	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 		return;
 
 	write_lock(&dev_priv->resource_lock);
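
The new switch_backup parameter decouples "the backup changed" from "there is
a new backup", so a resource can now drop its backup entirely. A hedged summary
of the resulting call patterns, following the doc comment above:

	/* Hedged summary of the new call patterns: */
	vmw_resource_unreserve(res, false, NULL, 0);        /* keep current backup */
	vmw_resource_unreserve(res, true, new_buf, offset); /* switch to new_buf  */
	vmw_resource_unreserve(res, true, NULL, 0);         /* drop the backup    */
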
@@ -1259,7 +1262,8 @@ out_no_reserve:
  * the buffer may not be bound to the resource at this point.
  *
  */
-int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+			 bool no_backup)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
@@ -1270,9 +1274,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
 
 	if (res->func->needs_backup && res->backup == NULL &&
 	    !no_backup) {
-		ret = vmw_resource_buf_alloc(res, true);
-		if (unlikely(ret != 0))
+		ret = vmw_resource_buf_alloc(res, interruptible);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to allocate a backup buffer "
+				  "of size %lu bytes.\n",
+				  (unsigned long) res->backup_size);
 			return ret;
+		}
 	}
 
 	return 0;
@@ -1305,7 +1313,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
  * @res:            The resource to evict.
  * @interruptible:  Whether to wait interruptible.
  */
-int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
@@ -1356,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 	struct ttm_validate_buffer val_buf;
 	unsigned err_count = 0;
 
-	if (likely(!res->func->may_evict))
+	if (!res->func->create)
 		return 0;
 
 	val_buf.bo = NULL;
@@ -1443,9 +1451,9 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 /**
  * vmw_resource_move_notify - TTM move_notify_callback
  *
- * @bo:             The TTM buffer object about to move.
- * @mem:            The truct ttm_mem_reg indicating to what memory
- *                  region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
  *
  * Evicts the Guest Backed hardware resource if the backup
  * buffer is being moved out of MOB memory.
@@ -1495,6 +1503,101 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 	}
 }
 
+
+
+/**
+ * vmw_query_readback_all - Read back cached query states
+ *
+ * @dx_query_mob: Buffer containing the DX query MOB
+ *
+ * Read back cached states from the device if they exist.  This function
+ * assumes binding_mutex is held.
+ */
+int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+{
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_private *dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackAllQuery body;
+	} *cmd;
+
+
+	/* No query bound, so do nothing */
+	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
+		return 0;
+
+	dx_query_ctx = dx_query_mob->dx_query_ctx;
+	dev_priv     = dx_query_ctx->dev_priv;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for "
+			  "query MOB read back.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid    = dx_query_ctx->id;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/* Triggers a rebind the next time the affected context is bound */
+	dx_query_mob->dx_query_ctx = NULL;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_query_move_notify - Read back cached query states
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The memory region @bo is moving to.
+ *
+ * Called before the query MOB is swapped out to read back cached query
+ * states from the device.
+ */
+void vmw_query_move_notify(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem)
+{
+	struct vmw_dma_buffer *dx_query_mob;
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct vmw_private *dev_priv;
+
+
+	dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mutex_lock(&dev_priv->binding_mutex);
+
+	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return;
+	}
+
+	/* If BO is being moved from MOB to system memory */
+	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
+		struct vmw_fence_obj *fence;
+
+		(void) vmw_query_readback_all(dx_query_mob);
+		mutex_unlock(&dev_priv->binding_mutex);
+
+		/* Create a fence and attach the BO to it */
+		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		vmw_fence_single_bo(bo, fence);
+
+		if (fence != NULL)
+			vmw_fence_obj_unreference(&fence);
+
+		(void) ttm_bo_wait(bo, false, false, false);
+	} else
+		mutex_unlock(&dev_priv->binding_mutex);
+
+}
+
 /**
  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
  *
@@ -1573,3 +1676,107 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
 
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
+
+/**
+ * vmw_resource_pin - Add a pin reference on a resource
+ *
+ * @res: The resource to add a pin reference on
+ *
+ * This function adds a pin reference, and if needed validates the resource.
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ * This function returns 0 on success and a negative error code on failure.
+ */
+int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+	ret = vmw_resource_reserve(res, interruptible, false);
+	if (ret)
+		goto out_no_reserve;
+
+	if (res->pin_count == 0) {
+		struct vmw_dma_buffer *vbo = NULL;
+
+		if (res->backup) {
+			vbo = res->backup;
+
+			ttm_bo_reserve(&vbo->base, interruptible, false, false,
+				       NULL);
+			if (!vbo->pin_count) {
+				ret = ttm_bo_validate
+					(&vbo->base,
+					 res->func->backup_placement,
+					 interruptible, false);
+				if (ret) {
+					ttm_bo_unreserve(&vbo->base);
+					goto out_no_validate;
+				}
+			}
+
+			/* Do we really need to pin the MOB as well? */
+			vmw_bo_pin_reserved(vbo, true);
+		}
+		ret = vmw_resource_validate(res);
+		if (vbo)
+			ttm_bo_unreserve(&vbo->base);
+		if (ret)
+			goto out_no_validate;
+	}
+	res->pin_count++;
+
+out_no_validate:
+	vmw_resource_unreserve(res, false, NULL, 0UL);
+out_no_reserve:
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	ttm_write_unlock(&dev_priv->reservation_sem);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_unpin - Remove a pin reference from a resource
+ *
+ * @res: The resource to remove a pin reference from
+ *
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ */
+void vmw_resource_unpin(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	ttm_read_lock(&dev_priv->reservation_sem, false);
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+
+	ret = vmw_resource_reserve(res, false, true);
+	WARN_ON(ret);
+
+	WARN_ON(res->pin_count == 0);
+	if (--res->pin_count == 0 && res->backup) {
+		struct vmw_dma_buffer *vbo = res->backup;
+
+		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+		vmw_bo_pin_reserved(vbo, false);
+		ttm_bo_unreserve(&vbo->base);
+	}
+
+	vmw_resource_unreserve(res, false, NULL, 0UL);
+
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+}
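
Pin references nest and must be balanced. A hedged usage sketch, assuming a
caller that needs the resource id to stay stable (for instance while the
resource is being scanned out):

	static int example_with_pinned(struct vmw_resource *res)
	{
		int ret = vmw_resource_pin(res, true);

		if (ret)
			return ret;

		/*
		 * Between pin and unpin the resource cannot be evicted
		 * and res->id cannot change.
		 */

		vmw_resource_unpin(res);
		return 0;
	}
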
+
+/**
+ * vmw_res_type - Return the resource type
+ *
+ * @res: Pointer to the resource
+ */
+enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
+{
+	return res->func->res_type;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index f3adeed2854c..5994ef6265e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,12 @@
 
 #include "vmwgfx_drv.h"
 
+enum vmw_cmdbuf_res_state {
+	VMW_CMDBUF_RES_COMMITTED,
+	VMW_CMDBUF_RES_ADD,
+	VMW_CMDBUF_RES_DEL
+};
+
 /**
  * struct vmw_user_resource_conv - Identify a derived user-exported resource
  * type and provide a function to convert its ttm_base_object pointer to
@@ -55,8 +61,10 @@ struct vmw_user_resource_conv {
  * @bind:              Bind a hardware resource to persistent buffer storage.
  * @unbind:            Unbind a hardware resource from persistent
  *                     buffer storage.
+ * @commit_notify:     If the resource is a command buffer managed resource,
+ *                     callback to notify that a define or remove command
+ *                     has been committed to the device.
  */
-
 struct vmw_res_func {
 	enum vmw_res_type res_type;
 	bool needs_backup;
@@ -71,6 +79,8 @@ struct vmw_res_func {
 	int (*unbind) (struct vmw_resource *res,
 		       bool readback,
 		       struct ttm_validate_buffer *val_buf);
+	void (*commit_notify)(struct vmw_resource *res,
+			      enum vmw_cmdbuf_res_state state);
 };
 
 int vmw_resource_alloc_id(struct vmw_resource *res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 7dc591d04d9a..b96d1ab610c5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,10 +36,55 @@
 #define vmw_connector_to_sou(x) \
 	container_of(x, struct vmw_screen_object_unit, base.connector)
 
+/**
+ * struct vmw_kms_sou_surface_dirty - Closure structure for
+ * blit surface to screen command.
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @dst_x: Difference between source clip rects and framebuffer coordinates.
+ * @dst_y: Difference between source clip rects and framebuffer coordinates.
+ * @sid: Surface id of surface to copy from.
+ */
+struct vmw_kms_sou_surface_dirty {
+	struct vmw_kms_dirty base;
+	s32 left, right, top, bottom;
+	s32 dst_x, dst_y;
+	u32 sid;
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_kms_sou_readback_blit {
+	uint32 header;
+	SVGAFifoCmdBlitScreenToGMRFB body;
+};
+
+struct vmw_kms_sou_dmabuf_blit {
+	uint32 header;
+	SVGAFifoCmdBlitGMRFBToScreen body;
+};
+
+struct vmw_kms_sou_dirty_cmd {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdBlitSurfaceToScreen body;
+};
+
+
+/*
+ * Other structs.
+ */
+
 struct vmw_screen_object_display {
 	unsigned num_implicit;
 
 	struct vmw_framebuffer *implicit_fb;
+	SVGAFifoCmdDefineGMRFB cur;
+	struct vmw_dma_buffer *pinned_gmrfb;
 };
 
 /**
@@ -57,7 +102,7 @@ struct vmw_screen_object_unit {
 
 static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
 {
-	vmw_display_unit_cleanup(&sou->base);
+	vmw_du_cleanup(&sou->base);
 	kfree(sou);
 }
 
@@ -72,7 +117,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
 }
 
 static void vmw_sou_del_active(struct vmw_private *vmw_priv,
-			      struct vmw_screen_object_unit *sou)
+			       struct vmw_screen_object_unit *sou)
 {
 	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
 
@@ -84,8 +129,8 @@ static void vmw_sou_del_active(struct vmw_private *vmw_priv,
 }
 
 static void vmw_sou_add_active(struct vmw_private *vmw_priv,
-			      struct vmw_screen_object_unit *sou,
-			      struct vmw_framebuffer *vfb)
+			       struct vmw_screen_object_unit *sou,
+			       struct vmw_framebuffer *vfb)
 {
 	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
 
@@ -202,14 +247,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
 static void vmw_sou_backing_free(struct vmw_private *dev_priv,
 				 struct vmw_screen_object_unit *sou)
 {
-	struct ttm_buffer_object *bo;
-
-	if (unlikely(sou->buffer == NULL))
-		return;
-
-	bo = &sou->buffer->base;
-	ttm_bo_unref(&bo);
-	sou->buffer = NULL;
+	vmw_dmabuf_unreference(&sou->buffer);
 	sou->buffer_size = 0;
 }
 
@@ -274,13 +312,13 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 	dev_priv = vmw_priv(crtc->dev);
 
 	if (set->num_connectors > 1) {
-		DRM_ERROR("to many connectors\n");
+		DRM_ERROR("Too many connectors\n");
 		return -EINVAL;
 	}
 
 	if (set->num_connectors == 1 &&
 	    set->connectors[0] != &sou->base.connector) {
-		DRM_ERROR("connector doesn't match %p %p\n",
+		DRM_ERROR("Connector doesn't match %p %p\n",
 			set->connectors[0], &sou->base.connector);
 		return -EINVAL;
 	}
@@ -331,7 +369,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 		return -EINVAL;
 	}
 
-	vmw_fb_off(dev_priv);
+	vmw_svga_enable(dev_priv);
 
 	if (mode->hdisplay != crtc->mode.hdisplay ||
 	    mode->vdisplay != crtc->mode.vdisplay) {
@@ -390,6 +428,108 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 	return 0;
 }
 
+/**
+ * Returns whether this unit can be page flipped.
+ * Must be called with the mode_config mutex held.
+ */
+static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
+					    struct drm_crtc *crtc)
+{
+	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+	if (!sou->base.is_implicit)
+		return true;
+
+	if (dev_priv->sou_priv->num_implicit != 1)
+		return false;
+
+	return true;
+}
+
+/**
+ * Update the implicit fb to the current fb of this crtc.
+ * Must be called with the mode_config mutex held.
+ */
+static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
+				       struct drm_crtc *crtc)
+{
+	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+	BUG_ON(!sou->base.is_implicit);
+
+	dev_priv->sou_priv->implicit_fb =
+		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+}
+
+static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  struct drm_pending_vblank_event *event,
+				  uint32_t flags)
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	struct drm_framebuffer *old_fb = crtc->primary->fb;
+	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
+	struct vmw_fence_obj *fence = NULL;
+	struct drm_clip_rect clips;
+	int ret;
+
+	/* require ScreenObject support for page flipping */
+	if (!dev_priv->sou_priv)
+		return -ENOSYS;
+
+	if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
+		return -EINVAL;
+
+	crtc->primary->fb = fb;
+
+	/* do a full screen dirty update */
+	clips.x1 = clips.y1 = 0;
+	clips.x2 = fb->width;
+	clips.y2 = fb->height;
+
+	if (vfb->dmabuf)
+		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
+						  &clips, 1, 1,
+						  true, &fence);
+	else
+		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
+						   &clips, NULL, NULL,
+						   0, 0, 1, 1, &fence);
+
+
+	if (ret != 0)
+		goto out_no_fence;
+	if (!fence) {
+		ret = -EINVAL;
+		goto out_no_fence;
+	}
+
+	if (event) {
+		struct drm_file *file_priv = event->base.file_priv;
+
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   &event->event.tv_sec,
+						   &event->event.tv_usec,
+						   true);
+	}
+
+	/*
+	 * No need to hold on to this now. The only cleanup
+	 * we need to do if we fail is unref the fence.
+	 */
+	vmw_fence_obj_unreference(&fence);
+
+	if (vmw_crtc_to_du(crtc)->is_implicit)
+		vmw_sou_update_implicit_fb(dev_priv, crtc);
+
+	return ret;
+
+out_no_fence:
+	crtc->primary->fb = old_fb;
+	return ret;
+}
+
 static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
 	.save = vmw_du_crtc_save,
 	.restore = vmw_du_crtc_restore,
@@ -398,7 +538,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
 	.gamma_set = vmw_du_crtc_gamma_set,
 	.destroy = vmw_sou_crtc_destroy,
 	.set_config = vmw_sou_crtc_set_config,
-	.page_flip = vmw_du_page_flip,
+	.page_flip = vmw_sou_crtc_page_flip,
 };
 
 /*
@@ -423,7 +563,7 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
 	vmw_sou_destroy(vmw_connector_to_sou(connector));
 }
 
-static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+static struct drm_connector_funcs vmw_sou_connector_funcs = {
 	.dpms = vmw_du_connector_dpms,
 	.save = vmw_du_connector_save,
 	.restore = vmw_du_connector_restore,
@@ -458,7 +598,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 	sou->base.pref_mode = NULL;
 	sou->base.is_implicit = true;
 
-	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+	drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
 			   DRM_MODE_CONNECTOR_VIRTUAL);
 	connector->status = vmw_du_connector_detect(connector, true);
 
@@ -481,7 +621,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 	return 0;
 }
 
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	int i, ret;
@@ -516,7 +656,9 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
 		vmw_sou_init(dev_priv, i);
 
-	DRM_INFO("Screen objects system initialized\n");
+	dev_priv->active_display_unit = vmw_du_screen_object;
+
+	DRM_INFO("Screen Objects Display Unit initialized\n");
 
 	return 0;
 
@@ -529,7 +671,7 @@ err_no_mem:
 	return ret;
 }
 
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
+int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 
@@ -543,35 +685,369 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
 	return 0;
 }
 
+static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+				  struct vmw_framebuffer *framebuffer)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+			     base)->buffer;
+	int depth = framebuffer->base.depth;
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd;
+
+	/* Emulate RGBA support; contrary to svga_reg.h, this is not
+	 * supported by hosts. This is only a problem if we are reading
+	 * this value later and expecting what we uploaded back.
+	 */
+	if (depth == 32)
+		depth = 24;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (!cmd) {
+		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header = SVGA_CMD_DEFINE_GMRFB;
+	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+	cmd->body.format.colorDepth = depth;
+	cmd->body.format.reserved = 0;
+	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
+	/* Buffer is reserved in vram or GMR */
+	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
 /**
- * Returns if this unit can be page flipped.
- * Must be called with the mode_config mutex held.
+ * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
+ * blit surface to screen command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in the command and translates the cliprects
+ * to be relative to the encoded destination bounding box.
  */
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
-				     struct drm_crtc *crtc)
+static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
 {
-	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+	struct vmw_kms_sou_surface_dirty *sdirty =
+		container_of(dirty, typeof(*sdirty), base);
+	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
+	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
+	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
+	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+	int i;
+
+	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+	cmd->header.size = sizeof(cmd->body) + region_size;
+
+	/*
+	 * Use the destination bounding box to specify destination - and
+	 * source bounding regions.
+	 */
+	cmd->body.destRect.left = sdirty->left;
+	cmd->body.destRect.right = sdirty->right;
+	cmd->body.destRect.top = sdirty->top;
+	cmd->body.destRect.bottom = sdirty->bottom;
+
+	cmd->body.srcRect.left = sdirty->left + trans_x;
+	cmd->body.srcRect.right = sdirty->right + trans_x;
+	cmd->body.srcRect.top = sdirty->top + trans_y;
+	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
+
+	cmd->body.srcImage.sid = sdirty->sid;
+	cmd->body.destScreenId = dirty->unit->unit;
+
+	/* Blits are relative to the destination rect. Translate. */
+	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
+		blit->left -= sdirty->left;
+		blit->right -= sdirty->left;
+		blit->top -= sdirty->top;
+		blit->bottom -= sdirty->top;
+	}
 
-	if (!sou->base.is_implicit)
-		return true;
+	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
 
-	if (dev_priv->sou_priv->num_implicit != 1)
-		return false;
+	sdirty->left = sdirty->top = S32_MAX;
+	sdirty->right = sdirty->bottom = S32_MIN;
+}
 
-	return true;
+/**
+ * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a SVGASignedRect cliprect and updates the bounding box of the
+ * BLIT_SURFACE_TO_SCREEN command.
+ */
+static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_surface_dirty *sdirty =
+		container_of(dirty, typeof(*sdirty), base);
+	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+
+	/* Destination rect. */
+	blit += dirty->num_hits;
+	blit->left = dirty->unit_x1;
+	blit->top = dirty->unit_y1;
+	blit->right = dirty->unit_x2;
+	blit->bottom = dirty->unit_y2;
+
+	/* Destination bounding box */
+	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+
+	dirty->num_hits++;
 }
 
 /**
- * Update the implicit fb to the current fb of this crtc.
- * Must be called with the mode_config mutex held.
+ * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips.
+ * @inc: Increment to use when looping over @clips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
  */
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
-					      struct drm_crtc *crtc)
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+				 struct vmw_framebuffer *framebuffer,
+				 struct drm_clip_rect *clips,
+				 struct drm_vmw_rect *vclips,
+				 struct vmw_resource *srf,
+				 s32 dest_x,
+				 s32 dest_y,
+				 unsigned num_clips, int inc,
+				 struct vmw_fence_obj **out_fence)
 {
-	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+	struct vmw_framebuffer_surface *vfbs =
+		container_of(framebuffer, typeof(*vfbs), base);
+	struct vmw_kms_sou_surface_dirty sdirty;
+	int ret;
 
-	BUG_ON(!sou->base.is_implicit);
+	if (!srf)
+		srf = &vfbs->surface->res;
 
-	dev_priv->sou_priv->implicit_fb =
-		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+	ret = vmw_kms_helper_resource_prepare(srf, true);
+	if (ret)
+		return ret;
+
+	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
+	sdirty.base.clip = vmw_sou_surface_clip;
+	sdirty.base.dev_priv = dev_priv;
+	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
+	  sizeof(SVGASignedRect) * num_clips;
+
+	sdirty.sid = srf->id;
+	sdirty.left = sdirty.top = S32_MAX;
+	sdirty.right = sdirty.bottom = S32_MIN;
+	sdirty.dst_x = dest_x;
+	sdirty.dst_y = dest_y;
+
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+				   dest_x, dest_y, num_clips, inc,
+				   &sdirty.base);
+	vmw_kms_helper_resource_finish(srf, out_fence);
+
+	return ret;
+}
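
The function above is the template for rolling a vmw_kms_dirty closure: set
fifo_reserve_size to the worst-case command size, let ->clip encode one rect
at dirty->cmd, and let ->fifo_commit submit the batch. A hedged minimal
closure, with example_blit standing in for whatever hypothetical per-clip FIFO
command is being encoded:

	struct example_blit {			/* hypothetical per-clip command */
		u32 header;
		SVGASignedRect rect;
	};

	static void example_clip(struct vmw_kms_dirty *dirty)
	{
		struct example_blit *blit = dirty->cmd;

		/* The helper reserved fifo_reserve_size bytes at dirty->cmd. */
		blit += dirty->num_hits;
		blit->rect.left = dirty->unit_x1;
		blit->rect.top = dirty->unit_y1;
		blit->rect.right = dirty->unit_x2;
		blit->rect.bottom = dirty->unit_y2;
		dirty->num_hits++;
	}

	static void example_fifo_commit(struct vmw_kms_dirty *dirty)
	{
		vmw_fifo_commit(dirty->dev_priv,
				sizeof(struct example_blit) * dirty->num_hits);
	}
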
+
+/**
+ * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of readback clips.
+ */
+static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	vmw_fifo_commit(dirty->dev_priv,
+			sizeof(struct vmw_kms_sou_dmabuf_blit) *
+			dirty->num_hits);
+}
+
+/**
+ * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
+ */
+static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+
+	blit += dirty->num_hits;
+	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+	blit->body.destScreenId = dirty->unit->unit;
+	blit->body.srcOrigin.x = dirty->fb_x;
+	blit->body.srcOrigin.y = dirty->fb_y;
+	blit->body.destRect.left = dirty->unit_x1;
+	blit->body.destRect.top = dirty->unit_y1;
+	blit->body.destRect.right = dirty->unit_x2;
+	blit->body.destRect.bottom = dirty->unit_y2;
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @clips: Array of clip rects.
+ * @num_clips: Number of clip rects in @clips.
+ * @increment: Increment to use when looping over @clips.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL, in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment,
+				bool interruptible,
+				struct vmw_fence_obj **out_fence)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+			     base)->buffer;
+	struct vmw_kms_dirty dirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+					    false);
+	if (ret)
+		return ret;
+
+	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+	if (unlikely(ret != 0))
+		goto out_revert;
+
+	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
+	dirty.clip = vmw_sou_dmabuf_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+		num_clips;
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
+				   0, 0, num_clips, increment, &dirty);
+	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+
+	return ret;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(buf);
+
+	return ret;
+}
+
+
+/**
+ * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of readback clips.
+ */
+static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	vmw_fifo_commit(dirty->dev_priv,
+			sizeof(struct vmw_kms_sou_readback_blit) *
+			dirty->num_hits);
+}
+
+/**
+ * vmw_sou_readback_clip - Callback to encode a readback cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
+ */
+static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
+
+	blit += dirty->num_hits;
+	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+	blit->body.srcScreenId = dirty->unit->unit;
+	blit->body.destOrigin.x = dirty->fb_x;
+	blit->body.destOrigin.y = dirty->fb_y;
+	blit->body.srcRect.left = dirty->unit_x1;
+	blit->body.srcRect.top = dirty->unit_y1;
+	blit->body.srcRect.right = dirty->unit_x2;
+	blit->body.srcRect.bottom = dirty->unit_y2;
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_readback - Perform a readback from the screen object system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+			 struct drm_file *file_priv,
+			 struct vmw_framebuffer *vfb,
+			 struct drm_vmw_fence_rep __user *user_fence_rep,
+			 struct drm_vmw_rect *vclips,
+			 uint32_t num_clips)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+	struct vmw_kms_dirty dirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
+	if (ret)
+		return ret;
+
+	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+	if (unlikely(ret != 0))
+		goto out_revert;
+
+	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
+	dirty.clip = vmw_sou_readback_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
+		num_clips;
+	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
+				   0, 0, num_clips, 1, &dirty);
+	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+				     user_fence_rep);
+
+	return ret;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(buf);
+
+	return ret;
 }
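+
+/*
+ * Usage sketch, illustrative only: a kernel-internal readback that does
+ * not report a fence to user-space passes NULL for both @file_priv and
+ * @user_fence_rep:
+ *
+ *	ret = vmw_kms_sou_readback(dev_priv, NULL, vfb, NULL,
+ *				   vclips, num_clips);
+ */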
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 6a4584a43aa6..bba1ee395478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,12 +27,15 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_shader {
 	struct vmw_resource res;
 	SVGA3dShaderType type;
 	uint32_t size;
+	uint8_t num_input_sig;
+	uint8_t num_output_sig;
 };
 
 struct vmw_user_shader {
@@ -40,8 +43,18 @@ struct vmw_user_shader {
 	struct vmw_shader shader;
 };
 
+struct vmw_dx_shader {
+	struct vmw_resource res;
+	struct vmw_resource *ctx;
+	struct vmw_resource *cotable;
+	u32 id;
+	bool committed;
+	struct list_head cotable_head;
+};
+
 static uint64_t vmw_user_shader_size;
 static uint64_t vmw_shader_size;
+static size_t vmw_shader_dx_size;
 
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -55,6 +68,18 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_shader_destroy(struct vmw_resource *res);
 
+static int vmw_dx_shader_create(struct vmw_resource *res);
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+					enum vmw_cmdbuf_res_state state);
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
+
 static const struct vmw_user_resource_conv user_shader_conv = {
 	.object_type = VMW_RES_SHADER,
 	.base_obj_to_res = vmw_user_shader_base_to_res,
@@ -77,6 +102,24 @@ static const struct vmw_res_func vmw_gb_shader_func = {
 	.unbind = vmw_gb_shader_unbind
 };
 
+static const struct vmw_res_func vmw_dx_shader_func = {
+	.res_type = vmw_res_shader,
+	.needs_backup = true,
+	.may_evict = false,
+	.type_name = "dx shaders",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_shader_create,
+	/*
+	 * The destroy callback is only called with a committed resource on
+	 * context destroy, in which case we destroy the cotable anyway,
+	 * so there's no need to destroy DX shaders separately.
+	 */
+	.destroy = NULL,
+	.bind = vmw_dx_shader_bind,
+	.unbind = vmw_dx_shader_unbind,
+	.commit_notify = vmw_dx_shader_commit_notify,
+};
+
 /**
  * Shader management:
  */
@@ -87,25 +130,42 @@ vmw_res_to_shader(struct vmw_resource *res)
 	return container_of(res, struct vmw_shader, res);
 }
 
+/**
+ * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
+ * struct vmw_dx_shader
+ *
+ * @res: Pointer to the struct vmw_resource.
+ */
+static inline struct vmw_dx_shader *
+vmw_res_to_dx_shader(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_dx_shader, res);
+}
+
 static void vmw_hw_shader_destroy(struct vmw_resource *res)
 {
-	(void) vmw_gb_shader_destroy(res);
+	if (likely(res->func->destroy))
+		(void) res->func->destroy(res);
+	else
+		res->id = -1;
 }
 
 static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 			      struct vmw_resource *res,
 			      uint32_t size,
 			      uint64_t offset,
 			      SVGA3dShaderType type,
+			      uint8_t num_input_sig,
+			      uint8_t num_output_sig,
 			      struct vmw_dma_buffer *byte_code,
 			      void (*res_free) (struct vmw_resource *res))
 {
 	struct vmw_shader *shader = vmw_res_to_shader(res);
 	int ret;
 
-	ret = vmw_resource_init(dev_priv, res, true,
-				res_free, &vmw_gb_shader_func);
-
+	ret = vmw_resource_init(dev_priv, res, true, res_free,
+				&vmw_gb_shader_func);
 
 	if (unlikely(ret != 0)) {
 		if (res_free)
@@ -122,11 +182,17 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 	}
 	shader->size = size;
 	shader->type = type;
+	shader->num_input_sig = num_input_sig;
+	shader->num_output_sig = num_output_sig;
 
 	vmw_resource_activate(res, vmw_hw_shader_destroy);
 	return 0;
 }
 
+/*
+ * GB shader code:
+ */
+
 static int vmw_gb_shader_create(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
@@ -165,7 +231,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
 	cmd->body.type = shader->type;
 	cmd->body.sizeInBytes = shader->size;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
 
@@ -259,7 +325,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_scrub(&res->binding_head);
+	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -275,12 +341,327 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
+/*
+ * DX shader code:
+ */
+
+/**
+ * vmw_dx_shader_commit_notify - Notify that a shader operation has been
+ * committed to hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the shader resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+					enum vmw_cmdbuf_res_state state)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	if (state == VMW_CMDBUF_RES_ADD) {
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_cotable_add_resource(shader->cotable,
+					 &shader->cotable_head);
+		shader->committed = true;
+		res->id = shader->id;
+		mutex_unlock(&dev_priv->binding_mutex);
+	} else {
+		mutex_lock(&dev_priv->binding_mutex);
+		list_del_init(&shader->cotable_head);
+		shader->committed = false;
+		res->id = -1;
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+}
+
+/**
+ * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function reverts a scrub operation.
+ */
+static int vmw_dx_shader_unscrub(struct vmw_resource *res)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd;
+
+	if (!list_empty(&shader->cotable_head) || !shader->committed)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+				  shader->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "scrubbing.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = shader->ctx->id;
+	cmd->body.shid = shader->id;
+	cmd->body.mobid = res->backup->base.mem.start;
+	cmd->body.offsetInBytes = res->backup_offset;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_create - The DX shader create callback
+ *
+ * @res: The DX shader resource
+ *
+ * The create callback is called as part of resource validation and
+ * makes sure that we unscrub the shader if it's previously been scrubbed.
+ */
+static int vmw_dx_shader_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	int ret = 0;
+
+	WARN_ON_ONCE(!shader->committed);
+
+	if (!list_empty(&res->mob_head)) {
+		mutex_lock(&dev_priv->binding_mutex);
+		ret = vmw_dx_shader_unscrub(res);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+
+	res->id = shader->id;
+	return ret;
+}
+
+/**
+ * vmw_dx_shader_bind - The DX shader bind callback
+ *
+ * @res: The DX shader resource
+ * @val_buf: Pointer to the validate buffer.
+ *
+ */
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_shader_unscrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function unbinds a MOB from the DX shader without requiring the
+ * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
+ * However, once the driver eventually decides to unbind the MOB, it doesn't
+ * need to access the context.
+ */
+static int vmw_dx_shader_scrub(struct vmw_resource *res)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd;
+
+	if (list_empty(&shader->cotable_head))
+		return 0;
+
+	WARN_ON_ONCE(!shader->committed);
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "scrubbing.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = shader->ctx->id;
+	cmd->body.shid = res->id;
+	cmd->body.mobid = SVGA3D_INVALID_ID;
+	cmd->body.offsetInBytes = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	res->id = -1;
+	list_del_init(&shader->cotable_head);
 
 	return 0;
 }
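+
+/*
+ * A minimal usage sketch, illustrative only and not used by the driver:
+ * scrubbing detaches the shader's MOB so its backing store can be moved
+ * or evicted; unscrubbing rebinds it at res->backup_offset. Both expect
+ * binding_mutex to be held.
+ */
+static inline void vmw_dx_shader_rebind_sketch(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	WARN_ON(vmw_dx_shader_scrub(res));	/* res->id becomes -1 */
+	/* ... the backup MOB may be moved or evicted here ... */
+	WARN_ON(vmw_dx_shader_unscrub(res));	/* rebind the MOB */
+	mutex_unlock(&dev_priv->binding_mutex);
+}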
 
 /**
+ * vmw_dx_shader_unbind - The dx shader unbind callback.
+ *
+ * @res: The shader resource
+ * @readback: Whether this is a readback unbind. Currently unused.
+ * @val_buf: MOB buffer information.
+ */
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+				bool readback,
+				struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_fence_obj *fence;
+	int ret;
+
+	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	ret = vmw_dx_shader_scrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	if (ret)
+		return ret;
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+	vmw_fence_single_bo(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
+ * DX shaders.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ *
+ * Scrubs all shader MOBs so that any subsequent shader unbind or shader
+ * destroy operation won't need to swap in the context.
+ */
+void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+				      struct list_head *list,
+				      bool readback)
+{
+	struct vmw_dx_shader *entry, *next;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+	list_for_each_entry_safe(entry, next, list, cotable_head) {
+		WARN_ON(vmw_dx_shader_scrub(&entry->res));
+		if (!readback)
+			entry->committed = false;
+	}
+}
+
+/**
+ * vmw_dx_shader_res_free - The DX shader free callback
+ *
+ * @res: The shader resource
+ *
+ * Frees the DX shader resource and updates memory accounting.
+ */
+static void vmw_dx_shader_res_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+
+	vmw_resource_unreference(&shader->cotable);
+	kfree(shader);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+}
+
+/**
+ * vmw_dx_shader_add - Add a shader resource as a command buffer managed
+ * resource.
+ *
+ * @man: The command buffer resource manager.
+ * @ctx: Pointer to the context resource.
+ * @user_key: The id used for this shader.
+ * @shader_type: The shader type.
+ * @list: The list of staged command buffer managed resources.
+ */
+int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+		      struct vmw_resource *ctx,
+		      u32 user_key,
+		      SVGA3dShaderType shader_type,
+		      struct list_head *list)
+{
+	struct vmw_dx_shader *shader;
+	struct vmw_resource *res;
+	struct vmw_private *dev_priv = ctx->dev_priv;
+	int ret;
+
+	if (!vmw_shader_dx_size)
+		vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
+		return -EINVAL;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
+				   false, true);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		return ret;
+	}
+
+	shader = kmalloc(sizeof(*shader), GFP_KERNEL);
+	if (!shader) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+		return -ENOMEM;
+	}
+
+	res = &shader->res;
+	shader->ctx = ctx;
+	shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+	shader->id = user_key;
+	shader->committed = false;
+	INIT_LIST_HEAD(&shader->cotable_head);
+	ret = vmw_resource_init(dev_priv, res, true,
+				vmw_dx_shader_res_free, &vmw_dx_shader_func);
+	if (ret)
+		goto out_resource_init;
+
+	/*
+	 * The user_key name-space is not per shader type for DX shaders,
+	 * so when hashing, use a single zero shader type.
+	 */
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+				 vmw_shader_key(user_key, 0),
+				 res, list);
+	if (ret)
+		goto out_resource_init;
+
+	res->id = shader->id;
+	vmw_resource_activate(res, vmw_hw_shader_destroy);
+
+out_resource_init:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
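+/*
+ * Illustrative helper, not used by the driver: since DX shaders hash
+ * with a single zero shader type (see vmw_dx_shader_add() above), a
+ * matching lookup would pass type 0 as well:
+ */
+static inline struct vmw_resource *
+vmw_dx_shader_lookup_sketch(struct vmw_cmdbuf_res_manager *man, u32 user_key)
+{
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
+				     vmw_shader_key(user_key, 0));
+}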
+
+/**
  * User-space shader management:
  */
 
@@ -341,6 +722,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 				 size_t shader_size,
 				 size_t offset,
 				 SVGA3dShaderType shader_type,
+				 uint8_t num_input_sig,
+				 uint8_t num_output_sig,
 				 struct ttm_object_file *tfile,
 				 u32 *handle)
 {
@@ -383,7 +766,8 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 	 */
 
 	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
-				 offset, shader_type, buffer,
+				 offset, shader_type, num_input_sig,
+				 num_output_sig, buffer,
 				 vmw_user_shader_free);
 	if (unlikely(ret != 0))
 		goto out;
@@ -407,11 +791,11 @@ out:
 }
 
 
-struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
-				      struct vmw_dma_buffer *buffer,
-				      size_t shader_size,
-				      size_t offset,
-				      SVGA3dShaderType shader_type)
+static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+					     struct vmw_dma_buffer *buffer,
+					     size_t shader_size,
+					     size_t offset,
+					     SVGA3dShaderType shader_type)
 {
 	struct vmw_shader *shader;
 	struct vmw_resource *res;
@@ -449,7 +833,7 @@ struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
 	 * From here on, the destructor takes over resource freeing.
 	 */
 	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
-				 offset, shader_type, buffer,
+				 offset, shader_type, 0, 0, buffer,
 				 vmw_shader_free);
 
 out_err:
@@ -457,19 +841,20 @@ out_err:
 }
 
 
-int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
+static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+			     enum drm_vmw_shader_type shader_type_drm,
+			     u32 buffer_handle, size_t size, size_t offset,
+			     uint8_t num_input_sig, uint8_t num_output_sig,
+			     uint32_t *shader_handle)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_vmw_shader_create_arg *arg =
-		(struct drm_vmw_shader_create_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_dma_buffer *buffer = NULL;
 	SVGA3dShaderType shader_type;
 	int ret;
 
-	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
-		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+	if (buffer_handle != SVGA3D_INVALID_ID) {
+		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
 					     &buffer);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not find buffer for shader "
@@ -478,23 +863,20 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 		}
 
 		if ((u64)buffer->base.num_pages * PAGE_SIZE <
-		    (u64)arg->size + (u64)arg->offset) {
+		    (u64)size + (u64)offset) {
 			DRM_ERROR("Illegal buffer- or shader size.\n");
 			ret = -EINVAL;
 			goto out_bad_arg;
 		}
 	}
 
-	switch (arg->shader_type) {
+	switch (shader_type_drm) {
 	case drm_vmw_shader_type_vs:
 		shader_type = SVGA3D_SHADERTYPE_VS;
 		break;
 	case drm_vmw_shader_type_ps:
 		shader_type = SVGA3D_SHADERTYPE_PS;
 		break;
-	case drm_vmw_shader_type_gs:
-		shader_type = SVGA3D_SHADERTYPE_GS;
-		break;
 	default:
 		DRM_ERROR("Illegal shader type.\n");
 		ret = -EINVAL;
@@ -505,8 +887,9 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0))
 		goto out_bad_arg;
 
-	ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
-				    shader_type, tfile, &arg->shader_handle);
+	ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
+				    shader_type, num_input_sig,
+				    num_output_sig, tfile, shader_handle);
 
 	ttm_read_unlock(&dev_priv->reservation_sem);
 out_bad_arg:
@@ -515,7 +898,7 @@ out_bad_arg:
 }
 
 /**
- * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * vmw_shader_id_ok - Check whether a compat shader user key and
  * shader type are within valid bounds.
  *
  * @user_key: User space id of the shader.
@@ -523,13 +906,13 @@ out_bad_arg:
  *
  * Returns true if valid false if not.
  */
-static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
 {
 	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
 }
 
 /**
- * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
+ * vmw_shader_key - Compute a hash key suitable for a compat shader.
  *
  * @user_key: User space id of the shader.
  * @shader_type: Shader type.
@@ -537,13 +920,13 @@ static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
  * Returns a hash key suitable for a command buffer managed resource
  * manager hash table.
  */
-static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
 {
 	return user_key | (shader_type << 20);
 }
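+
+/*
+ * Illustrative only, not part of the driver: the inverse of
+ * vmw_shader_key(). The user key occupies bits 0..19 and the shader
+ * type the bits above, so both halves can be recovered from a key:
+ */
+static inline u32 vmw_shader_key_to_user_key(u32 key)
+{
+	return key & ((1 << 20) - 1);
+}
+
+static inline SVGA3dShaderType vmw_shader_key_to_type(u32 key)
+{
+	return (SVGA3dShaderType) (key >> 20);
+}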
 
 /**
- * vmw_compat_shader_remove - Stage a compat shader for removal.
+ * vmw_shader_remove - Stage a compat shader for removal.
  *
  * @man: Pointer to the compat shader manager identifying the shader namespace.
  * @user_key: The key that is used to identify the shader. The key is
@@ -551,17 +934,18 @@ static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
  * @shader_type: Shader type.
  * @list: Caller's list of staged command buffer resource actions.
  */
-int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
-			     u32 user_key, SVGA3dShaderType shader_type,
-			     struct list_head *list)
+int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+		      u32 user_key, SVGA3dShaderType shader_type,
+		      struct list_head *list)
 {
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	struct vmw_resource *dummy;
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
-	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
-				     vmw_compat_shader_key(user_key,
-							   shader_type),
-				     list);
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
+				     vmw_shader_key(user_key, shader_type),
+				     list, &dummy);
 }
 
 /**
@@ -591,7 +975,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 	int ret;
 	struct vmw_resource *res;
 
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
 	/* Allocate and pin a DMA buffer */
@@ -628,8 +1012,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		goto no_reserve;
 
-	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
-				 vmw_compat_shader_key(user_key, shader_type),
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+				 vmw_shader_key(user_key, shader_type),
 				 res, list);
 	vmw_resource_unreference(&res);
 no_reserve:
@@ -639,7 +1023,7 @@ out:
 }
 
 /**
- * vmw_compat_shader_lookup - Look up a compat shader
+ * vmw_shader_lookup - Look up a compat shader
  *
  * @man: Pointer to the command buffer managed resource manager identifying
  * the shader namespace.
@@ -650,14 +1034,26 @@ out:
  * found. An error pointer otherwise.
  */
 struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
-			 u32 user_key,
-			 SVGA3dShaderType shader_type)
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key,
+		  SVGA3dShaderType shader_type)
 {
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return ERR_PTR(-EINVAL);
 
-	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
-				     vmw_compat_shader_key(user_key,
-							   shader_type));
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
+				     vmw_shader_key(user_key, shader_type));
+}
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_vmw_shader_create_arg *arg =
+		(struct drm_vmw_shader_create_arg *)data;
+
+	return vmw_shader_define(dev, file_priv, arg->shader_type,
+				 arg->buffer_handle,
+				 arg->size, arg->offset,
+				 0, 0,
+				 &arg->shader_handle);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
new file mode 100644
index 000000000000..5a73eebd0f35
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -0,0 +1,555 @@
+/**************************************************************************
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
+
+/*
+ * Currently, the only reason we need to keep track of views is that if we
+ * destroy a hardware surface, all views pointing to it must also be destroyed,
+ * otherwise the device will error.
+ * So in particular, if a surface is evicted, we must destroy all views
+ * pointing to it, and all context bindings of those views. Similarly we must
+ * restore the view bindings, views and surfaces pointed to by the views when
+ * a context is referenced in the command stream.
+ */
+
+/**
+ * struct vmw_view - view metadata
+ *
+ * @rcu: RCU head used to defer freeing of the view structure.
+ * @res: The struct vmw_resource we derive from
+ * @ctx: Non-refcounted pointer to the context this view belongs to.
+ * @srf: Refcounted pointer to the surface pointed to by this view.
+ * @cotable: Refcounted pointer to the cotable holding this view.
+ * @srf_head: List head for the surface-to-view list.
+ * @cotable_head: List head for the cotable-to_view list.
+ * @view_type: View type.
+ * @view_id: User-space per context view id. Currently used also as per
+ * context device view id.
+ * @cmd_size: Size of the SVGA3D define view command that we've copied from the
+ * command stream.
+ * @committed: Whether the view is actually created or pending creation at the
+ * device level.
+ * @cmd: The SVGA3D define view command copied from the command stream.
+ */
+struct vmw_view {
+	struct rcu_head rcu;
+	struct vmw_resource res;
+	struct vmw_resource *ctx;      /* Immutable */
+	struct vmw_resource *srf;      /* Immutable */
+	struct vmw_resource *cotable;  /* Immutable */
+	struct list_head srf_head;     /* Protected by binding_mutex */
+	struct list_head cotable_head; /* Protected by binding_mutex */
+	unsigned view_type;            /* Immutable */
+	unsigned view_id;              /* Immutable */
+	u32 cmd_size;                  /* Immutable */
+	bool committed;                /* Protected by binding_mutex */
+	u32 cmd[1];                    /* Immutable */
+};
+
+static int vmw_view_create(struct vmw_resource *res);
+static int vmw_view_destroy(struct vmw_resource *res);
+static void vmw_hw_view_destroy(struct vmw_resource *res);
+static void vmw_view_commit_notify(struct vmw_resource *res,
+				   enum vmw_cmdbuf_res_state state);
+
+static const struct vmw_res_func vmw_view_func = {
+	.res_type = vmw_res_view,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "DX view",
+	.backup_placement = NULL,
+	.create = vmw_view_create,
+	.commit_notify = vmw_view_commit_notify,
+};
+
+/**
+ * struct vmw_view_define - view define command body stub
+ *
+ * @view_id: The device id of the view being defined
+ * @sid: The surface id of the view being defined
+ *
+ * This generic struct is used by the code to change @view_id and @sid of a
+ * saved view define command.
+ */
+struct vmw_view_define {
+	uint32 view_id;
+	uint32 sid;
+};
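+
+/*
+ * Illustrative helper, not used by the driver: rewriting the sid of a
+ * saved view define command through the stub, as vmw_view_create()
+ * below does when the surface id has changed:
+ */
+static inline void vmw_view_define_patch_sid(struct vmw_view_define *body,
+					     uint32 new_sid)
+{
+	body->sid = new_sid;
+}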
+
+/**
+ * vmw_view - Convert a struct vmw_resource to a struct vmw_view
+ *
+ * @res: Pointer to the resource to convert.
+ *
+ * Returns a pointer to a struct vmw_view.
+ */
+static struct vmw_view *vmw_view(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_view, res);
+}
+
+/**
+ * vmw_view_commit_notify - Notify that a view operation has been committed to
+ * hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the view resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_view_commit_notify(struct vmw_resource *res,
+				   enum vmw_cmdbuf_res_state state)
+{
+	struct vmw_view *view = vmw_view(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (state == VMW_CMDBUF_RES_ADD) {
+		struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+
+		list_add_tail(&view->srf_head, &srf->view_list);
+		vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+		view->committed = true;
+		res->id = view->view_id;
+
+	} else {
+		list_del_init(&view->cotable_head);
+		list_del_init(&view->srf_head);
+		view->committed = false;
+		res->id = -1;
+	}
+	mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_create - Create a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Create a hardware view. Typically used if that view has previously been
+ * destroyed by an eviction operation.
+ */
+static int vmw_view_create(struct vmw_resource *res)
+{
+	struct vmw_view *view = vmw_view(res);
+	struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct vmw_view_define body;
+	} *cmd;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (!view->committed) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return 0;
+	}
+
+	cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
+				  view->ctx->id);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving FIFO space for view creation.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+	memcpy(cmd, &view->cmd, view->cmd_size);
+	WARN_ON(cmd->body.view_id != view->view_id);
+	/* Sid may have changed due to surface eviction. */
+	WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
+	cmd->body.sid = view->srf->id;
+	vmw_fifo_commit(res->dev_priv, view->cmd_size);
+	res->id = view->view_id;
+	list_add_tail(&view->srf_head, &srf->view_list);
+	vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_view_destroy - Destroy a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view. Typically used on unexpected termination of the
+ * owning process or if the surface the view is pointing to is destroyed.
+ */
+static int vmw_view_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_view *view = vmw_view(res);
+	struct {
+		SVGA3dCmdHeader header;
+		union vmw_view_destroy body;
+	} *cmd;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	vmw_binding_res_list_scrub(&res->binding_head);
+
+	if (!view->committed || res->id == -1)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving FIFO space for view "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = vmw_view_destroy_cmds[view->view_type];
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.view_id = view->view_id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	res->id = -1;
+	list_del_init(&view->cotable_head);
+	list_del_init(&view->srf_head);
+
+	return 0;
+}
+
+/**
+ * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view if it's still present.
+ */
+static void vmw_hw_view_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	WARN_ON(vmw_view_destroy(res));
+	res->id = -1;
+	mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Returns a hash key suitable for the command buffer managed resource
+ * manager hash table.
+ */
+static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
+{
+	return user_key | (view_type << 20);
+}
+
+/**
+ * vmw_view_id_ok - Basic view id and type range checks.
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Checks that the view id and type (typically provided by user-space) are
+ * valid.
+ */
+static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
+{
+	return (user_key < SVGA_COTABLE_MAX_IDS &&
+		view_type < vmw_view_max);
+}
+
+/**
+ * vmw_view_res_free - resource res_free callback for view resources
+ *
+ * @res: Pointer to a struct vmw_resource
+ *
+ * Frees memory and memory accounting held by a struct vmw_view.
+ */
+static void vmw_view_res_free(struct vmw_resource *res)
+{
+	struct vmw_view *view = vmw_view(res);
+	size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	vmw_resource_unreference(&view->cotable);
+	vmw_resource_unreference(&view->srf);
+	kfree_rcu(view, rcu);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_view_add - Create a view resource and stage it for addition
+ * as a command buffer managed resource.
+ *
+ * @man: Pointer to the cmdbuf resource manager identifying the view namespace.
+ * @ctx: Pointer to a struct vmw_resource identifying the active context.
+ * @srf: Pointer to a struct vmw_resource identifying the surface the view
+ * points to.
+ * @view_type: The view type deduced from the view create command.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type and to the context.
+ * @cmd: Pointer to the view create command in the command stream.
+ * @cmd_size: Size of the view create command in the command stream.
+ * @list: Caller's list of staged command buffer resource actions.
+ */
+int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+		 struct vmw_resource *ctx,
+		 struct vmw_resource *srf,
+		 enum vmw_view_type view_type,
+		 u32 user_key,
+		 const void *cmd,
+		 size_t cmd_size,
+		 struct list_head *list)
+{
+	static const size_t vmw_view_define_sizes[] = {
+		[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
+		[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
+		[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+	};
+
+	struct vmw_private *dev_priv = ctx->dev_priv;
+	struct vmw_resource *res;
+	struct vmw_view *view;
+	size_t size;
+	int ret;
+
+	if (!vmw_view_id_ok(user_key, view_type)) {
+		DRM_ERROR("Illegal view add view id.\n");
+		return -EINVAL;
+	}
+
+	/* Validate view_type before using it as an array index. */
+	if (cmd_size != vmw_view_define_sizes[view_type] +
+	    sizeof(SVGA3dCmdHeader)) {
+		DRM_ERROR("Illegal view create command size.\n");
+		return -EINVAL;
+	}
+
+	size = offsetof(struct vmw_view, cmd) + cmd_size;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for view"
+				  " creation.\n");
+		return ret;
+	}
+
+	view = kmalloc(size, GFP_KERNEL);
+	if (!view) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+		return -ENOMEM;
+	}
+
+	res = &view->res;
+	view->ctx = ctx;
+	view->srf = vmw_resource_reference(srf);
+	view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
+	view->view_type = view_type;
+	view->view_id = user_key;
+	view->cmd_size = cmd_size;
+	view->committed = false;
+	INIT_LIST_HEAD(&view->srf_head);
+	INIT_LIST_HEAD(&view->cotable_head);
+	memcpy(&view->cmd, cmd, cmd_size);
+	ret = vmw_resource_init(dev_priv, res, true,
+				vmw_view_res_free, &vmw_view_func);
+	if (ret)
+		goto out_resource_init;
+
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
+				 vmw_view_key(user_key, view_type),
+				 res, list);
+	if (ret)
+		goto out_resource_init;
+
+	res->id = view->view_id;
+	vmw_resource_activate(res, vmw_hw_view_destroy);
+
+out_resource_init:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_view_remove - Stage a view for removal.
+ *
+ * @man: Pointer to the view manager identifying the view namespace.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type.
+ * @view_type: View type
+ * @list: Caller's list of staged command buffer resource actions.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
+ */
+int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+		    u32 user_key, enum vmw_view_type view_type,
+		    struct list_head *list,
+		    struct vmw_resource **res_p)
+{
+	if (!vmw_view_id_ok(user_key, view_type)) {
+		DRM_ERROR("Illegal view remove view id.\n");
+		return -EINVAL;
+	}
+
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
+				     vmw_view_key(user_key, view_type),
+				     list, res_p);
+}
+
+/**
+ * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views belonging to a cotable.
+ * @readback: Unused. Needed for function interface only.
+ *
+ * This function evicts all views belonging to a cotable.
+ * It must be called with the binding_mutex held, and the caller must hold
+ * a reference to the view resource. This is typically called before the
+ * cotable is paged out.
+ */
+void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+				   struct list_head *list,
+				   bool readback)
+{
+	struct vmw_view *entry, *next;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+	list_for_each_entry_safe(entry, next, list, cotable_head)
+		WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_surface_list_destroy - Evict all views pointing to a surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views pointing to a surface.
+ *
+ * This function evicts all views pointing to a surface. This is typically
+ * called before the surface is evicted.
+ */
+void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+				   struct list_head *list)
+{
+	struct vmw_view *entry, *next;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+	list_for_each_entry_safe(entry, next, list, srf_head)
+		WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
+ * pointing to.
+ *
+ * @res: pointer to a view resource.
+ *
+ * Note that the view itself holds a reference, so as long as
+ * the view resource is alive, the surface resource will be.
+ */
+struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
+{
+	return vmw_view(res)->srf;
+}
+
+/**
+ * vmw_view_lookup - Look up a view.
+ *
+ * @man: The context's cmdbuf ref manager.
+ * @view_type: The view type.
+ * @user_key: The view user id.
+ *
+ * returns a refcounted pointer to a view or an error pointer if not found.
+ */
+struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+				     enum vmw_view_type view_type,
+				     u32 user_key)
+{
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
+				     vmw_view_key(user_key, view_type));
+}
+
+const u32 vmw_view_destroy_cmds[] = {
+	[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+	[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+	[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+};
+
+const SVGACOTableType vmw_view_cotables[] = {
+	[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
+	[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
+	[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+};
+
+const SVGACOTableType vmw_so_cotables[] = {
+	[vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
+	[vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
+	[vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
+	[vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
+	[vmw_so_ss] = SVGA_COTABLE_SAMPLER,
+	[vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
+};
+
+
+/* To remove unused function warning */
+static void vmw_so_build_asserts(void) __attribute__((used));
+
+
+/*
+ * This function is unused at run-time, and only used to hold various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_so_build_asserts(void)
+{
+	/* Assert that our vmw_view_cmd_to_type() function is correct. */
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
+
+	/* Assert that our "one body fits all" assumption is valid */
+	BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
+
+	/* Assert that the view key space can hold all view ids. */
+	BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
+
+	/*
+	 * Assert that the offset of sid in all view define commands
+	 * is what we assume it to be.
+	 */
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
new file mode 100644
index 000000000000..268738387b5e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -0,0 +1,160 @@
+/**************************************************************************
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef VMW_SO_H
+#define VMW_SO_H
+
+enum vmw_view_type {
+	vmw_view_sr,
+	vmw_view_rt,
+	vmw_view_ds,
+	vmw_view_max,
+};
+
+enum vmw_so_type {
+	vmw_so_el,
+	vmw_so_bs,
+	vmw_so_ds,
+	vmw_so_rs,
+	vmw_so_ss,
+	vmw_so_so,
+	vmw_so_max,
+};
+
+/**
+ * union vmw_view_destroy - view destruction command body
+ *
+ * @rtv: RenderTarget view destruction command body
+ * @srv: ShaderResource view destruction command body
+ * @dsv: DepthStencil view destruction command body
+ * @view_id: A single u32 view id.
+ *
+ * The assumption here is that all union members are really represented by a
+ * single u32 in the command stream. If that's not the case,
+ * the size of this union will not equal the size of an u32, and the
+ * assumption is invalid, and we detect that at compile time in the
+ * vmw_so_build_asserts() function.
+ */
+union vmw_view_destroy {
+	struct SVGA3dCmdDXDestroyRenderTargetView rtv;
+	struct SVGA3dCmdDXDestroyShaderResourceView srv;
+	struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+	u32 view_id;
+};
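+
+/*
+ * Usage sketch, illustrative only: because all members alias a single
+ * u32, a destroy command body can be filled in generically through
+ * @view_id regardless of the view type being destroyed:
+ *
+ *	union vmw_view_destroy body;
+ *
+ *	body.view_id = id;
+ */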
+
+/* Map enum vmw_view_type to view destroy command ids */
+extern const u32 vmw_view_destroy_cmds[];
+
+/* Map enum vmw_view_type to SVGACOTableType */
+extern const SVGACOTableType vmw_view_cotables[];
+
+/* Map enum vmw_so_type to SVGACOTableType */
+extern const SVGACOTableType vmw_so_cotables[];
+
+/*
+ * vmw_view_cmd_to_type - Return the view type for a create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given view create or destroy command id, return the corresponding
+ * enum vmw_view_type. If the command is unknown, return vmw_view_max.
+ * The validity of the simplified calculation is verified in the
+ * vmw_so_build_asserts() function.
+ */
+static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
+{
+	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
+
+	if (tmp > (u32)vmw_view_max)
+		return vmw_view_max;
+
+	return (enum vmw_view_type) tmp;
+}
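+
+/*
+ * Illustrative only, not used by the driver: the same pairwise layout
+ * (define commands at even offsets, destroy commands at odd offsets,
+ * verified in vmw_so_build_asserts()) also tells the two apart:
+ */
+static inline bool vmw_view_cmd_is_define(u32 id)
+{
+	return vmw_view_cmd_to_type(id) != vmw_view_max &&
+		((id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) & 1) == 0;
+}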
+
+/*
+ * vmw_so_cmd_to_type - Return the state object type for a
+ * create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given state object create or destroy command id,
+ * return the corresponding enum vmw_so_type. If the command is unknown,
+ * return vmw_so_max. We should perhaps optimize this function using
+ * a strategy similar to that of vmw_view_cmd_to_type().
+ */
+static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
+{
+	switch (id) {
+	case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
+	case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
+		return vmw_so_el;
+	case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
+		return vmw_so_bs;
+	case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
+		return vmw_so_ds;
+	case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
+		return vmw_so_rs;
+	case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
+		return vmw_so_ss;
+	case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
+	case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
+		return vmw_so_so;
+	default:
+		break;
+	}
+	return vmw_so_max;
+}
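+
+/*
+ * A sketch of the optimization hinted at above, illustrative only: it
+ * would be valid only if the state-object define/destroy ids were laid
+ * out pairwise from a common base, which would have to be verified by
+ * build asserts like those in vmw_so_build_asserts():
+ *
+ *	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT) / 2;
+ *
+ *	return (tmp < (u32)vmw_so_max) ?
+ *		(enum vmw_so_type) tmp : vmw_so_max;
+ */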
+
+/*
+ * View management - vmwgfx_so.c
+ */
+extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+			struct vmw_resource *ctx,
+			struct vmw_resource *srf,
+			enum vmw_view_type view_type,
+			u32 user_key,
+			const void *cmd,
+			size_t cmd_size,
+			struct list_head *list);
+
+extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+			   u32 user_key, enum vmw_view_type view_type,
+			   struct list_head *list,
+			   struct vmw_resource **res_p);
+
+extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+					  struct list_head *view_list);
+extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+					  struct list_head *list,
+					  bool readback);
+extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
+extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+					    enum vmw_view_type view_type,
+					    u32 user_key);
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
new file mode 100644
index 000000000000..c22e2df1b336
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -0,0 +1,1266 @@
+/******************************************************************************
+ *
+ * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#include "vmwgfx_kms.h"
+#include "device_include/svga3d_surfacedefs.h"
+#include <drm/drm_plane_helper.h>
+
+#define vmw_crtc_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.crtc)
+#define vmw_encoder_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.encoder)
+#define vmw_connector_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.connector)
+
+
+
+enum stdu_content_type {
+	SAME_AS_DISPLAY = 0,
+	SEPARATE_SURFACE,
+	SEPARATE_DMA
+};
+
+/**
+ * struct vmw_stdu_dirty - closure structure for the update functions
+ *
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @transfer: Transfer direction for DMA command.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @pitch: Buffer pitch when DMA-ing between buffer and screen targets.
+ * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @sid: Surface ID when copying between surface and screen targets.
+ */
+struct vmw_stdu_dirty {
+	struct vmw_kms_dirty base;
+	SVGA3dTransferType  transfer;
+	s32 left, right, top, bottom;
+	u32 pitch;
+	union {
+		struct vmw_dma_buffer *buf;
+		u32 sid;
+	};
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_stdu_update {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdUpdateGBScreenTarget body;
+};
+
+struct vmw_stdu_dma {
+	SVGA3dCmdHeader     header;
+	SVGA3dCmdSurfaceDMA body;
+};
+
+struct vmw_stdu_surface_copy {
+	SVGA3dCmdHeader      header;
+	SVGA3dCmdSurfaceCopy body;
+};
+
+
+/**
+ * struct vmw_screen_target_display_unit
+ *
+ * @base: VMW specific DU structure
+ * @display_srf: surface to be displayed.  The dimension of this will always
+ *               match the display mode.  If the display mode matches
+ *               content_vfbs dimensions, then this is a pointer into the
+ *               corresponding field in content_vfbs.  If not, then this
+ *               is a separate buffer into which content_vfbs will blit.
+ * @content_fb: holds the rendered content, can be a surface or DMA buffer
+ * @content_fb_type: content_fb type
+ * @defined:  true if the current display unit has been initialized
+ */
+struct vmw_screen_target_display_unit {
+	struct vmw_display_unit base;
+
+	struct vmw_surface     *display_srf;
+	struct drm_framebuffer *content_fb;
+
+	enum stdu_content_type content_fb_type;
+
+	bool defined;
+};
+
+
+
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit helper Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_pin_display - pins the resource associated with the display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * Since the display surface can either be a private surface allocated by us,
+ * or it can point to the content surface, we use this function to avoid
+ * pinning the same resource twice.
+ */
+static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
+{
+	return vmw_resource_pin(&stdu->display_srf->res, false);
+}
+
+
+
+/**
+ * vmw_stdu_unpin_display - unpins the resource associated with display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * If the display surface was privately allocated by
+ * vmw_surface_gb_priv_define() and not registered as a framebuffer, then it
+ * won't be automatically cleaned up when all the framebuffers are freed.  As
+ * such, we have to explicitly call vmw_resource_unreference() to get it freed.
+ */
+static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
+{
+	if (stdu->display_srf) {
+		struct vmw_resource *res = &stdu->display_srf->res;
+
+		vmw_resource_unpin(res);
+
+		if (stdu->content_fb_type != SAME_AS_DISPLAY) {
+			vmw_resource_unreference(&res);
+			stdu->content_fb_type = SAME_AS_DISPLAY;
+		}
+
+		stdu->display_srf = NULL;
+	}
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit CRTC Functions
+ *****************************************************************************/
+
+
+/**
+ * vmw_stdu_crtc_destroy - cleans up the STDU
+ *
+ * @crtc: used to get a reference to the containing STDU
+ */
+static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
+{
+	vmw_stdu_destroy(vmw_crtc_to_stdu(crtc));
+}
+
+/**
+ * vmw_stdu_define_st - Defines a Screen Target
+ *
+ * @dev_priv:  VMW DRM device
+ * @stdu: display unit to create a Screen Target for
+ *
+ * Creates a STDU that we can use later.  This function is called whenever the
+ * framebuffer size changes.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_define_st(struct vmw_private *dev_priv,
+			      struct vmw_screen_target_display_unit *stdu)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBScreenTarget body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space defining Screen Target\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+	cmd->body.width  = stdu->display_srf->base_size.width;
+	cmd->body.height = stdu->display_srf->base_size.height;
+	cmd->body.flags  = (cmd->body.stid == 0) ? SVGA_STFLAG_PRIMARY : 0;
+	cmd->body.dpi    = 0;
+	cmd->body.xRoot  = stdu->base.crtc.x;
+	cmd->body.yRoot  = stdu->base.crtc.y;
+
+	if (!stdu->base.is_implicit) {
+		cmd->body.xRoot  = stdu->base.gui_x;
+		cmd->body.yRoot  = stdu->base.gui_y;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	stdu->defined = true;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_stdu_bind_st - Binds a surface to a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ * @res: Buffer to bind to the screen target.  Set to NULL to blank the screen.
+ *
+ * Binding a surface to a Screen Target is the same as flipping.
+ */
+static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
+			    struct vmw_screen_target_display_unit *stdu,
+			    struct vmw_resource *res)
+{
+	SVGA3dSurfaceImageId image;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBScreenTarget body;
+	} *cmd;
+
+
+	if (!stdu->defined) {
+		DRM_ERROR("No screen target defined\n");
+		return -EINVAL;
+	}
+
+	/* Set up image using information in vfb */
+	memset(&image, 0, sizeof(image));
+	image.sid = res ? res->id : SVGA3D_INVALID_ID;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space binding a screen target\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+	cmd->body.image  = image;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
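+/*
+ * Usage sketch, illustrative only: blanking a defined screen target is
+ * simply a bind with no resource,
+ *
+ *	vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ *
+ * which binds image sid SVGA3D_INVALID_ID to the target.
+ */
+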
+/**
+ * vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a
+ * bounding box.
+ *
+ * @cmd: Pointer to command stream.
+ * @unit: Screen target unit.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ */
+static void vmw_stdu_populate_update(void *cmd, int unit,
+				     s32 left, s32 right, s32 top, s32 bottom)
+{
+	struct vmw_stdu_update *update = cmd;
+
+	update->header.id   = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET;
+	update->header.size = sizeof(update->body);
+
+	update->body.stid   = unit;
+	update->body.rect.x = left;
+	update->body.rect.y = top;
+	update->body.rect.w = right - left;
+	update->body.rect.h = bottom - top;
+}
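+
+/*
+ * Worked example, illustrative only: a full update of unit 0 for an
+ * 800x600 mode,
+ *
+ *	vmw_stdu_populate_update(cmd, 0, 0, 800, 0, 600);
+ *
+ * fills in rect = { .x = 0, .y = 0, .w = 800, .h = 600 }.
+ */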
+
+/**
+ * vmw_stdu_update_st - Full update of a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ *
+ * This function needs to be called whenever the content of a screen
+ * target has changed completely, typically as a result of a backing
+ * surface change.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_update_st(struct vmw_private *dev_priv,
+			      struct vmw_screen_target_display_unit *stdu)
+{
+	struct vmw_stdu_update *cmd;
+	struct drm_crtc *crtc = &stdu->base.crtc;
+
+	if (!stdu->defined) {
+		DRM_ERROR("No screen target defined");
+		return -EINVAL;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space updating a Screen Target\n");
+		return -ENOMEM;
+	}
+
+	vmw_stdu_populate_update(cmd, stdu->base.unit, 0, crtc->mode.hdisplay,
+				 0, crtc->mode.vdisplay);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_stdu_destroy_st - Destroy a Screen Target
+ *
+ * @dev_priv:  VMW DRM device
+ * @stdu: display unit to destroy
+ */
+static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
+			       struct vmw_screen_target_display_unit *stdu)
+{
+	int    ret;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBScreenTarget body;
+	} *cmd;
+
+
+	/* Nothing to do if not successfully defined */
+	if (unlikely(!stdu->defined))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/* Force sync */
+	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+	if (unlikely(ret != 0))
+		DRM_ERROR("Failed to sync with HW\n");
+
+	stdu->defined = false;
+
+	return ret;
+}
+
+
+
+/**
+ * vmw_stdu_crtc_set_config - Sets a mode
+ *
+ * @set:  mode parameters
+ *
+ * This function is the device-specific portion of the DRM CRTC mode set.
+ * For the SVGA device, we do this by defining a Screen Target, binding a
+ * GB Surface to that target, and finally updating the screen target.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer *vfb;
+	struct vmw_framebuffer_surface *new_vfbs;
+	struct drm_display_mode *mode;
+	struct drm_framebuffer  *new_fb;
+	struct drm_crtc      *crtc;
+	struct drm_encoder   *encoder;
+	struct drm_connector *connector;
+	int    ret;
+
+
+	if (!set || !set->crtc)
+		return -EINVAL;
+
+	crtc     = set->crtc;
+	crtc->x  = set->x;
+	crtc->y  = set->y;
+	stdu     = vmw_crtc_to_stdu(crtc);
+	mode     = set->mode;
+	new_fb   = set->fb;
+	dev_priv = vmw_priv(crtc->dev);
+
+
+	if (set->num_connectors > 1) {
+		DRM_ERROR("Too many connectors\n");
+		return -EINVAL;
+	}
+
+	if (set->num_connectors == 1 &&
+	    set->connectors[0] != &stdu->base.connector) {
+		DRM_ERROR("Connectors don't match %p %p\n",
+			set->connectors[0], &stdu->base.connector);
+		return -EINVAL;
+	}
+
+
+	/* Since they always map one to one these are safe */
+	connector = &stdu->base.connector;
+	encoder   = &stdu->base.encoder;
+
+
+	/*
+	 * After this point the CRTC will be considered off unless a new fb
+	 * is bound
+	 */
+	if (stdu->defined) {
+		/* Unbind current surface by binding an invalid one */
+		ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+		if (unlikely(ret != 0))
+			return ret;
+
+		/* Update Screen Target, display will now be blank */
+		if (crtc->primary->fb) {
+			ret = vmw_stdu_update_st(dev_priv, stdu);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+
+		crtc->primary->fb  = NULL;
+		crtc->enabled      = false;
+		encoder->crtc      = NULL;
+		connector->encoder = NULL;
+
+		vmw_stdu_unpin_display(stdu);
+		stdu->content_fb      = NULL;
+		stdu->content_fb_type = SAME_AS_DISPLAY;
+
+		ret = vmw_stdu_destroy_st(dev_priv, stdu);
+		/* The hardware is hung, give up */
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+
+	/* Any of these conditions means the caller wants CRTC off */
+	if (set->num_connectors == 0 || !mode || !new_fb)
+		return 0;
+
+
+	if (set->x + mode->hdisplay > new_fb->width ||
+	    set->y + mode->vdisplay > new_fb->height) {
+		DRM_ERROR("Set outside of framebuffer\n");
+		return -EINVAL;
+	}
+
+	stdu->content_fb = new_fb;
+	vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
+
+	if (vfb->dmabuf)
+		stdu->content_fb_type = SEPARATE_DMA;
+
+	/*
+	 * If the requested mode is different from the width and height
+	 * of the FB, or if the content buffer is a DMA buf, then allocate
+	 * a display FB that matches the dimensions of the mode
+	 */
+	if (mode->hdisplay != new_fb->width  ||
+	    mode->vdisplay != new_fb->height ||
+	    stdu->content_fb_type != SAME_AS_DISPLAY) {
+		struct vmw_surface content_srf;
+		struct drm_vmw_size display_base_size = {0};
+		struct vmw_surface *display_srf;
+
+
+		display_base_size.width  = mode->hdisplay;
+		display_base_size.height = mode->vdisplay;
+		display_base_size.depth  = 1;
+
+		/*
+		 * If content buffer is a DMA buf, then we have to construct
+		 * surface info
+		 */
+		if (stdu->content_fb_type == SEPARATE_DMA) {
+
+			switch (new_fb->bits_per_pixel) {
+			case 32:
+				content_srf.format = SVGA3D_X8R8G8B8;
+				break;
+
+			case 16:
+				content_srf.format = SVGA3D_R5G6B5;
+				break;
+
+			case 8:
+				content_srf.format = SVGA3D_P8;
+				break;
+
+			default:
+				DRM_ERROR("Invalid format\n");
+				ret = -EINVAL;
+				goto err_unref_content;
+			}
+
+			content_srf.flags             = 0;
+			content_srf.mip_levels[0]     = 1;
+			content_srf.multisample_count = 0;
+		} else {
+
+			stdu->content_fb_type = SEPARATE_SURFACE;
+
+			new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+			content_srf = *new_vfbs->surface;
+		}
+
+
+		ret = vmw_surface_gb_priv_define(crtc->dev,
+				0, /* because kernel visible only */
+				content_srf.flags,
+				content_srf.format,
+				true, /* a scanout buffer */
+				content_srf.mip_levels[0],
+				content_srf.multisample_count,
+				0,
+				display_base_size,
+				&display_srf);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Cannot allocate a display FB.\n");
+			goto err_unref_content;
+		}
+
+		stdu->display_srf = display_srf;
+	} else {
+		new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+		stdu->display_srf = new_vfbs->surface;
+	}
+
+
+	ret = vmw_stdu_pin_display(stdu);
+	if (unlikely(ret != 0)) {
+		stdu->display_srf = NULL;
+		goto err_unref_content;
+	}
+
+	vmw_svga_enable(dev_priv);
+
+	/*
+	 * Steps to displaying a surface, assume surface is already
+	 * bound:
+	 *   1.  define a screen target
+	 *   2.  bind a fb to the screen target
+	 *   3.  update that screen target (this is done later by
+	 *       vmw_kms_stdu_do_surface_dirty_or_present)
+	 */
+	ret = vmw_stdu_define_st(dev_priv, stdu);
+	if (unlikely(ret != 0))
+		goto err_unpin_display_and_content;
+
+	ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+	if (unlikely(ret != 0))
+		goto err_unpin_destroy_st;
+
+
+	connector->encoder = encoder;
+	encoder->crtc      = crtc;
+
+	crtc->mode    = *mode;
+	crtc->primary->fb = new_fb;
+	crtc->enabled = true;
+
+	return ret;
+
+err_unpin_destroy_st:
+	vmw_stdu_destroy_st(dev_priv, stdu);
+err_unpin_display_and_content:
+	vmw_stdu_unpin_display(stdu);
+err_unref_content:
+	stdu->content_fb = NULL;
+	return ret;
+}
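The tail of the function above is the usual kernel unwind ladder: each step that succeeds moves the failure target one label down, so a later error undoes exactly the steps that completed. A compact standalone model of that control flow (the step and label names here are illustrative, not the driver's API):

#include <stdio.h>

static int define_st(void) { return 0; }	/* pretend each step can fail */
static int bind_st(void)   { return -1; }

static int set_config_model(void)
{
	int ret;

	ret = define_st();		/* step 1: define the screen target */
	if (ret)
		goto err_unpin;

	ret = bind_st();		/* step 2: bind the display surface */
	if (ret)
		goto err_destroy_st;

	return 0;			/* step 3, the update, happens later */

err_destroy_st:
	puts("destroy screen target");	/* vmw_stdu_destroy_st() above */
err_unpin:
	puts("unpin display");		/* vmw_stdu_unpin_display() above */
	return ret;
}

int main(void)
{
	return set_config_model() ? 1 : 0;
}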
+
+
+
+/**
+ * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
+ *
+ * @crtc: CRTC to attach FB to
+ * @new_fb: FB to attach
+ * @event: Event to be posted. This event should have been allocated
+ *         using k[mz]alloc, and should have been completely initialized.
+ * @flags: Input flags.
+ *
+ * If the STDU uses the same display and content buffers, i.e. a true flip,
+ * this function will replace the existing display buffer with the new content
+ * buffer.
+ *
+ * If the STDU uses different display and content buffers, i.e. a blit, then
+ * only the content buffer will be updated.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
+				   struct drm_framebuffer *new_fb,
+				   struct drm_pending_vblank_event *event,
+				   uint32_t flags)
+
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_target_display_unit *stdu;
+	int ret;
+
+	if (crtc == NULL)
+		return -EINVAL;
+
+	dev_priv          = vmw_priv(crtc->dev);
+	stdu              = vmw_crtc_to_stdu(crtc);
+	crtc->primary->fb = new_fb;
+	stdu->content_fb  = new_fb;
+
+	if (stdu->display_srf) {
+		/*
+		 * If the display surface is the same as the content surface
+		 * then remove the reference
+		 */
+		if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+			if (stdu->defined) {
+				/* Unbind the current surface */
+				ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+				if (unlikely(ret != 0))
+					goto err_out;
+			}
+			vmw_stdu_unpin_display(stdu);
+			stdu->display_srf = NULL;
+		}
+	}
+
+
+	if (!new_fb) {
+		/* Blanks the display */
+		(void) vmw_stdu_update_st(dev_priv, stdu);
+
+		return 0;
+	}
+
+
+	if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+		stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
+		ret = vmw_stdu_pin_display(stdu);
+		if (ret) {
+			stdu->display_srf = NULL;
+			goto err_out;
+		}
+
+		/* Bind display surface */
+		ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+		if (unlikely(ret != 0))
+			goto err_unpin_display_and_content;
+	}
+
+	/* Update display surface: after this point everything is bound */
+	ret = vmw_stdu_update_st(dev_priv, stdu);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (event) {
+		struct vmw_fence_obj *fence = NULL;
+		struct drm_file *file_priv = event->base.file_priv;
+
+		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		if (!fence)
+			return -ENOMEM;
+
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   &event->event.tv_sec,
+						   &event->event.tv_usec,
+						   true);
+		vmw_fence_obj_unreference(&fence);
+	}
+
+	return ret;
+
+err_unpin_display_and_content:
+	vmw_stdu_unpin_display(stdu);
+err_out:
+	crtc->primary->fb = NULL;
+	stdu->content_fb = NULL;
+	return ret;
+}
+
+
+/**
+ * vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface DMA command cliprect and updates the bounding box
+ * for the DMA.
+ */
+static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *ddirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_stdu_dma *cmd = dirty->cmd;
+	struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+	blit += dirty->num_hits;
+	blit->srcx = dirty->fb_x;
+	blit->srcy = dirty->fb_y;
+	blit->x = dirty->unit_x1;
+	blit->y = dirty->unit_y1;
+	blit->d = 1;
+	blit->w = dirty->unit_x2 - dirty->unit_x1;
+	blit->h = dirty->unit_y2 - dirty->unit_y1;
+	dirty->num_hits++;
+
+	if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
+		return;
+
+	/* Destination bounding box */
+	ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
+	ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
+	ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
+	ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+}
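The destination bounding box is grown clip by clip with min/max, starting from the "empty" box (left/top at S32_MAX, right/bottom at S32_MIN) that the DMA entry point seeds below. A standalone model:

#include <stdint.h>
#include <stdio.h>

struct bbox { int32_t left, top, right, bottom; };

static void bbox_add(struct bbox *b,
		     int32_t x1, int32_t y1, int32_t x2, int32_t y2)
{
	if (x1 < b->left)   b->left   = x1;
	if (y1 < b->top)    b->top    = y1;
	if (x2 > b->right)  b->right  = x2;
	if (y2 > b->bottom) b->bottom = y2;
}

int main(void)
{
	/* Empty box: the min-tracked edges start at max, and vice versa. */
	struct bbox b = { INT32_MAX, INT32_MAX, INT32_MIN, INT32_MIN };

	bbox_add(&b, 10, 10, 20, 20);
	bbox_add(&b,  5, 15, 12, 40);

	printf("%d %d %d %d\n", (int)b.left, (int)b.top,
	       (int)b.right, (int)b.bottom);	/* 5 10 20 40 */
	return 0;
}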
+
+/**
+ * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a DMA command, and optionally encodes
+ * a screen target update command, depending on transfer direction.
+ */
+static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *ddirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+	struct vmw_stdu_dma *cmd = dirty->cmd;
+	struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+	SVGA3dCmdSurfaceDMASuffix *suffix =
+		(SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
+	size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
+
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
+	cmd->header.size = sizeof(cmd->body) + blit_size;
+	vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
+	cmd->body.guest.pitch = ddirty->pitch;
+	cmd->body.host.sid = stdu->display_srf->res.id;
+	cmd->body.host.face = 0;
+	cmd->body.host.mipmap = 0;
+	cmd->body.transfer = ddirty->transfer;
+	suffix->suffixSize = sizeof(*suffix);
+	suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+
+	if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
+		blit_size += sizeof(struct vmw_stdu_update);
+
+		vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
+					 ddirty->left, ddirty->right,
+					 ddirty->top, ddirty->bottom);
+	}
+
+	vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+
+	ddirty->left = ddirty->top = S32_MAX;
+	ddirty->right = ddirty->bottom = S32_MIN;
+}
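The FIFO reservation made in vmw_kms_stdu_dma() below has to match the layout this commit callback fills in: one header-plus-body DMA command, num_hits copy boxes, the suffix, and an optional screen-target update when writing to VRAM. A sketch of that arithmetic, with made-up stand-in sizes (the real ones come from the SVGA headers):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in sizes; illustrative only, not the real SVGA struct sizes. */
#define DMA_CMD_SIZE	32u	/* header + SVGA3dCmdSurfaceDMA body */
#define COPY_BOX_SIZE	28u	/* one SVGA3dCopyBox per clip rect */
#define SUFFIX_SIZE	12u	/* SVGA3dCmdSurfaceDMASuffix */
#define UPDATE_SIZE	24u	/* struct vmw_stdu_update */

static size_t dma_reserve_size(unsigned int num_clips, bool to_surface)
{
	size_t size = DMA_CMD_SIZE + num_clips * COPY_BOX_SIZE + SUFFIX_SIZE;

	if (to_surface)		/* writes to VRAM append an update command */
		size += UPDATE_SIZE;
	return size;
}

int main(void)
{
	printf("%zu\n", dma_reserve_size(3, true));	/* 32 + 84 + 12 + 24 */
	return 0;
}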
+
+/**
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * framebuffer and the screen target system.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller. May be
+ * set to NULL, but then @user_fence_rep must also be set to NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @increment: Increment to use when looping over @clips or @vclips.
+ * @to_surface: Whether to DMA to the screen target system as opposed to
+ * from the screen target system.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ *
+ * If DMA-ing to the screen target system, the function will also notify
+ * the screen target system that a bounding box of the cliprects has been
+ * updated.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_clip_rect *clips,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips,
+		     int increment,
+		     bool to_surface,
+		     bool interruptible)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+	struct vmw_stdu_dirty ddirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+					    false);
+	if (ret)
+		return ret;
+
+	ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+		SVGA3D_READ_HOST_VRAM;
+	ddirty.left = ddirty.top = S32_MAX;
+	ddirty.right = ddirty.bottom = S32_MIN;
+	ddirty.pitch = vfb->base.pitches[0];
+	ddirty.buf = buf;
+	ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
+	ddirty.base.clip = vmw_stdu_dmabuf_clip;
+	ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
+		num_clips * sizeof(SVGA3dCopyBox) +
+		sizeof(SVGA3dCmdSurfaceDMASuffix);
+	if (to_surface)
+		ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
+
+	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
+				   0, 0, num_clips, increment, &ddirty.base);
+	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+				     user_fence_rep);
+
+	return ret;
+}
+
+/**
+ * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface copy command cliprect and updates the bounding box
+ * for the copy.
+ */
+static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *sdirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+
+	if (sdirty->sid != stdu->display_srf->res.id) {
+		struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+		blit += dirty->num_hits;
+		blit->srcx = dirty->fb_x;
+		blit->srcy = dirty->fb_y;
+		blit->x = dirty->unit_x1;
+		blit->y = dirty->unit_y1;
+		blit->d = 1;
+		blit->w = dirty->unit_x2 - dirty->unit_x1;
+		blit->h = dirty->unit_y2 - dirty->unit_y1;
+	}
+
+	dirty->num_hits++;
+
+	/* Destination bounding box */
+	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+}
+
+/**
+ * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
+ * copy command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a surface copy command, and encodes a screen
+ * target update command.
+ */
+static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *sdirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+	struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+	struct vmw_stdu_update *update;
+	size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits;
+	size_t commit_size;
+
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	if (sdirty->sid != stdu->display_srf->res.id) {
+		struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+		cmd->header.id = SVGA_3D_CMD_SURFACE_COPY;
+		cmd->header.size = sizeof(cmd->body) + blit_size;
+		cmd->body.src.sid = sdirty->sid;
+		cmd->body.dest.sid = stdu->display_srf->res.id;
+		update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
+		commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
+	} else {
+		update = dirty->cmd;
+		commit_size = sizeof(*update);
+	}
+
+	vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
+				 sdirty->right, sdirty->top, sdirty->bottom);
+
+	vmw_fifo_commit(dirty->dev_priv, commit_size);
+
+	sdirty->left = sdirty->top = S32_MAX;
+	sdirty->right = sdirty->bottom = S32_MIN;
+}
+
+/**
+ * vmw_kms_stdu_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @inc: Increment to use when looping over @clips or @vclips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       struct drm_clip_rect *clips,
+			       struct drm_vmw_rect *vclips,
+			       struct vmw_resource *srf,
+			       s32 dest_x,
+			       s32 dest_y,
+			       unsigned num_clips, int inc,
+			       struct vmw_fence_obj **out_fence)
+{
+	struct vmw_framebuffer_surface *vfbs =
+		container_of(framebuffer, typeof(*vfbs), base);
+	struct vmw_stdu_dirty sdirty;
+	int ret;
+
+	if (!srf)
+		srf = &vfbs->surface->res;
+
+	ret = vmw_kms_helper_resource_prepare(srf, true);
+	if (ret)
+		return ret;
+
+	if (vfbs->is_dmabuf_proxy) {
+		ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
+		if (ret)
+			goto out_finish;
+	}
+
+	sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
+	sdirty.base.clip = vmw_kms_stdu_surface_clip;
+	sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
+		sizeof(SVGA3dCopyBox) * num_clips +
+		sizeof(struct vmw_stdu_update);
+	sdirty.sid = srf->id;
+	sdirty.left = sdirty.top = S32_MAX;
+	sdirty.right = sdirty.bottom = S32_MIN;
+
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+				   dest_x, dest_y, num_clips, inc,
+				   &sdirty.base);
+out_finish:
+	vmw_kms_helper_resource_finish(srf, out_fence);
+
+	return ret;
+}
+
+
+/*
+ *  Screen Target CRTC dispatch table
+ */
+static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
+	.save = vmw_du_crtc_save,
+	.restore = vmw_du_crtc_restore,
+	.cursor_set = vmw_du_crtc_cursor_set,
+	.cursor_move = vmw_du_crtc_cursor_move,
+	.gamma_set = vmw_du_crtc_gamma_set,
+	.destroy = vmw_stdu_crtc_destroy,
+	.set_config = vmw_stdu_crtc_set_config,
+	.page_flip = vmw_stdu_crtc_page_flip,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Encoder Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_encoder_destroy - cleans up the STDU
+ *
+ * @encoder: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op.  Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
+{
+	vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
+}
+
+static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
+	.destroy = vmw_stdu_encoder_destroy,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Connector Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_connector_destroy - cleans up the STDU
+ *
+ * @connector: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op.  Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_connector_destroy(struct drm_connector *connector)
+{
+	vmw_stdu_destroy(vmw_connector_to_stdu(connector));
+}
+
+
+
+static struct drm_connector_funcs vmw_stdu_connector_funcs = {
+	.dpms = vmw_du_connector_dpms,
+	.save = vmw_du_connector_save,
+	.restore = vmw_du_connector_restore,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
+	.set_property = vmw_du_connector_set_property,
+	.destroy = vmw_stdu_connector_destroy,
+};
+
+
+
+/**
+ * vmw_stdu_init - Sets up a Screen Target Display Unit
+ *
+ * @dev_priv: VMW DRM device
+ * @unit: unit number, in the range 0 to VMWGFX_NUM_DISPLAY_UNITS - 1
+ *
+ * This function is called once per CRTC, and allocates one Screen Target
+ * display unit to represent that CRTC.  Since the SVGA device does not separate
+ * out encoder and connector, they are represented as part of the STDU as well.
+ */
+static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+	struct vmw_screen_target_display_unit *stdu;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+
+	stdu = kzalloc(sizeof(*stdu), GFP_KERNEL);
+	if (!stdu)
+		return -ENOMEM;
+
+	stdu->base.unit = unit;
+	crtc = &stdu->base.crtc;
+	encoder = &stdu->base.encoder;
+	connector = &stdu->base.connector;
+
+	stdu->base.pref_active = (unit == 0);
+	stdu->base.pref_width  = dev_priv->initial_width;
+	stdu->base.pref_height = dev_priv->initial_height;
+	stdu->base.is_implicit = true;
+
+	drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
+			   DRM_MODE_CONNECTOR_VIRTUAL);
+	connector->status = vmw_du_connector_detect(connector, false);
+
+	drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL);
+	drm_mode_connector_attach_encoder(connector, encoder);
+	encoder->possible_crtcs = (1 << unit);
+	encoder->possible_clones = 0;
+
+	(void) drm_connector_register(connector);
+
+	drm_crtc_init(dev, crtc, &vmw_stdu_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.dirty_info_property,
+				   1);
+
+	return 0;
+}
+
+
+
+/**
+ *  vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit
+ *
+ *  @stdu:  Screen Target Display Unit to be destroyed
+ *
+ *  Clean up after vmw_stdu_init
+ */
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
+{
+	vmw_stdu_unpin_display(stdu);
+
+	vmw_du_cleanup(&stdu->base);
+	kfree(stdu);
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display KMS Functions
+ *
+ * These functions are called by the common KMS code in vmwgfx_kms.c
+ *****************************************************************************/
+
+/**
+ * vmw_kms_stdu_init_display - Initializes a Screen Target based display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * This function initializes a Screen Target based display device.  It checks
+ * the capability bits to make sure the underlying hardware can support
+ * screen targets, and then creates the maximum number of CRTCs, a.k.a. Display
+ * Units, as supported by the display hardware.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i, ret;
+
+
+	/* Do nothing if Screen Target support is turned off */
+	if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+		return -ENOSYS;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
+		return -ENOSYS;
+
+	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = drm_mode_create_dirty_info_property(dev);
+	if (unlikely(ret != 0))
+		goto err_vblank_cleanup;
+
+	dev_priv->active_display_unit = vmw_du_screen_target;
+
+	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
+		ret = vmw_stdu_init(dev_priv, i);
+
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to initialize STDU %d\n", i);
+			goto err_vblank_cleanup;
+		}
+	}
+
+	DRM_INFO("Screen Target Display device initialized\n");
+
+	return 0;
+
+err_vblank_cleanup:
+	drm_vblank_cleanup(dev);
+	return ret;
+}
+
+
+
+/**
+ * vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * Frees up any resources allocated by vmw_kms_stdu_init_display
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int vmw_kms_stdu_close_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	drm_vblank_cleanup(dev);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 4ecdbf3e59da..3361769842f4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,8 +27,11 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 #include <ttm/ttm_placement.h>
-#include "svga3d_surfacedefs.h"
+#include "device_include/svga3d_surfacedefs.h"
+
 
 /**
  * struct vmw_user_surface - User-space visible surface resource
@@ -36,7 +39,7 @@
  * @base:           The TTM base object handling user-space visibility.
  * @srf:            The surface metadata.
  * @size:           TTM accounting size for the surface.
- * @master:         master of the creating client. Used for security check.
+ * @master: master of the creating client. Used for security check.
  */
 struct vmw_user_surface {
 	struct ttm_prime_object prime;
@@ -220,7 +223,7 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
 	cmd->header.size = cmd_len;
 	cmd->body.sid = srf->res.id;
 	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
+	cmd->body.format = srf->format;
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
 		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
 
@@ -340,7 +343,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
 		dev_priv->used_memory_size -= res->backup_size;
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 	}
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 /**
@@ -576,14 +579,14 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 
 	BUG_ON(res_free == NULL);
 	if (!dev_priv->has_mob)
-		(void) vmw_3d_resource_inc(dev_priv, false);
+		vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
 				(dev_priv->has_mob) ? &vmw_gb_surface_func :
 				&vmw_legacy_surface_func);
 
 	if (unlikely(ret != 0)) {
 		if (!dev_priv->has_mob)
-			vmw_3d_resource_dec(dev_priv, false);
+			vmw_fifo_resource_dec(dev_priv);
 		res_free(res);
 		return ret;
 	}
@@ -593,6 +596,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 	 * surface validate.
 	 */
 
+	INIT_LIST_HEAD(&srf->view_list);
 	vmw_resource_activate(res, vmw_hw_surface_destroy);
 	return ret;
 }
@@ -723,6 +727,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	desc = svga3dsurface_get_desc(req->format);
 	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
 		DRM_ERROR("Invalid surface format for surface creation.\n");
+		DRM_ERROR("Format requested is: %d\n", req->format);
 		return -EINVAL;
 	}
 
@@ -906,6 +911,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 				  "surface reference.\n");
 			return -EACCES;
 		}
+		if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+			DRM_ERROR("Locked master refused legacy "
+				  "surface reference.\n");
+			return -EACCES;
+		}
+
 		handle = u_handle;
 	}
 
@@ -1018,17 +1029,21 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_surface *srf = vmw_res_to_srf(res);
-	uint32_t cmd_len, submit_len;
+	uint32_t cmd_len, cmd_id, submit_len;
 	int ret;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDefineGBSurface body;
 	} *cmd;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface_v2 body;
+	} *cmd2;
 
 	if (likely(res->id != -1))
 		return 0;
 
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_alloc_id(res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed to allocate a surface id.\n");
@@ -1040,9 +1055,19 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	cmd_len = sizeof(cmd->body);
-	submit_len = sizeof(*cmd);
+	if (srf->array_size > 0) {
+		/* has_dx checked on creation time. */
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
+		cmd_len = sizeof(cmd2->body);
+		submit_len = sizeof(*cmd2);
+	} else {
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+		cmd_len = sizeof(cmd->body);
+		submit_len = sizeof(*cmd);
+	}
+
 	cmd = vmw_fifo_reserve(dev_priv, submit_len);
+	cmd2 = (typeof(cmd2))cmd;
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
@@ -1050,17 +1075,33 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
-	cmd->header.size = cmd_len;
-	cmd->body.sid = srf->res.id;
-	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
-	cmd->body.numMipLevels = srf->mip_levels[0];
-	cmd->body.multisampleCount = srf->multisample_count;
-	cmd->body.autogenFilter = srf->autogen_filter;
-	cmd->body.size.width = srf->base_size.width;
-	cmd->body.size.height = srf->base_size.height;
-	cmd->body.size.depth = srf->base_size.depth;
+	if (srf->array_size > 0) {
+		cmd2->header.id = cmd_id;
+		cmd2->header.size = cmd_len;
+		cmd2->body.sid = srf->res.id;
+		cmd2->body.surfaceFlags = srf->flags;
+		cmd2->body.format = cpu_to_le32(srf->format);
+		cmd2->body.numMipLevels = srf->mip_levels[0];
+		cmd2->body.multisampleCount = srf->multisample_count;
+		cmd2->body.autogenFilter = srf->autogen_filter;
+		cmd2->body.size.width = srf->base_size.width;
+		cmd2->body.size.height = srf->base_size.height;
+		cmd2->body.size.depth = srf->base_size.depth;
+		cmd2->body.arraySize = srf->array_size;
+	} else {
+		cmd->header.id = cmd_id;
+		cmd->header.size = cmd_len;
+		cmd->body.sid = srf->res.id;
+		cmd->body.surfaceFlags = srf->flags;
+		cmd->body.format = cpu_to_le32(srf->format);
+		cmd->body.numMipLevels = srf->mip_levels[0];
+		cmd->body.multisampleCount = srf->multisample_count;
+		cmd->body.autogenFilter = srf->autogen_filter;
+		cmd->body.size.width = srf->base_size.width;
+		cmd->body.size.height = srf->base_size.height;
+		cmd->body.size.depth = srf->base_size.depth;
+	}
+
 	vmw_fifo_commit(dev_priv, submit_len);
 
 	return 0;
@@ -1068,7 +1109,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 out_no_fifo:
 	vmw_resource_release_id(res);
 out_no_id:
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 	return ret;
 }
 
@@ -1188,6 +1229,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 static int vmw_gb_surface_destroy(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBSurface body;
@@ -1197,7 +1239,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_scrub(&res->binding_head);
+	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
+	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -1213,11 +1256,12 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
 
+
 /**
  * vmw_gb_surface_define_ioctl - Ioctl function implementing
  *                               the user surface define functionality.
@@ -1241,77 +1285,51 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 	uint32_t size;
-	const struct svga3d_surface_desc *desc;
 	uint32_t backup_handle;
 
+
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
 			128;
 
 	size = vmw_user_surface_size + 128;
 
-	desc = svga3dsurface_get_desc(req->format);
-	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
-		DRM_ERROR("Invalid surface format for surface creation.\n");
-		return -EINVAL;
-	}
-
-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	/* Define a surface based on the parameters. */
+	ret = vmw_surface_gb_priv_define(dev,
+			size,
+			req->svga3d_flags,
+			req->format,
+			req->drm_surface_flags & drm_vmw_surface_flag_scanout,
+			req->mip_levels,
+			req->multisample_count,
+			req->array_size,
+			req->base_size,
+			&srf);
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   size, false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for surface"
-				  " creation.\n");
-		goto out_unlock;
-	}
-
-	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_user_srf;
-	}
-
-	srf = &user_srf->srf;
-	res = &srf->res;
-
-	srf->flags = req->svga3d_flags;
-	srf->format = req->format;
-	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
-	srf->mip_levels[0] = req->mip_levels;
-	srf->num_sizes = 1;
-	srf->sizes = NULL;
-	srf->offsets = NULL;
-	user_srf->size = size;
-	srf->base_size = req->base_size;
-	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-	srf->multisample_count = req->multisample_count;
-	res->backup_size = svga3dsurface_get_serialized_size
-	  (srf->format, srf->base_size, srf->mip_levels[0],
-	   srf->flags & SVGA3D_SURFACE_CUBEMAP);
-
-	user_srf->prime.base.shareable = false;
-	user_srf->prime.base.tfile = NULL;
+	user_srf = container_of(srf, struct vmw_user_surface, srf);
 	if (drm_is_primary_client(file_priv))
 		user_srf->master = drm_master_get(file_priv->master);
 
-	/**
-	 * From this point, the generic resource management functions
-	 * destroy the object on failure.
-	 */
-
-	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		return ret;
+
+	res = &user_srf->srf.res;
+
 
 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
 					     &res->backup);
-	} else if (req->drm_surface_flags &
-		   drm_vmw_surface_flag_create_buffer)
+		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
+		    res->backup_size) {
+			DRM_ERROR("Surface backup buffer is too small.\n");
+			vmw_dmabuf_unreference(&res->backup);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
 					    res->backup_size,
 					    req->drm_surface_flags &
@@ -1324,7 +1342,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	tmp = vmw_resource_reference(&srf->res);
+	tmp = vmw_resource_reference(res);
 	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
 				    req->drm_surface_flags &
 				    drm_vmw_surface_flag_shareable,
@@ -1337,7 +1355,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	rep->handle = user_srf->prime.base.hash.key;
+	rep->handle      = user_srf->prime.base.hash.key;
 	rep->backup_size = res->backup_size;
 	if (res->backup) {
 		rep->buffer_map_handle =
@@ -1352,10 +1370,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	vmw_resource_unreference(&res);
 
-	ttm_read_unlock(&dev_priv->reservation_sem);
-	return 0;
-out_no_user_srf:
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
@@ -1415,6 +1429,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 	rep->creq.drm_surface_flags = 0;
 	rep->creq.multisample_count = srf->multisample_count;
 	rep->creq.autogen_filter = srf->autogen_filter;
+	rep->creq.array_size = srf->array_size;
 	rep->creq.buffer_handle = backup_handle;
 	rep->creq.base_size = srf->base_size;
 	rep->crep.handle = user_srf->prime.base.hash.key;
@@ -1429,3 +1444,137 @@ out_bad_resource:
 
 	return ret;
 }
+
+/**
+ * vmw_surface_gb_priv_define - Define a private GB surface
+ *
+ * @dev:  Pointer to a struct drm_device
+ * @user_accounting_size:  Used to track user-space memory usage, set
+ *                         to 0 for kernel mode only memory
+ * @svga3d_flags: SVGA3d surface flags for the device
+ * @format: requested surface format
+ * @for_scanout: true if intended to be used for a scanout buffer
+ * @num_mip_levels:  number of MIP levels
+ * @multisample_count: multisample count for the surface
+ * @array_size: Surface array size.
+ * @size: width, height, depth of the surface requested
+ * @srf_out: allocated surface.  Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx.  For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	int ret;
+	u32 num_layers;
+
+	*srf_out = NULL;
+
+	if (for_scanout) {
+		if (!svga3dsurface_is_screen_target_format(format)) {
+			DRM_ERROR("Invalid Screen Target surface format.\n");
+			return -EINVAL;
+		}
+	} else {
+		const struct svga3d_surface_desc *desc;
+
+		desc = svga3dsurface_get_desc(format);
+		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+			DRM_ERROR("Invalid surface format.\n");
+			return -EINVAL;
+		}
+	}
+
+	/* array_size must be zero for a non-GL3 host. */
+	if (array_size > 0 && !dev_priv->has_dx) {
+		DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   user_accounting_size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	*srf_out  = &user_srf->srf;
+	user_srf->size = user_accounting_size;
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile     = NULL;
+
+	srf = &user_srf->srf;
+	srf->flags             = svga3d_flags;
+	srf->format            = format;
+	srf->scanout           = for_scanout;
+	srf->mip_levels[0]     = num_mip_levels;
+	srf->num_sizes         = 1;
+	srf->sizes             = NULL;
+	srf->offsets           = NULL;
+	srf->base_size         = size;
+	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
+	srf->array_size        = array_size;
+	srf->multisample_count = multisample_count;
+
+	if (array_size)
+		num_layers = array_size;
+	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
+		num_layers = SVGA3D_MAX_SURFACE_FACES;
+	else
+		num_layers = 1;
+
+	srf->res.backup_size   =
+		svga3dsurface_get_serialized_size(srf->format,
+						  srf->base_size,
+						  srf->mip_levels[0],
+						  num_layers);
+
+	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+		srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+	if (dev_priv->active_display_unit == vmw_du_screen_target &&
+	    for_scanout)
+		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
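The layer count feeding the backup-size computation above follows a simple precedence: an explicit array size wins, a cubemap counts its six faces, and everything else is a single layer. A standalone model of that selection (the flag value below is a stand-in for SVGA3D_SURFACE_CUBEMAP):

#include <assert.h>
#include <stdint.h>

#define SURFACE_CUBEMAP		(1u << 0)	/* stand-in flag bit */
#define MAX_SURFACE_FACES	6u

static uint32_t surface_layers(uint32_t array_size, uint32_t flags)
{
	if (array_size)
		return array_size;		/* DX array surface */
	if (flags & SURFACE_CUBEMAP)
		return MAX_SURFACE_FACES;	/* one layer per face */
	return 1;				/* plain 2D surface */
}

int main(void)
{
	assert(surface_layers(4, 0) == 4);
	assert(surface_layers(0, SURFACE_CUBEMAP) == 6);
	assert(surface_layers(0, 0) == 1);
	return 0;
}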
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 98d6bfb3a997..e771091d2cd3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index fbc6ee6ca337..52a6fd224127 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -31,6 +31,9 @@
 #include "dev.h"
 
 #define MIPI_CAL_CTRL			0x00
+#define MIPI_CAL_CTRL_NOISE_FILTER(x)	(((x) & 0xf) << 26)
+#define MIPI_CAL_CTRL_PRESCALE(x)	(((x) & 0x3) << 24)
+#define MIPI_CAL_CTRL_CLKEN_OVR		(1 << 4)
 #define MIPI_CAL_CTRL_START		(1 << 0)
 
 #define MIPI_CAL_AUTOCAL_CTRL		0x01
@@ -44,15 +47,18 @@
 #define MIPI_CAL_CONFIG_CSIC		0x07
 #define MIPI_CAL_CONFIG_CSID		0x08
 #define MIPI_CAL_CONFIG_CSIE		0x09
+#define MIPI_CAL_CONFIG_CSIF		0x0a
 #define MIPI_CAL_CONFIG_DSIA		0x0e
 #define MIPI_CAL_CONFIG_DSIB		0x0f
 #define MIPI_CAL_CONFIG_DSIC		0x10
 #define MIPI_CAL_CONFIG_DSID		0x11
 
-#define MIPI_CAL_CONFIG_DSIAB_CLK	0x19
-#define MIPI_CAL_CONFIG_DSICD_CLK	0x1a
+#define MIPI_CAL_CONFIG_DSIA_CLK	0x19
+#define MIPI_CAL_CONFIG_DSIB_CLK	0x1a
 #define MIPI_CAL_CONFIG_CSIAB_CLK	0x1b
+#define MIPI_CAL_CONFIG_DSIC_CLK	0x1c
 #define MIPI_CAL_CONFIG_CSICD_CLK	0x1c
+#define MIPI_CAL_CONFIG_DSID_CLK	0x1d
 #define MIPI_CAL_CONFIG_CSIE_CLK	0x1d
 
 /* for data and clock lanes */
@@ -73,8 +79,11 @@
 
 #define MIPI_CAL_BIAS_PAD_CFG1		0x17
 #define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
+#define MIPI_CAL_BIAS_PAD_DRV_UP_REF(x) (((x) & 0x7) << 8)
 
 #define MIPI_CAL_BIAS_PAD_CFG2		0x18
+#define MIPI_CAL_BIAS_PAD_VCLAMP(x)	(((x) & 0x7) << 16)
+#define MIPI_CAL_BIAS_PAD_VAUXP(x)	(((x) & 0x7) << 4)
 #define MIPI_CAL_BIAS_PAD_PDVREG	(1 << 1)
 
 struct tegra_mipi_pad {
@@ -86,13 +95,35 @@ struct tegra_mipi_soc {
 	bool has_clk_lane;
 	const struct tegra_mipi_pad *pads;
 	unsigned int num_pads;
+
+	bool clock_enable_override;
+	bool needs_vclamp_ref;
+
+	/* bias pad configuration settings */
+	u8 pad_drive_down_ref;
+	u8 pad_drive_up_ref;
+
+	u8 pad_vclamp_level;
+	u8 pad_vauxp_level;
+
+	/* calibration settings for data lanes */
+	u8 hspdos;
+	u8 hspuos;
+	u8 termos;
+
+	/* calibration settings for clock lanes */
+	u8 hsclkpdos;
+	u8 hsclkpuos;
 };
 
 struct tegra_mipi {
 	const struct tegra_mipi_soc *soc;
+	struct device *dev;
 	void __iomem *regs;
 	struct mutex lock;
 	struct clk *clk;
+
+	unsigned long usage_count;
 };
 
 struct tegra_mipi_device {
@@ -114,6 +145,67 @@ static inline void tegra_mipi_writel(struct tegra_mipi *mipi, u32 value,
 	writel(value, mipi->regs + (offset << 2));
 }
 
+static int tegra_mipi_power_up(struct tegra_mipi *mipi)
+{
+	u32 value;
+	int err;
+
+	err = clk_enable(mipi->clk);
+	if (err < 0)
+		return err;
+
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+	value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
+
+	if (mipi->soc->needs_vclamp_ref)
+		value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+	value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+	clk_disable(mipi->clk);
+
+	return 0;
+}
+
+static int tegra_mipi_power_down(struct tegra_mipi *mipi)
+{
+	u32 value;
+	int err;
+
+	err = clk_enable(mipi->clk);
+	if (err < 0)
+		return err;
+
+	/*
+	 * The MIPI_CAL_BIAS_PAD_PDVREG controls a voltage regulator that
+	 * supplies the DSI pads. This must be kept enabled until none of the
+	 * DSI lanes are used anymore.
+	 */
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+	value |= MIPI_CAL_BIAS_PAD_PDVREG;
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+	/*
+	 * MIPI_CAL_BIAS_PAD_PDVCLAMP and MIPI_CAL_BIAS_PAD_E_VCLAMP_REF
+	 * control a regulator that supplies current to the pre-driver logic.
+	 * Powering down this regulator causes DSI to fail, so it must remain
+	 * powered on until none of the DSI lanes are used anymore.
+	 */
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+
+	if (mipi->soc->needs_vclamp_ref)
+		value &= ~MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+
+	value |= MIPI_CAL_BIAS_PAD_PDVCLAMP;
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+	return 0;
+}
+
 struct tegra_mipi_device *tegra_mipi_request(struct device *device)
 {
 	struct device_node *np = device->of_node;
@@ -150,6 +242,20 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
 	dev->pads = args.args[0];
 	dev->device = device;
 
+	mutex_lock(&dev->mipi->lock);
+
+	if (dev->mipi->usage_count++ == 0) {
+		err = tegra_mipi_power_up(dev->mipi);
+		if (err < 0) {
+			dev_err(dev->mipi->dev,
+				"failed to power up MIPI bricks: %d\n",
+				err);
+			dev->mipi->usage_count--;
+			mutex_unlock(&dev->mipi->lock);
+			return ERR_PTR(err);
+		}
+	}
+
+	mutex_unlock(&dev->mipi->lock);
+
 	return dev;
 
 put:
@@ -164,6 +270,25 @@ EXPORT_SYMBOL(tegra_mipi_request);
 
 void tegra_mipi_free(struct tegra_mipi_device *device)
 {
+	int err;
+
+	mutex_lock(&device->mipi->lock);
+
+	if (--device->mipi->usage_count == 0) {
+		err = tegra_mipi_power_down(device->mipi);
+		if (err < 0) {
+			/*
+			 * Not much that can be done here, so an error message
+			 * will have to do.
+			 */
+			dev_err(device->mipi->dev,
+				"failed to power down MIPI bricks: %d\n",
+				err);
+		}
+	}
+
+	mutex_unlock(&device->mipi->lock);
+
 	platform_device_put(device->pdev);
 	kfree(device);
 }
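request/free now bracket a shared usage count under the lock: the first user powers the calibration bricks up and the last one powers them down, so intermediate users find them already on. A compact userspace model of the pattern (a pthread mutex stands in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long usage_count;

static void brick_get(void)
{
	pthread_mutex_lock(&lock);
	if (usage_count++ == 0)
		puts("power up");	/* tegra_mipi_power_up() above */
	pthread_mutex_unlock(&lock);
}

static void brick_put(void)
{
	pthread_mutex_lock(&lock);
	if (--usage_count == 0)
		puts("power down");	/* tegra_mipi_power_down() above */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	brick_get();	/* first user: powers up */
	brick_get();	/* second user: no-op */
	brick_put();	/* still one user left: no-op */
	brick_put();	/* last user: powers down */
	return 0;
}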
@@ -199,16 +324,15 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
 
 	mutex_lock(&device->mipi->lock);
 
-	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0);
-	value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
-	value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
-	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
-
-	tegra_mipi_writel(device->mipi, MIPI_CAL_BIAS_PAD_DRV_DN_REF(2),
-			  MIPI_CAL_BIAS_PAD_CFG1);
+	value = MIPI_CAL_BIAS_PAD_DRV_DN_REF(soc->pad_drive_down_ref) |
+		MIPI_CAL_BIAS_PAD_DRV_UP_REF(soc->pad_drive_up_ref);
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG1);
 
 	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
-	value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+	value &= ~MIPI_CAL_BIAS_PAD_VCLAMP(0x7);
+	value &= ~MIPI_CAL_BIAS_PAD_VAUXP(0x7);
+	value |= MIPI_CAL_BIAS_PAD_VCLAMP(soc->pad_vclamp_level);
+	value |= MIPI_CAL_BIAS_PAD_VAUXP(soc->pad_vauxp_level);
 	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
 
 	for (i = 0; i < soc->num_pads; i++) {
@@ -216,21 +340,38 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
 
 		if (device->pads & BIT(i)) {
 			data = MIPI_CAL_CONFIG_SELECT |
-			       MIPI_CAL_CONFIG_HSPDOS(0) |
-			       MIPI_CAL_CONFIG_HSPUOS(4) |
-			       MIPI_CAL_CONFIG_TERMOS(5);
+			       MIPI_CAL_CONFIG_HSPDOS(soc->hspdos) |
+			       MIPI_CAL_CONFIG_HSPUOS(soc->hspuos) |
+			       MIPI_CAL_CONFIG_TERMOS(soc->termos);
 			clk = MIPI_CAL_CONFIG_SELECT |
-			      MIPI_CAL_CONFIG_HSCLKPDOSD(0) |
-			      MIPI_CAL_CONFIG_HSCLKPUOSD(4);
+			      MIPI_CAL_CONFIG_HSCLKPDOSD(soc->hsclkpdos) |
+			      MIPI_CAL_CONFIG_HSCLKPUOSD(soc->hsclkpuos);
 		}
 
 		tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
 
-		if (soc->has_clk_lane)
+		if (soc->has_clk_lane && soc->pads[i].clk != 0)
 			tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
 	}
 
 	value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
+	value &= ~MIPI_CAL_CTRL_NOISE_FILTER(0xf);
+	value &= ~MIPI_CAL_CTRL_PRESCALE(0x3);
+	value |= MIPI_CAL_CTRL_NOISE_FILTER(0xa);
+	value |= MIPI_CAL_CTRL_PRESCALE(0x2);
+
+	if (!soc->clock_enable_override)
+		value &= ~MIPI_CAL_CTRL_CLKEN_OVR;
+	else
+		value |= MIPI_CAL_CTRL_CLKEN_OVR;
+
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
+
+	/* clear any pending status bits */
+	value = tegra_mipi_readl(device->mipi, MIPI_CAL_STATUS);
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_STATUS);
+
+	value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
 	value |= MIPI_CAL_CTRL_START;
 	tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
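The new CTRL field macros follow the kernel's usual encode-at-position shape, which is what makes the clear-then-set sequence above work: passing the all-ones value yields the field's full mask for the clear, and the real value for the set. A standalone model of that read-modify-write idiom, using the same bit layout as the macros added at the top of the file:

#include <assert.h>
#include <stdint.h>

#define NOISE_FILTER(x)	(((x) & 0xfu) << 26)	/* 4-bit field at bit 26 */
#define PRESCALE(x)	(((x) & 0x3u) << 24)	/* 2-bit field at bit 24 */

int main(void)
{
	uint32_t value = 0xffffffffu;	/* pretend register readback */

	/* Clear each field using its all-ones pattern as the mask... */
	value &= ~NOISE_FILTER(0xf);
	value &= ~PRESCALE(0x3);
	/* ...then OR in the new values, as tegra_mipi_calibrate() does. */
	value |= NOISE_FILTER(0xa);
	value |= PRESCALE(0x2);

	assert(((value >> 26) & 0xf) == 0xa);
	assert(((value >> 24) & 0x3) == 0x2);
	return 0;
}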
 
@@ -259,6 +400,17 @@ static const struct tegra_mipi_soc tegra114_mipi_soc = {
 	.has_clk_lane = false,
 	.pads = tegra114_mipi_pads,
 	.num_pads = ARRAY_SIZE(tegra114_mipi_pads),
+	.clock_enable_override = true,
+	.needs_vclamp_ref = true,
+	.pad_drive_down_ref = 0x2,
+	.pad_drive_up_ref = 0x0,
+	.pad_vclamp_level = 0x0,
+	.pad_vauxp_level = 0x0,
+	.hspdos = 0x0,
+	.hspuos = 0x4,
+	.termos = 0x5,
+	.hsclkpdos = 0x0,
+	.hsclkpuos = 0x4,
 };
 
 static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
@@ -266,20 +418,80 @@ static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
 	{ .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
 	{ .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
 	{ .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
-	{ .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
-	{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
-	{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+	{ .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK  },
+	{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK  },
+	{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK  },
 };
 
 static const struct tegra_mipi_soc tegra124_mipi_soc = {
 	.has_clk_lane = true,
 	.pads = tegra124_mipi_pads,
 	.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+	.clock_enable_override = true,
+	.needs_vclamp_ref = true,
+	.pad_drive_down_ref = 0x2,
+	.pad_drive_up_ref = 0x0,
+	.pad_vclamp_level = 0x0,
+	.pad_vauxp_level = 0x0,
+	.hspdos = 0x0,
+	.hspuos = 0x0,
+	.termos = 0x0,
+	.hsclkpdos = 0x1,
+	.hsclkpuos = 0x2,
+};
+
+static const struct tegra_mipi_soc tegra132_mipi_soc = {
+	.has_clk_lane = true,
+	.pads = tegra124_mipi_pads,
+	.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+	.clock_enable_override = false,
+	.needs_vclamp_ref = false,
+	.pad_drive_down_ref = 0x0,
+	.pad_drive_up_ref = 0x3,
+	.pad_vclamp_level = 0x0,
+	.pad_vauxp_level = 0x0,
+	.hspdos = 0x0,
+	.hspuos = 0x0,
+	.termos = 0x0,
+	.hsclkpdos = 0x3,
+	.hsclkpuos = 0x2,
+};
+
+static const struct tegra_mipi_pad tegra210_mipi_pads[] = {
+	{ .data = MIPI_CAL_CONFIG_CSIA, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIB, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIC, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSID, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIE, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIF, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSIC, .clk = MIPI_CAL_CONFIG_DSIC_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSID, .clk = MIPI_CAL_CONFIG_DSID_CLK },
+};
+
+static const struct tegra_mipi_soc tegra210_mipi_soc = {
+	.has_clk_lane = true,
+	.pads = tegra210_mipi_pads,
+	.num_pads = ARRAY_SIZE(tegra210_mipi_pads),
+	.clock_enable_override = true,
+	.needs_vclamp_ref = false,
+	.pad_drive_down_ref = 0x0,
+	.pad_drive_up_ref = 0x3,
+	.pad_vclamp_level = 0x1,
+	.pad_vauxp_level = 0x1,
+	.hspdos = 0x0,
+	.hspuos = 0x2,
+	.termos = 0x0,
+	.hsclkpdos = 0x0,
+	.hsclkpuos = 0x2,
 };
 
-static struct of_device_id tegra_mipi_of_match[] = {
+static const struct of_device_id tegra_mipi_of_match[] = {
 	{ .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
 	{ .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
+	{ .compatible = "nvidia,tegra132-mipi", .data = &tegra132_mipi_soc },
+	{ .compatible = "nvidia,tegra210-mipi", .data = &tegra210_mipi_soc },
 	{ },
 };
 
@@ -299,6 +511,7 @@ static int tegra_mipi_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	mipi->soc = match->data;
+	mipi->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mipi->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 37ac7b5dbd06..21060668fd25 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -6,17 +6,19 @@
  * Licensed under GPLv2
  *
  * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs
-
- Switcher interface - methods require for ATPX and DCM
- - switchto - this throws the output MUX switch
- - discrete_set_power - sets the power state for the discrete card
-
- GPU driver interface
- - set_gpu_state - this should do the equiv of s/r for the card
-		  - this should *not* set the discrete power state
- - switch_check  - check if the device is in a position to switch now
+ *
+ * Switcher interface - methods required for ATPX and DCM
+ * - switchto - this throws the output MUX switch
+ * - discrete_set_power - sets the power state for the discrete card
+ *
+ * GPU driver interface
+ * - set_gpu_state - this should do the equiv of s/r for the card
+ *                 - this should *not* set the discrete power state
+ * - switch_check  - check if the device is in a position to switch now
  */
 
+#define pr_fmt(fmt) "vga_switcheroo: " fmt
+
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
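Defining pr_fmt before the headers are included is what lets the conversions below drop their hand-written "vga_switcheroo: " prefixes: the kernel's pr_info()/pr_err() macros paste pr_fmt(fmt) into every format string. A userspace model of the same macro trick (##__VA_ARGS__ is the GNU extension the kernel itself relies on):

#include <stdio.h>

/* Must be defined before the pr_* macros see it, as in the kernel. */
#define pr_fmt(fmt) "vga_switcheroo: " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("enabled\n");			/* "vga_switcheroo: enabled" */
	pr_info("client %x refused switch\n", 0x100);
	return 0;
}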
@@ -111,7 +113,7 @@ int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
 
 	vgasr_priv.handler = handler;
 	if (vga_switcheroo_ready()) {
-		printk(KERN_INFO "vga_switcheroo: enabled\n");
+		pr_info("enabled\n");
 		vga_switcheroo_enable();
 	}
 	mutex_unlock(&vgasr_mutex);
@@ -124,7 +126,7 @@ void vga_switcheroo_unregister_handler(void)
 	mutex_lock(&vgasr_mutex);
 	vgasr_priv.handler = NULL;
 	if (vgasr_priv.active) {
-		pr_info("vga_switcheroo: disabled\n");
+		pr_info("disabled\n");
 		vga_switcheroo_debugfs_fini(&vgasr_priv);
 		vgasr_priv.active = false;
 	}
@@ -155,7 +157,7 @@ static int register_client(struct pci_dev *pdev,
 		vgasr_priv.registered_clients++;
 
 	if (vga_switcheroo_ready()) {
-		printk(KERN_INFO "vga_switcheroo: enabled\n");
+		pr_info("enabled\n");
 		vga_switcheroo_enable();
 	}
 	mutex_unlock(&vgasr_mutex);
@@ -167,7 +169,8 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
 				   bool driver_power_control)
 {
 	return register_client(pdev, ops, -1,
-			       pdev == vga_default_device(), driver_power_control);
+			       pdev == vga_default_device(),
+			       driver_power_control);
 }
 EXPORT_SYMBOL(vga_switcheroo_register_client);
 
@@ -183,6 +186,7 @@ static struct vga_switcheroo_client *
 find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
 {
 	struct vga_switcheroo_client *client;
+
 	list_for_each_entry(client, head, list)
 		if (client->pdev == pdev)
 			return client;
@@ -193,6 +197,7 @@ static struct vga_switcheroo_client *
 find_client_from_id(struct list_head *head, int client_id)
 {
 	struct vga_switcheroo_client *client;
+
 	list_for_each_entry(client, head, list)
 		if (client->id == client_id)
 			return client;
@@ -203,6 +208,7 @@ static struct vga_switcheroo_client *
 find_active_client(struct list_head *head)
 {
 	struct vga_switcheroo_client *client;
+
 	list_for_each_entry(client, head, list)
 		if (client->active && client_is_vga(client))
 			return client;
@@ -235,7 +241,7 @@ void vga_switcheroo_unregister_client(struct pci_dev *pdev)
 		kfree(client);
 	}
 	if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
-		printk(KERN_INFO "vga_switcheroo: disabled\n");
+		pr_info("disabled\n");
 		vga_switcheroo_debugfs_fini(&vgasr_priv);
 		vgasr_priv.active = false;
 	}
@@ -260,10 +266,12 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
 {
 	struct vga_switcheroo_client *client;
 	int i = 0;
+
 	mutex_lock(&vgasr_mutex);
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
 		seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
-			   client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+			   client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" :
+								     "IGD",
 			   client_is_vga(client) ? "" : "-Audio",
 			   client->active ? '+' : ' ',
 			   client->driver_power_control ? "Dyn" : "",
@@ -347,6 +355,7 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
 
 	if (new_client->fb_info) {
 		struct fb_event event;
+
 		console_lock();
 		event.info = new_client->fb_info;
 		fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
@@ -375,7 +384,7 @@ static bool check_can_switch(void)
 
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
 		if (!client->ops->can_switch(client->pdev)) {
-			printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
+			pr_err("client %x refused switch\n", client->id);
 			return false;
 		}
 	}
@@ -484,20 +493,20 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
 	if (can_switch) {
 		ret = vga_switchto_stage1(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
+			pr_err("switching failed stage 1 %d\n", ret);
 
 		ret = vga_switchto_stage2(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
+			pr_err("switching failed stage 2 %d\n", ret);
 
 	} else {
-		printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
+		pr_info("setting delayed switch to client %d\n", client->id);
 		vgasr_priv.delayed_switch_active = true;
 		vgasr_priv.delayed_client_id = client_id;
 
 		ret = vga_switchto_stage1(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
+			pr_err("delayed switching stage 1 failed %d\n", ret);
 	}
 
 out:
@@ -516,32 +525,32 @@ static const struct file_operations vga_switcheroo_debugfs_fops = {
 
 static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
 {
-	if (priv->switch_file) {
-		debugfs_remove(priv->switch_file);
-		priv->switch_file = NULL;
-	}
-	if (priv->debugfs_root) {
-		debugfs_remove(priv->debugfs_root);
-		priv->debugfs_root = NULL;
-	}
+	debugfs_remove(priv->switch_file);
+	priv->switch_file = NULL;
+
+	debugfs_remove(priv->debugfs_root);
+	priv->debugfs_root = NULL;
 }
 
 static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
 {
+	static const char mp[] = "/sys/kernel/debug";
+
 	/* already initialised */
 	if (priv->debugfs_root)
 		return 0;
 	priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
 
 	if (!priv->debugfs_root) {
-		printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
+		pr_err("Cannot create %s/vgaswitcheroo\n", mp);
 		goto fail;
 	}
 
 	priv->switch_file = debugfs_create_file("switch", 0644,
-						priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
+						priv->debugfs_root, NULL,
+						&vga_switcheroo_debugfs_fops);
 	if (!priv->switch_file) {
-		printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
+		pr_err("cannot create %s/vgaswitcheroo/switch\n", mp);
 		goto fail;
 	}
 	return 0;
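The fini simplification above works because debugfs_remove() is a no-op for a NULL dentry, in the same spirit as kfree(NULL):

	/* debugfs_remove(NULL) is a no-op, so
	 *	if (priv->switch_file)
	 *		debugfs_remove(priv->switch_file);
	 * can always collapse to a bare debugfs_remove() call.
	 */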
@@ -560,7 +569,8 @@ int vga_switcheroo_process_delayed_switch(void)
 	if (!vgasr_priv.delayed_switch_active)
 		goto err;
 
-	printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
+	pr_info("processing delayed switch to %d\n",
+		vgasr_priv.delayed_client_id);
 
 	client = find_client_from_id(&vgasr_priv.clients,
 				     vgasr_priv.delayed_client_id);
@@ -569,7 +579,7 @@ int vga_switcheroo_process_delayed_switch(void)
 
 	ret = vga_switchto_stage2(client);
 	if (ret)
-		printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
+		pr_err("delayed switching failed stage 2 %d\n", ret);
 
 	vgasr_priv.delayed_switch_active = false;
 	err = 0;
@@ -579,7 +589,8 @@ err:
 }
 EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
 
-static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state)
+static void vga_switcheroo_power_switch(struct pci_dev *pdev,
+					enum vga_switcheroo_state state)
 {
 	struct vga_switcheroo_client *client;
 
@@ -598,7 +609,8 @@ static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switchero
 
 /* force a PCI device to a certain state - mainly to turn off audio clients */
 
-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic)
+void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev,
+				       enum vga_switcheroo_state dynamic)
 {
 	struct vga_switcheroo_client *client;
 
@@ -644,7 +656,8 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
 
 /* this version is for the case where the power switch is separate
    from the device being powered down. */
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
+int vga_switcheroo_init_domain_pm_ops(struct device *dev,
+				      struct dev_pm_domain *domain)
 {
 	/* copy over all the bus versions */
 	if (dev->bus && dev->bus->pm) {
@@ -675,7 +688,8 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
 	/* we need to check if we have to switch back on the video
 	   device so the audio device can come back */
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
-		if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) {
+		if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) &&
+		    client_is_vga(client)) {
 			found = client;
 			ret = pm_runtime_get_sync(&client->pdev->dev);
 			if (ret) {
@@ -695,12 +709,15 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
 	return ret;
 }
 
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
+int
+vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
+						 struct dev_pm_domain *domain)
 {
 	/* copy over all the bus versions */
 	if (dev->bus && dev->bus->pm) {
 		domain->ops = *dev->bus->pm;
-		domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio;
+		domain->ops.runtime_resume =
+			vga_switcheroo_runtime_resume_hdmi_audio;
 
 		dev->pm_domain = domain;
 		return 0;
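Both PM-domain helpers above use the same hand-off: the caller supplies storage for a struct dev_pm_domain, the helper copies the bus PM ops, overrides runtime_resume, and parks the domain on the device. A hedged sketch of a caller; the names are illustrative, not from an in-tree driver:

	static struct dev_pm_domain example_pm_domain;

	static int example_setup_runtime_pm(struct device *dev)
	{
		/* runtime resume now routes through vga_switcheroo */
		return vga_switcheroo_init_domain_pm_ops(dev, &example_pm_domain);
	}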
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 7bcbf863656e..a0b433456107 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -29,6 +29,8 @@
  *
  */
 
+#define pr_fmt(fmt) "vgaarb: " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
@@ -134,7 +136,6 @@ struct pci_dev *vga_default_device(void)
 {
 	return vga_default;
 }
-
 EXPORT_SYMBOL_GPL(vga_default_device);
 
 void vga_set_default_device(struct pci_dev *pdev)
@@ -298,9 +299,9 @@ enable_them:
 
 	pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
 
-	if (!vgadev->bridge_has_one_vga) {
+	if (!vgadev->bridge_has_one_vga)
 		vga_irq_set_state(vgadev, true);
-	}
+
 	vgadev->owns |= wants;
 lock_them:
 	vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
@@ -452,15 +453,15 @@ bail:
 }
 EXPORT_SYMBOL(vga_put);
 
-/* Rules for using a bridge to control a VGA descendant decoding:
-   if a bridge has only one VGA descendant then it can be used
-   to control the VGA routing for that device.
-   It should always use the bridge closest to the device to control it.
-   If a bridge has a direct VGA descendant, but also have a sub-bridge
-   VGA descendant then we cannot use that bridge to control the direct VGA descendant.
-   So for every device we register, we need to iterate all its parent bridges
-   so we can invalidate any devices using them properly.
-*/
+/*
+ * Rules for using a bridge to control a VGA descendant decoding: if a bridge
+ * has only one VGA descendant then it can be used to control the VGA routing
+ * for that device. It should always use the bridge closest to the device to
+ * control it. If a bridge has a direct VGA descendant, but also has a sub-
+ * bridge VGA descendant then we cannot use that bridge to control the direct
+ * VGA descendant. So for every device we register, we need to iterate all
+ * its parent bridges so we can invalidate any devices using them properly.
+ */
 static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
 {
 	struct vga_device *same_bridge_vgadev;
@@ -484,21 +485,26 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
 
 			/* see if they share a bridge with this device */
 			if (new_bridge == bridge) {
-				/* if their direct parent bridge is the same
-				   as any bridge of this device then it can't be used
-				   for that device */
+				/*
+				 * If their direct parent bridge is the same
+				 * as any bridge of this device then it can't
+				 * be used for that device.
+				 */
 				same_bridge_vgadev->bridge_has_one_vga = false;
 			}
 
-			/* now iterate the previous devices bridge hierarchy */
-			/* if the new devices parent bridge is in the other devices
-			   hierarchy then we can't use it to control this device */
+			/*
+			 * Now iterate the previous device's bridge hierarchy.
+			 * If the new device's parent bridge is in the other
+			 * device's hierarchy then we can't use it to control
+			 * this device.
+			 */
 			while (bus) {
 				bridge = bus->self;
-				if (bridge) {
-					if (bridge == vgadev->pdev->bus->self)
-						vgadev->bridge_has_one_vga = false;
-				}
+
+				if (bridge && bridge == vgadev->pdev->bus->self)
+					vgadev->bridge_has_one_vga = false;
+
 				bus = bus->parent;
 			}
 		}
@@ -527,10 +533,10 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
 	/* Allocate structure */
 	vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL);
 	if (vgadev == NULL) {
-		pr_err("vgaarb: failed to allocate pci device\n");
-		/* What to do on allocation failure ? For now, let's
-		 * just do nothing, I'm not sure there is anything saner
-		 * to be done
+		pr_err("failed to allocate pci device\n");
+		/*
+		 * What to do on allocation failure? For now, let's just do
+		 * nothing; I'm not sure there is anything saner to be done.
 		 */
 		return false;
 	}
@@ -566,8 +572,8 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
 		bridge = bus->self;
 		if (bridge) {
 			u16 l;
-			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
-					     &l);
+
+			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l);
 			if (!(l & PCI_BRIDGE_CTL_VGA)) {
 				vgadev->owns = 0;
 				break;
@@ -581,8 +587,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
 	 */
 	if (vga_default == NULL &&
 	    ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
-		pr_info("vgaarb: setting as boot device: PCI:%s\n",
-			pci_name(pdev));
+		pr_info("setting as boot device: PCI:%s\n", pci_name(pdev));
 		vga_set_default_device(pdev);
 	}
 
@@ -591,7 +596,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
 	/* Add to the list */
 	list_add(&vgadev->list, &vga_list);
 	vga_count++;
-	pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
+	pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
 		pci_name(pdev),
 		vga_iostate_to_str(vgadev->decodes),
 		vga_iostate_to_str(vgadev->owns),
@@ -651,7 +656,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
 	decodes_unlocked = vgadev->locks & decodes_removed;
 	vgadev->decodes = new_decodes;
 
-	pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
+	pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
 		pci_name(vgadev->pdev),
 		vga_iostate_to_str(old_decodes),
 		vga_iostate_to_str(vgadev->decodes),
@@ -673,10 +678,12 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
 	if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
 	    new_decodes & VGA_RSRC_LEGACY_MASK)
 		vga_decode_count++;
-	pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
+	pr_debug("decoding count now is: %d\n", vga_decode_count);
 }
 
-static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev,
+				      unsigned int decodes,
+				      bool userspace)
 {
 	struct vga_device *vgadev;
 	unsigned long flags;
@@ -712,7 +719,8 @@ EXPORT_SYMBOL(vga_set_legacy_decoding);
 /* call with NULL to unregister */
 int vga_client_register(struct pci_dev *pdev, void *cookie,
 			void (*irq_set_state)(void *cookie, bool state),
-			unsigned int (*set_vga_decode)(void *cookie, bool decode))
+			unsigned int (*set_vga_decode)(void *cookie,
+						       bool decode))
 {
 	int ret = -ENODEV;
 	struct vga_device *vgadev;
@@ -832,7 +840,7 @@ static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
 	return 1;
 }
 
-static ssize_t vga_arb_read(struct file *file, char __user * buf,
+static ssize_t vga_arb_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
 	struct vga_arb_private *priv = file->private_data;
@@ -899,7 +907,7 @@ done:
  * TODO: To avoid parsing inside kernel and to improve the speed we may
  * consider use ioctl here
  */
-static ssize_t vga_arb_write(struct file *file, const char __user * buf,
+static ssize_t vga_arb_write(struct file *file, const char __user *buf,
 			     size_t count, loff_t *ppos)
 {
 	struct vga_arb_private *priv = file->private_data;
@@ -1075,13 +1083,13 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
 				ret_val = -EPROTO;
 				goto done;
 			}
-			pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
+			pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos,
 				domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
 			pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
-			pr_debug("vgaarb: pdev %p\n", pdev);
+			pr_debug("pdev %p\n", pdev);
 			if (!pdev) {
-				pr_err("vgaarb: invalid PCI address %x:%x:%x\n",
+				pr_err("invalid PCI address %x:%x:%x\n",
 					domain, bus, devfn);
 				ret_val = -ENODEV;
 				goto done;
@@ -1089,10 +1097,13 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
 		}
 
 		vgadev = vgadev_find(pdev);
-		pr_debug("vgaarb: vgadev %p\n", vgadev);
+		pr_debug("vgadev %p\n", vgadev);
 		if (vgadev == NULL) {
-			pr_err("vgaarb: this pci device is not a vga device\n");
-			pci_dev_put(pdev);
+			if (pdev) {
+				pr_err("this pci device is not a vga device\n");
+				pci_dev_put(pdev);
+			}
+
 			ret_val = -ENODEV;
 			goto done;
 		}
@@ -1109,7 +1120,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
 			}
 		}
 		if (i == MAX_USER_CARDS) {
-			pr_err("vgaarb: maximum user cards (%d) number reached!\n",
+			pr_err("maximum user cards (%d) number reached!\n",
 				MAX_USER_CARDS);
 			pci_dev_put(pdev);
 			/* XXX: which value to return? */
@@ -1125,7 +1136,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
 	} else if (strncmp(curr_pos, "decodes ", 8) == 0) {
 		curr_pos += 8;
 		remaining -= 8;
-		pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
+		pr_debug("client 0x%p called 'decodes'\n", priv);
 
 		if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
 			ret_val = -EPROTO;
@@ -1150,7 +1161,7 @@ done:
 	return ret_val;
 }
 
-static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
+static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
 {
 	struct vga_arb_private *priv = file->private_data;
 
@@ -1246,7 +1257,8 @@ static void vga_arbiter_notify_clients(void)
 		else
 			new_state = true;
 		if (vgadev->set_vga_decode) {
-			new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state);
+			new_decodes = vgadev->set_vga_decode(vgadev->cookie,
+							     new_state);
 			vga_update_device_decodes(vgadev, new_decodes);
 		}
 	}
@@ -1300,7 +1312,7 @@ static int __init vga_arb_device_init(void)
 
 	rc = misc_register(&vga_arb_device);
 	if (rc < 0)
-		pr_err("vgaarb: error %d registering device\n", rc);
+		pr_err("error %d registering device\n", rc);
 
 	bus_register_notifier(&pci_bus_type, &pci_notifier);
 
@@ -1312,21 +1324,29 @@ static int __init vga_arb_device_init(void)
 			       PCI_ANY_ID, pdev)) != NULL)
 		vga_arbiter_add_pci_device(pdev);
 
-	pr_info("vgaarb: loaded\n");
+	pr_info("loaded\n");
 
 	list_for_each_entry(vgadev, &vga_list, list) {
 #if defined(CONFIG_X86) || defined(CONFIG_IA64)
-		/* Override I/O based detection done by vga_arbiter_add_pci_device()
-		 * as it may take the wrong device (e.g. on Apple system under EFI).
+		/*
+		 * Override vga_arbiter_add_pci_device()'s I/O based detection
+		 * as it may take the wrong device (e.g. on Apple system under
+		 * EFI).
 		 *
-		 * Select the device owning the boot framebuffer if there is one.
+		 * Select the device owning the boot framebuffer if there is
+		 * one.
 		 */
-		resource_size_t start, end;
+		resource_size_t start, end, limit;
+		unsigned long flags;
 		int i;
 
+		limit = screen_info.lfb_base + screen_info.lfb_size;
+
 		/* Does firmware framebuffer belong to us? */
 		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-			if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM))
+			flags = pci_resource_flags(vgadev->pdev, i);
+
+			if ((flags & IORESOURCE_MEM) == 0)
 				continue;
 
 			start = pci_resource_start(vgadev->pdev, i);
@@ -1335,22 +1355,24 @@ static int __init vga_arb_device_init(void)
 			if (!start || !end)
 				continue;
 
-			if (screen_info.lfb_base < start ||
-			    (screen_info.lfb_base + screen_info.lfb_size) >= end)
+			if (screen_info.lfb_base < start || limit >= end)
 				continue;
+
 			if (!vga_default_device())
-				pr_info("vgaarb: setting as boot device: PCI:%s\n",
+				pr_info("setting as boot device: PCI:%s\n",
 					pci_name(vgadev->pdev));
 			else if (vgadev->pdev != vga_default_device())
-				pr_info("vgaarb: overriding boot device: PCI:%s\n",
+				pr_info("overriding boot device: PCI:%s\n",
 					pci_name(vgadev->pdev));
 			vga_set_default_device(vgadev->pdev);
 		}
 #endif
 		if (vgadev->bridge_has_one_vga)
-			pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
+			pr_info("bridge control possible %s\n",
+				pci_name(vgadev->pdev));
 		else
-			pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev));
+			pr_info("no bridge control possible %s\n",
+				pci_name(vgadev->pdev));
 	}
 	return rc;
 }
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 5fa21739c54f..99d63675f073 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -331,6 +331,7 @@ config LPC_SCH
 
 config INTEL_SOC_PMIC
 	bool "Support for Intel Atom SoC PMIC"
+	depends on GPIOLIB
 	depends on I2C=y
 	select MFD_CORE
 	select REGMAP_I2C
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index ffbf6f6680b0..d9e15cf7c6c8 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -24,8 +24,25 @@
 #include <linux/acpi.h>
 #include <linux/regmap.h>
 #include <linux/mfd/intel_soc_pmic.h>
+#include <linux/gpio/machine.h>
+#include <linux/pwm.h>
 #include "intel_soc_pmic_core.h"
 
+/* Lookup table for the Panel Enable/Disable line as GPIO signals */
+static struct gpiod_lookup_table panel_gpio_table = {
+	/* Intel GFX is consumer */
+	.dev_id = "0000:00:02.0",
+	.table = {
+		/* Panel EN/DISABLE */
+		GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+	},
+};
+
+/* PWM consumed by the Intel GFX */
+static struct pwm_lookup crc_pwm_lookup[] = {
+	PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_backlight", 0, PWM_POLARITY_NORMAL),
+};
+
 static int intel_soc_pmic_find_gpio_irq(struct device *dev)
 {
 	struct gpio_desc *desc;
@@ -85,6 +102,12 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
 	if (ret)
 		dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
 
+	/* Add lookup table binding for Panel Control to the GPIO Chip */
+	gpiod_add_lookup_table(&panel_gpio_table);
+
+	/* Add lookup table for crc-pwm */
+	pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
 	ret = mfd_add_devices(dev, -1, config->cell_dev,
 			      config->n_cell_devs, NULL, 0,
 			      regmap_irq_get_domain(pmic->irq_chip_data));
@@ -104,6 +127,12 @@ static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
 
 	regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
 
+	/* Remove lookup table for Panel Control from the GPIO Chip */
+	gpiod_remove_lookup_table(&panel_gpio_table);
+
+	/* remove crc-pwm lookup table */
+	pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
 	mfd_remove_devices(&i2c->dev);
 
 	return 0;
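The two tables added in probe above are board glue: they hand the PMIC's GPIO 94 and PWM 0 to the Intel GFX PCI device ("0000:00:02.0") under the connection IDs "panel" and "pwm_backlight". A sketch of the consumer side, assuming dev is that GFX device; the function name is hypothetical:

	static int example_gfx_get_panel_resources(struct device *dev)
	{
		struct pwm_device *pwm;
		struct gpio_desc *panel;

		pwm = pwm_get(dev, "pwm_backlight");	/* via crc_pwm_lookup */
		if (IS_ERR(pwm))
			return PTR_ERR(pwm);

		panel = gpiod_get(dev, "panel", GPIOD_OUT_HIGH); /* via panel_gpio_table */
		if (IS_ERR(panel)) {
			pwm_put(pwm);
			return PTR_ERR(panel);
		}

		return 0;
	}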
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 7436075e8983..4a7494872da2 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -109,6 +109,9 @@ static struct mfd_cell crystal_cove_dev[] = {
 	{
 		.name = "crystal_cove_pmic",
 	},
+	{
+		.name = "crystal_cove_pwm",
+	},
 };
 
 static const struct regmap_config crystal_cove_regmap_config = {
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index b1541f40fd8d..948d9abd27f1 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -111,6 +111,13 @@ config PWM_CLPS711X
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-clps711x.
 
+config PWM_CRC
+	bool "Intel Crystalcove (CRC) PWM support"
+	depends on X86 && INTEL_SOC_PMIC
+	help
+	  Generic PWM framework driver for Crystalcove (CRC) PMIC based PWM
+	  control.
+
 config PWM_EP93XX
 	tristate "Cirrus Logic EP93xx PWM support"
 	depends on ARCH_EP93XX
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index ec50eb5b5a8f..d186f35a6538 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_PWM_BCM_KONA)	+= pwm-bcm-kona.o
 obj-$(CONFIG_PWM_BCM2835)	+= pwm-bcm2835.o
 obj-$(CONFIG_PWM_BFIN)		+= pwm-bfin.o
 obj-$(CONFIG_PWM_CLPS711X)	+= pwm-clps711x.o
+obj-$(CONFIG_PWM_CRC)		+= pwm-crc.o
 obj-$(CONFIG_PWM_EP93XX)	+= pwm-ep93xx.o
 obj-$(CONFIG_PWM_FSL_FTM)	+= pwm-fsl-ftm.o
 obj-$(CONFIG_PWM_IMG)		+= pwm-img.o
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
new file mode 100644
index 000000000000..7101c7020bf4
--- /dev/null
+++ b/drivers/pwm/pwm-crc.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Shobhit Kumar <shobhit.kumar@intel.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pwm.h>
+
+#define PWM0_CLK_DIV		0x4B
+#define  PWM_OUTPUT_ENABLE	BIT(7)
+#define  PWM_DIV_CLK_0		0x00 /* DIVIDECLK = BASECLK */
+#define  PWM_DIV_CLK_100	0x63 /* DIVIDECLK = BASECLK/100 */
+#define  PWM_DIV_CLK_128	0x7F /* DIVIDECLK = BASECLK/128 */
+
+#define PWM0_DUTY_CYCLE		0x4E
+#define BACKLIGHT_EN		0x51
+
+#define PWM_MAX_LEVEL		0xFF
+
+#define PWM_BASE_CLK		6000000  /* 6 MHz */
+#define PWM_MAX_PERIOD_NS	21333    /* 46.875 kHz */
+
+/**
+ * struct crystalcove_pwm - Crystal Cove PWM controller
+ * @chip: the abstract pwm_chip structure.
+ * @regmap: the regmap from the parent device.
+ */
+struct crystalcove_pwm {
+	struct pwm_chip chip;
+	struct regmap *regmap;
+};
+
+static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *pc)
+{
+	return container_of(pc, struct crystalcove_pwm, chip);
+}
+
+static int crc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+	struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+
+	regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1);
+
+	return 0;
+}
+
+static void crc_pwm_disable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+	struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+
+	regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
+}
+
+static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm,
+			  int duty_ns, int period_ns)
+{
+	struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+	struct device *dev = crc_pwm->chip.dev;
+	int level;
+
+	if (period_ns > PWM_MAX_PERIOD_NS) {
+		dev_err(dev, "unsupported period_ns\n");
+		return -EINVAL;
+	}
+
+	if (pwm->period != period_ns) {
+		int clk_div;
+
+		/* changing the clk divisor, need to disable first */
+		crc_pwm_disable(c, pwm);
+		clk_div = PWM_BASE_CLK * period_ns / NSEC_PER_SEC;
+
+		regmap_write(crc_pwm->regmap, PWM0_CLK_DIV,
+					clk_div | PWM_OUTPUT_ENABLE);
+
+		/* enable back */
+		crc_pwm_enable(c, pwm);
+	}
+
+	/* change the pwm duty cycle */
+	level = duty_ns * PWM_MAX_LEVEL / period_ns;
+	regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level);
+
+	return 0;
+}
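Plugging numbers into the divisor and duty math above, assuming the 6 MHz base clock (values only, not additional driver code):

	/* period_ns = 21333 (PWM_MAX_PERIOD_NS):
	 *	clk_div = 6000000 * 21333 / 1000000000 = 127	(PWM_DIV_CLK_128)
	 * a 50% request, duty_ns = 10666:
	 *	level = 10666 * 255 / 21333 = 127	of PWM_MAX_LEVEL (255)
	 */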
+
+static const struct pwm_ops crc_pwm_ops = {
+	.config = crc_pwm_config,
+	.enable = crc_pwm_enable,
+	.disable = crc_pwm_disable,
+};
+
+static int crystalcove_pwm_probe(struct platform_device *pdev)
+{
+	struct crystalcove_pwm *pwm;
+	struct device *dev = pdev->dev.parent;
+	struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+	pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+	if (!pwm)
+		return -ENOMEM;
+
+	pwm->chip.dev = &pdev->dev;
+	pwm->chip.ops = &crc_pwm_ops;
+	pwm->chip.base = -1;
+	pwm->chip.npwm = 1;
+
+	/* get the PMIC regmap */
+	pwm->regmap = pmic->regmap;
+
+	platform_set_drvdata(pdev, pwm);
+
+	return pwmchip_add(&pwm->chip);
+}
+
+static int crystalcove_pwm_remove(struct platform_device *pdev)
+{
+	struct crystalcove_pwm *pwm = platform_get_drvdata(pdev);
+
+	return pwmchip_remove(&pwm->chip);
+}
+
+static struct platform_driver crystalcove_pwm_driver = {
+	.probe = crystalcove_pwm_probe,
+	.remove = crystalcove_pwm_remove,
+	.driver = {
+		.name = "crystal_cove_pwm",
+	},
+};
+
+builtin_platform_driver(crystalcove_pwm_driver);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 8bf495ffb020..e0606c01e8ac 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -22,9 +22,7 @@ source "drivers/gpu/vga/Kconfig"
 source "drivers/gpu/host1x/Kconfig"
 source "drivers/gpu/ipu-v3/Kconfig"
 
-menu "Direct Rendering Manager"
 source "drivers/gpu/drm/Kconfig"
-endmenu
 
 menu "Frame buffer Devices"
 source "drivers/video/fbdev/Kconfig"
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index de13bfc35634..bae79f3c4d28 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -12,6 +12,8 @@
 
 #include <drm/drmP.h>
 
+struct dw_hdmi;
+
 enum {
 	DW_HDMI_RES_8,
 	DW_HDMI_RES_10,
@@ -59,4 +61,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
 		 void *data, struct drm_encoder *encoder,
 		 struct resource *iores, int irq,
 		 const struct dw_hdmi_plat_data *plat_data);
+
+void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
+void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
+void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
+
 #endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 5aa519711e0b..8b5ce7c5d9bb 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -137,17 +137,18 @@ void drm_err(const char *format, ...);
 /*@{*/
 
 /* driver capabilities and requirements mask */
-#define DRIVER_USE_AGP     0x1
-#define DRIVER_PCI_DMA     0x8
-#define DRIVER_SG          0x10
-#define DRIVER_HAVE_DMA    0x20
-#define DRIVER_HAVE_IRQ    0x40
-#define DRIVER_IRQ_SHARED  0x80
-#define DRIVER_GEM         0x1000
-#define DRIVER_MODESET     0x2000
-#define DRIVER_PRIME       0x4000
-#define DRIVER_RENDER      0x8000
-#define DRIVER_ATOMIC      0x10000
+#define DRIVER_USE_AGP			0x1
+#define DRIVER_PCI_DMA			0x8
+#define DRIVER_SG			0x10
+#define DRIVER_HAVE_DMA			0x20
+#define DRIVER_HAVE_IRQ			0x40
+#define DRIVER_IRQ_SHARED		0x80
+#define DRIVER_GEM			0x1000
+#define DRIVER_MODESET			0x2000
+#define DRIVER_PRIME			0x4000
+#define DRIVER_RENDER			0x8000
+#define DRIVER_ATOMIC			0x10000
+#define DRIVER_KMS_LEGACY_CONTEXT	0x20000
 
 /***********************************************************************/
 /** \name Macros to make printk easier */
@@ -675,13 +676,12 @@ struct drm_minor {
 
 	/* currently active master for this node. Protected by master_mutex */
 	struct drm_master *master;
-	struct drm_mode_group mode_group;
 };
 
 
 struct drm_pending_vblank_event {
 	struct drm_pending_event base;
-	int pipe;
+	unsigned int pipe;
 	struct drm_event_vblank event;
 };
 
@@ -700,7 +700,7 @@ struct drm_vblank_crtc {
 					/* for wraparound handling */
 	u32 last_wait;			/* Last vblank seqno waited per CRTC */
 	unsigned int inmodeset;		/* Display driver is setting mode */
-	int crtc;			/* crtc index */
+	unsigned int pipe;		/* crtc index */
 	bool enabled;			/* so we don't call enable more than
 					   once per disable */
 };
@@ -887,6 +887,7 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
 /*@{*/
 
 				/* Driver support (drm_drv.h) */
+extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
 extern long drm_ioctl(struct file *filp,
 		      unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
@@ -920,34 +921,34 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
 extern int drm_irq_install(struct drm_device *dev, int irq);
 extern int drm_irq_uninstall(struct drm_device *dev);
 
-extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
 			   struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count(struct drm_device *dev, int pipe);
 extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
 				     struct timeval *vblanktime);
-extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
-				     struct drm_pending_vblank_event *e);
+extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+				  struct drm_pending_vblank_event *e);
 extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
 				       struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
 extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_vblank_get(struct drm_device *dev, int crtc);
-extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
 extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
 extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, int crtc);
-extern void drm_vblank_on(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
 extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
 
 extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
-						 int crtc, int *max_error,
+						 unsigned int pipe, int *max_error,
 						 struct timeval *vblank_time,
 						 unsigned flags,
 						 const struct drm_crtc *refcrtc,
@@ -968,8 +969,8 @@ static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc
 }
 
 /* Modesetting support */
-extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
-extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
 
 				/* Stub support (drm_stub.h) */
 extern struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 8a3a913320eb..e67aeac2aee0 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -166,7 +166,8 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
 static inline bool
 drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
 {
-	return state->mode_changed || state->active_changed;
+	return state->mode_changed || state->active_changed ||
+	       state->connectors_changed;
 }
 
 
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index cc1fee8a12d0..11266d147a29 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -87,8 +87,8 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
 				struct drm_framebuffer *fb,
 				struct drm_pending_vblank_event *event,
 				uint32_t flags);
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
-				      int mode);
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+				     int mode);
 
 /* default implementations for state handling */
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3b4d8a4a23fb..faaeff7db684 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -255,12 +255,13 @@ struct drm_atomic_state;
  * @crtc: backpointer to the CRTC
  * @enable: whether the CRTC should be enabled, gates all other state
  * @active: whether the CRTC is actively displaying (used for DPMS)
- * @mode_changed: for use by helpers and drivers when computing state updates
- * @active_changed: for use by helpers and drivers when computing state updates
+ * @planes_changed: planes on this crtc are updated
+ * @mode_changed: crtc_state->mode or crtc_state->enable has been changed
+ * @active_changed: crtc_state->active has been toggled.
+ * @connectors_changed: connectors to this crtc have been updated
  * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
  * @last_vblank_count: for helpers and drivers to capture the vblank of the
  * 	update to ensure framebuffer cleanup isn't done too early
- * @planes_changed: for use by helpers and drivers when computing state updates
  * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
  * @mode: current mode timings
  * @event: optional pointer to a DRM event to signal upon completion of the
@@ -283,6 +284,7 @@ struct drm_crtc_state {
 	bool planes_changed : 1;
 	bool mode_changed : 1;
 	bool active_changed : 1;
+	bool connectors_changed : 1;
 
 	/* attached planes bitmask:
 	 * WARNING: transitional helpers do not maintain plane_mask so
@@ -525,7 +527,7 @@ struct drm_connector_state {
  * etc.
  */
 struct drm_connector_funcs {
-	void (*dpms)(struct drm_connector *connector, int mode);
+	int (*dpms)(struct drm_connector *connector, int mode);
 	void (*save)(struct drm_connector *connector);
 	void (*restore)(struct drm_connector *connector);
 	void (*reset)(struct drm_connector *connector);
@@ -861,7 +863,7 @@ struct drm_plane {
 
 	uint32_t possible_crtcs;
 	uint32_t *format_types;
-	uint32_t format_count;
+	unsigned int format_count;
 	bool format_default;
 
 	struct drm_crtc *crtc;
@@ -1016,29 +1018,6 @@ struct drm_mode_config_funcs {
 };
 
 /**
- * struct drm_mode_group - group of mode setting resources for potential sub-grouping
- * @num_crtcs: CRTC count
- * @num_encoders: encoder count
- * @num_connectors: connector count
- * @num_bridges: bridge count
- * @id_list: list of KMS object IDs in this group
- *
- * Currently this simply tracks the global mode setting state.  But in the
- * future it could allow groups of objects to be set aside into independent
- * control groups for use by different user level processes (e.g. two X servers
- * running simultaneously on different heads, each with their own mode
- * configuration and freedom of mode setting).
- */
-struct drm_mode_group {
-	uint32_t num_crtcs;
-	uint32_t num_encoders;
-	uint32_t num_connectors;
-
-	/* list of object IDs for this group */
-	uint32_t *id_list;
-};
-
-/**
  * struct drm_mode_config - Mode configuration control structure
  * @mutex: mutex protecting KMS related lists and structures
  * @connection_mutex: ww mutex protecting connector state and routing
@@ -1289,13 +1268,13 @@ extern int drm_universal_plane_init(struct drm_device *dev,
 				    unsigned long possible_crtcs,
 				    const struct drm_plane_funcs *funcs,
 				    const uint32_t *formats,
-				    uint32_t format_count,
+				    unsigned int format_count,
 				    enum drm_plane_type type);
 extern int drm_plane_init(struct drm_device *dev,
 			  struct drm_plane *plane,
 			  unsigned long possible_crtcs,
 			  const struct drm_plane_funcs *funcs,
-			  const uint32_t *formats, uint32_t format_count,
+			  const uint32_t *formats, unsigned int format_count,
 			  bool is_primary);
 extern void drm_plane_cleanup(struct drm_plane *plane);
 extern unsigned int drm_plane_index(struct drm_plane *plane);
@@ -1322,9 +1301,6 @@ extern const char *drm_get_tv_select_name(int val);
 extern void drm_fb_release(struct drm_file *file_priv);
 extern void drm_property_destroy_user_blobs(struct drm_device *dev,
                                             struct drm_file *file_priv);
-extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
-extern void drm_mode_group_destroy(struct drm_mode_group *group);
-extern void drm_reinit_primary_mode_group(struct drm_device *dev);
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
 				 struct i2c_adapter *adapter);
@@ -1577,8 +1553,45 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev,
 }
 
 /* Plane list iterator for legacy (overlay only) planes. */
-#define drm_for_each_legacy_plane(plane, planelist) \
-	list_for_each_entry(plane, planelist, head) \
+#define drm_for_each_legacy_plane(plane, dev) \
+	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
 		if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 
+#define drm_for_each_plane(plane, dev) \
+	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+
+#define drm_for_each_crtc(crtc, dev) \
+	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+static inline void
+assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
+{
+	/*
+	 * The connector hotadd/remove code currently grabs both locks when
+	 * updating lists. Hence readers need only hold either of them to be
+	 * safe and the check amounts to
+	 *
+	 * WARN_ON(not_holding(A) && not_holding(B)).
+	 */
+	WARN_ON(!mutex_is_locked(&mode_config->mutex) &&
+		!drm_modeset_is_locked(&mode_config->connection_mutex));
+}
+
+#define drm_for_each_connector(connector, dev) \
+	for (assert_drm_connector_list_read_locked(&(dev)->mode_config),	\
+	     connector = list_first_entry(&(dev)->mode_config.connector_list,	\
+					  struct drm_connector, head);		\
+	     &connector->head != (&(dev)->mode_config.connector_list);		\
+	     connector = list_next_entry(connector, head))
+
+#define drm_for_each_encoder(encoder, dev) \
+	list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+
+#define drm_for_each_fb(fb, dev) \
+	for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),		\
+	     fb = list_first_entry(&(dev)->mode_config.fb_list,	\
+					  struct drm_framebuffer, head);	\
+	     &fb->head != (&(dev)->mode_config.fb_list);			\
+	     fb = list_next_entry(fb, head))
+
 #endif /* __DRM_CRTC_H__ */
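A short sketch of the new iterators in use; per assert_drm_connector_list_read_locked(), the caller must hold mode_config.mutex or the connection_mutex ww-lock. The function name is hypothetical:

	static int example_count_connectors(struct drm_device *dev)
	{
		struct drm_connector *connector;
		int n = 0;

		mutex_lock(&dev->mode_config.mutex);
		drm_for_each_connector(connector, dev)
			n++;
		mutex_unlock(&dev->mode_config.mutex);

		return n;
	}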
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 918aa68b5199..2a747a91fded 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -108,8 +108,10 @@ struct drm_crtc_helper_funcs {
 	/* atomic helpers */
 	int (*atomic_check)(struct drm_crtc *crtc,
 			    struct drm_crtc_state *state);
-	void (*atomic_begin)(struct drm_crtc *crtc);
-	void (*atomic_flush)(struct drm_crtc *crtc);
+	void (*atomic_begin)(struct drm_crtc *crtc,
+			     struct drm_crtc_state *old_crtc_state);
+	void (*atomic_flush)(struct drm_crtc *crtc,
+			     struct drm_crtc_state *old_crtc_state);
 };
 
 /**
@@ -190,7 +192,7 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
 extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
-extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
 extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
 
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2e86f642fc33..499e9f625aef 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -420,7 +420,7 @@
 
 #define DP_TEST_SINK_MISC		    0x246
 # define DP_TEST_CRC_SUPPORTED		    (1 << 5)
-# define DP_TEST_COUNT_MASK		    0x7
+# define DP_TEST_COUNT_MASK		    0xf
 
 #define DP_TEST_RESPONSE		    0x260
 # define DP_TEST_ACK			    (1 << 0)
@@ -578,6 +578,7 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
 u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane);
 
+#define DP_BRANCH_OUI_HEADER_SIZE	0xc
 #define DP_RECEIVER_CAP_SIZE		0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE	2
 
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 0dfd94def593..dbab4622b58f 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -122,6 +122,7 @@ struct drm_fb_helper {
 	bool delayed_hotplug;
 };
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
 			   const struct drm_fb_helper_funcs *funcs);
 int drm_fb_helper_init(struct drm_device *dev,
@@ -136,11 +137,38 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 			    struct fb_info *info);
 
 bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
+
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper);
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height);
 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 			    uint32_t depth);
 
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
+
+ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
+			       size_t count, loff_t *ppos);
+ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
+				size_t count, loff_t *ppos);
+
+void drm_fb_helper_sys_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect);
+void drm_fb_helper_sys_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area);
+void drm_fb_helper_sys_imageblit(struct fb_info *info,
+				 const struct fb_image *image);
+
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect);
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area);
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+				 const struct fb_image *image);
+
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
+
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
@@ -158,4 +186,188 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
 int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector);
+#else
+static inline void drm_fb_helper_prepare(struct drm_device *dev,
+					struct drm_fb_helper *helper,
+					const struct drm_fb_helper_funcs *funcs)
+{
+}
+
+static inline int drm_fb_helper_init(struct drm_device *dev,
+		       struct drm_fb_helper *helper, int crtc_count,
+		       int max_conn)
+{
+	return 0;
+}
+
+static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
+{
+}
+
+static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+					    struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_set_par(struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+					  struct fb_info *info)
+{
+	return 0;
+}
+
+static inline bool
+drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+	return true;
+}
+
+static inline struct fb_info *
+drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+	return NULL;
+}
+
+static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline void drm_fb_helper_fill_var(struct fb_info *info,
+					  struct drm_fb_helper *fb_helper,
+					  uint32_t fb_width, uint32_t fb_height)
+{
+}
+
+static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+					  uint32_t depth)
+{
+}
+
+static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
+					struct fb_info *info)
+{
+	return 0;
+}
+
+static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
+					     char __user *buf, size_t count,
+					     loff_t *ppos)
+{
+	return -ENODEV;
+}
+
+static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
+					      const char __user *buf,
+					      size_t count, loff_t *ppos)
+{
+	return -ENODEV;
+}
+
+static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
+					      const struct fb_fillrect *rect)
+{
+}
+
+static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
+					      const struct fb_copyarea *area)
+{
+}
+
+static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
+					       const struct fb_image *image)
+{
+}
+
+static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+					      const struct fb_fillrect *rect)
+{
+}
+
+static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+					      const struct fb_copyarea *area)
+{
+}
+
+static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+					       const struct fb_image *image)
+{
+}
+
+static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
+					     int state)
+{
+}
+
+static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
+					       int bpp_sel)
+{
+	return 0;
+}
+
+static inline int
+drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_debug_enter(struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_debug_leave(struct fb_info *info)
+{
+	return 0;
+}
+
+static inline struct drm_display_mode *
+drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
+		       int width, int height)
+{
+	return NULL;
+}
+
+static inline struct drm_display_mode *
+drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+		      int width, int height)
+{
+	return NULL;
+}
+
+static inline int
+drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+				struct drm_connector *connector)
+{
+	return 0;
+}
+
+static inline int
+drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+				   struct drm_connector *connector)
+{
+	return 0;
+}
+#endif
 #endif
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 70595ff565ba..5dd18bfdf601 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,7 +130,6 @@ struct drm_crtc;
 struct drm_plane;
 
 void drm_modeset_lock_all(struct drm_device *dev);
-int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
 void drm_modeset_unlock_all(struct drm_device *dev);
 void drm_modeset_lock_crtc(struct drm_crtc *crtc,
 			   struct drm_plane *plane);
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 96e16283afb9..dda401bf910e 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -43,9 +43,8 @@
  * planes.
  */
 
-extern int drm_crtc_init(struct drm_device *dev,
-			 struct drm_crtc *crtc,
-			 const struct drm_crtc_funcs *funcs);
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+		  const struct drm_crtc_funcs *funcs);
 
 /**
  * drm_plane_helper_funcs - helper operations for CRTCs
@@ -79,26 +78,26 @@ static inline void drm_plane_helper_add(struct drm_plane *plane,
 	plane->helper_private = funcs;
 }
 
-extern int drm_plane_helper_check_update(struct drm_plane *plane,
-					 struct drm_crtc *crtc,
-					 struct drm_framebuffer *fb,
-					 struct drm_rect *src,
-					 struct drm_rect *dest,
-					 const struct drm_rect *clip,
-					 int min_scale,
-					 int max_scale,
-					 bool can_position,
-					 bool can_update_disabled,
-					 bool *visible);
-extern int drm_primary_helper_update(struct drm_plane *plane,
-				     struct drm_crtc *crtc,
-				     struct drm_framebuffer *fb,
-				     int crtc_x, int crtc_y,
-				     unsigned int crtc_w, unsigned int crtc_h,
-				     uint32_t src_x, uint32_t src_y,
-				     uint32_t src_w, uint32_t src_h);
-extern int drm_primary_helper_disable(struct drm_plane *plane);
-extern void drm_primary_helper_destroy(struct drm_plane *plane);
+int drm_plane_helper_check_update(struct drm_plane *plane,
+				  struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  struct drm_rect *src,
+				  struct drm_rect *dest,
+				  const struct drm_rect *clip,
+				  int min_scale,
+				  int max_scale,
+				  bool can_position,
+				  bool can_update_disabled,
+				  bool *visible);
+int drm_primary_helper_update(struct drm_plane *plane,
+			      struct drm_crtc *crtc,
+			      struct drm_framebuffer *fb,
+			      int crtc_x, int crtc_y,
+			      unsigned int crtc_w, unsigned int crtc_h,
+			      uint32_t src_x, uint32_t src_y,
+			      uint32_t src_w, uint32_t src_h);
+int drm_primary_helper_disable(struct drm_plane *plane);
+void drm_primary_helper_destroy(struct drm_plane *plane);
 extern const struct drm_plane_funcs drm_primary_helper_funcs;
 
 int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index b08bdade6002..9e9bddaa58a5 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,8 +3,8 @@
 #ifndef _DRM_INTEL_GTT_H
 #define	_DRM_INTEL_GTT_H
 
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
-		   phys_addr_t *mappable_base, unsigned long *mappable_end);
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+		   phys_addr_t *mappable_base, u64 *mappable_end);
 
 int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 		     struct agp_bridge_data *bridge);
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index e2706140eaff..c0d712d22b07 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -57,5 +57,6 @@ struct gpiod_lookup_table {
 }
 
 void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
 
 #endif /* __LINUX_GPIO_MACHINE_H */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2f295cde657e..8c5e8b91a3cb 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -34,6 +34,13 @@
 /* color index */
 #define DRM_FORMAT_C8		fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
 
+/* 8 bpp Red */
+#define DRM_FORMAT_R8		fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+
+/* 16 bpp RG */
+#define DRM_FORMAT_RG88		fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
+#define DRM_FORMAT_GR88		fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
+
 /* 8 bpp RGB */
 #define DRM_FORMAT_RGB332	fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
 #define DRM_FORMAT_BGR233	fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index db809b722985..dbd16a2d37db 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -354,9 +354,15 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_REVISION              32
 #define I915_PARAM_SUBSLICE_TOTAL	 33
 #define I915_PARAM_EU_TOTAL		 34
+#define I915_PARAM_HAS_GPU_RESET	 35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
 
 typedef struct drm_i915_getparam {
-	int param;
+	s32 param;
+	/*
+	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
+	 * compat32 code. Don't repeat this mistake.
+	 */
 	int __user *value;
 } drm_i915_getparam_t;
 
@@ -764,7 +770,12 @@ struct drm_i915_gem_execbuffer2 {
 #define I915_EXEC_BSD_RING1		(1<<13)
 #define I915_EXEC_BSD_RING2		(2<<13)
 
-#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
+/** Tell the kernel that the batchbuffer is processed by
+ *  the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
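The mask keeps its old shape: negating (highest_flag << 1) in two's complement sets every bit above the highest known flag, so unknown bits are still caught by a single AND:

	/* I915_EXEC_RESOURCE_STREAMER = 1 << 15
	 * -(1 << 16) = 0xFFFF0000, i.e. bits 16..31
	 * flags & __I915_EXEC_UNKNOWN_FLAGS rejects anything above bit 15
	 */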
 
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1114,6 +1125,7 @@ struct drm_i915_gem_context_param {
 	__u32 size;
 	__u64 param;
 #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
 	__u64 value;
 };
 
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index c472bedbe38e..05b204954d16 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@
 #define DRM_VMW_GB_SURFACE_CREATE    23
 #define DRM_VMW_GB_SURFACE_REF       24
 #define DRM_VMW_SYNCCPU              25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
 
 /*************************************************************************/
 /**
@@ -88,6 +89,8 @@
 #define DRM_VMW_PARAM_3D_CAPS_SIZE     8
 #define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
 #define DRM_VMW_PARAM_MAX_MOB_SIZE     10
+#define DRM_VMW_PARAM_SCREEN_TARGET    11
+#define DRM_VMW_PARAM_DX               12
 
 /**
  * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -296,7 +299,7 @@ union drm_vmw_surface_reference_arg {
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
 
 struct drm_vmw_execbuf_arg {
 	uint64_t commands;
@@ -305,6 +308,8 @@ struct drm_vmw_execbuf_arg {
 	uint64_t fence_rep;
 	uint32_t version;
 	uint32_t flags;
+	uint32_t context_handle;
+	uint32_t pad64;
 };
 
 /**
@@ -825,7 +830,6 @@ struct drm_vmw_update_layout_arg {
 enum drm_vmw_shader_type {
 	drm_vmw_shader_type_vs = 0,
 	drm_vmw_shader_type_ps,
-	drm_vmw_shader_type_gs
 };
 
 
@@ -907,6 +911,8 @@ enum drm_vmw_surface_flags {
  * @buffer_handle     Buffer handle of backup buffer. SVGA3D_INVALID_ID
  *                    if none.
  * @base_size         Size of the base mip level for all faces.
+ * @array_size        Must be zero for non-DX hardware, and if non-zero
+ *                    svga3d_flags must have the proper bind flags set.
  *
  * Input argument to the  DRM_VMW_GB_SURFACE_CREATE Ioctl.
  * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -919,7 +925,7 @@ struct drm_vmw_gb_surface_create_req {
 	uint32_t multisample_count;
 	uint32_t autogen_filter;
 	uint32_t buffer_handle;
-	uint32_t pad64;
+	uint32_t array_size;
 	struct drm_vmw_size base_size;
 };
 
@@ -1059,4 +1065,28 @@ struct drm_vmw_synccpu_arg {
 	uint32_t pad64;
 };
 
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+	drm_vmw_context_legacy,
+	drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+	enum drm_vmw_extended_context req;
+	struct drm_vmw_context_arg rep;
+};
 #endif
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index 0530e5a4c6b1..d8fc96ed11e9 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -296,6 +296,7 @@
 
 /* Video buffer addresses */
 #define VIDW_BUF_START(_buff)			(0xA0 + ((_buff) * 8))
+#define VIDW_BUF_START_S(_buff)			(0x40A0 + ((_buff) * 8))
 #define VIDW_BUF_START1(_buff)			(0xA4 + ((_buff) * 8))
 #define VIDW_BUF_END(_buff)			(0xD0 + ((_buff) * 8))
 #define VIDW_BUF_END1(_buff)			(0xD4 + ((_buff) * 8))